#!/usr/bin/env python3
from gevent import monkey
monkey.patch_all()
import gevent.socket

from youtube import yt_app
from youtube import util

# These imports are only for their side effects: each module imports yt_app
# and registers its routes on it
from youtube import watch, search, playlist, channel, local_playlist, comments, subscriptions

import settings

from gevent.pywsgi import WSGIServer
import urllib.parse
import urllib3
import socket
import socks, sockshandler
import subprocess
import re
import sys
import time


def youtu_be(env, start_response):
    '''Rewrite youtu.be short links into equivalent /watch requests.'''
    video_id = env['PATH_INFO'][1:]
    env['PATH_INFO'] = '/watch'
    if not env['QUERY_STRING']:
        env['QUERY_STRING'] = 'v=' + video_id
    else:
        env['QUERY_STRING'] += '&v=' + video_id
    yield from yt_app(env, start_response)
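
# Illustrative example of the rewrite above (the video id and timestamp are
# hypothetical): a request dispatched here with PATH_INFO='/abc123' and
# QUERY_STRING='t=30' is forwarded to yt_app as PATH_INFO='/watch',
# QUERY_STRING='t=30&v=abc123'.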


RANGE_RE = re.compile(r'bytes=(\d+-(?:\d+)?)')
def parse_range(range_header, content_length):
    # range_header can be like 'bytes=200-1000' or 'bytes=200-' (open-ended).
    # content_length is the number of bytes remaining in the current response;
    # it is used to compute the end byte when the range has no explicit end.
    # Multi-part ranges are not supported. See the examples after this
    # function.
    match = RANGE_RE.fullmatch(range_header.strip())
    if not match:
        print('Unsupported range header format:', range_header)
        return None
    start, end = match.group(1).split('-')
    start_byte = int(start)
    if not end:
        end_byte = start_byte + content_length - 1
    else:
        end_byte = int(end)
    return start_byte, end_byte
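
# Illustrative examples for parse_range (values are hypothetical; assume the
# current response has 1000 bytes left to send):
#   parse_range('bytes=200-999', 1000)   -> (200, 999)
#   parse_range('bytes=200-', 1000)      -> (200, 1199)  # open-ended range
#   parse_range('bytes=0-99,200-', 1000) -> None         # multi-range unsupported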


def proxy_site(env, start_response, video=False):
    '''Stream a request for SERVER_NAME + PATH_INFO through to the upstream
    site. Retries with a Range request if the upstream closes the connection
    before all of the content has been sent.'''
    send_headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64)',
        'Accept': '*/*',
    }
    if 'HTTP_RANGE' in env:
        send_headers['Range'] = env['HTTP_RANGE']

    url = "https://" + env['SERVER_NAME'] + env['PATH_INFO']
    # remove /name portion
    if video and '/videoplayback/name/' in url:
        url = url[0:url.rfind('/name/')]
    if env['QUERY_STRING']:
        url += '?' + env['QUERY_STRING']

    try_num = 1
    first_attempt = True
    current_attempt_position = 0
    while try_num <= 3:   # Try a given byte position three times
        if not first_attempt:
            print('(Try %d)' % try_num, 'Trying with', send_headers['Range'])

        if video:
            params = urllib.parse.parse_qs(env['QUERY_STRING'])
            params_use_tor = int(params.get('use_tor', ['0'])[0])
            use_tor = (settings.route_tor == 2) or params_use_tor
            response, cleanup_func = util.fetch_url_response(url, send_headers,
                                                             use_tor=use_tor,
                                                             max_redirects=10)
        else:
            response, cleanup_func = util.fetch_url_response(url, send_headers)

        response_headers = response.headers
        if isinstance(response_headers, urllib3._collections.HTTPHeaderDict):
            response_headers = response_headers.items()
        if video:
            response_headers = (list(response_headers)
                                +[('Access-Control-Allow-Origin', '*')])

        if first_attempt:
            start_response(str(response.status) + ' ' + response.reason,
                           response_headers)

        content_length = int(dict(response_headers).get('Content-Length', 0))
        if response.status >= 400:
            print('Error: YouTube returned "%d %s" while routing %s' % (
                response.status, response.reason, url.split('?')[0]))

        total_received = 0
        retry = False
        while True:
            # Read a bit over 3 seconds of 360p video per chunk. 32*8192 =
            # 4*65536 = 262144 bytes; reading in large chunks like this keeps
            # the socket library from limiting the TCP window size the way
            # small 8192-byte reads would. The tradeoff is that the client can
            # only buffer video in these increments, so much larger chunks
            # (e.g. 6 seconds' worth) would make the user wait for the whole
            # chunk to download before playback starts. Might need
            # fine-tuning.
            content_part = response.read(32*8192)
            total_received += len(content_part)
            if not content_part:
                # Sometimes YouTube closes the connection before sending all of
                # the content. Retry with a range request for the missing
                # content. See
                # https://github.com/user234683/youtube-local/issues/40
                if total_received < content_length:
                    if 'Range' in send_headers:
                        int_range = parse_range(send_headers['Range'],
                                                content_length)
                        if not int_range: # give up b/c unrecognized range
                            break
                        start, end = int_range
                    else:
                        start, end = 0, (content_length - 1)

                    fail_byte = start + total_received
                    send_headers['Range'] = 'bytes=%d-%d' % (fail_byte, end)
                    print(
                        'Warning: YouTube closed the connection before byte',
                        str(fail_byte) + '.',
                        'Expected content up to byte',
                        str(start + content_length - 1) + '.'
                    )

                    retry = True
                    first_attempt = False
                    if fail_byte == current_attempt_position:
                        try_num += 1
                    else:
                        try_num = 1
                        current_attempt_position = fail_byte
                break
            yield content_part
        cleanup_func(response)
        if retry:
            # YouTube will return 503 Service Unavailable if you do a bunch
            # of range requests too quickly.
            time.sleep(1)
            continue
        else:
            break
    else: # no break
        print('Error: YouTube closed the connection before',
              'providing all content. Retried three times:', url.split('?')[0])
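
# Illustrative retry sequence for proxy_site (all numbers are hypothetical):
# suppose a proxied videoplayback response advertises Content-Length 1000000
# but the connection is closed after 600000 bytes. The next attempt sends
# 'Range: bytes=600000-999999'. If that attempt stalls at the same byte,
# try_num is incremented instead of reset, and after three failed attempts
# starting at one position the while/else branch above logs the error and
# gives up.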


def proxy_video(env, start_response):
    yield from proxy_site(env, start_response, video=True)


site_handlers = {
    'youtube.com': yt_app,
    'youtube-nocookie.com': yt_app,
    'youtu.be': youtu_be,
    'ytimg.com': proxy_site,
    'ggpht.com': proxy_site,
    'googleusercontent.com': proxy_site,
    'sponsor.ajay.app': proxy_site,
    'googlevideo.com': proxy_video,
}
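
# Illustrative dispatch examples (the paths are hypothetical): a request for
# /https://i.ytimg.com/vi/abc/hqdefault.jpg falls through to the 'ytimg.com'
# entry (proxy_site), while /https://www.youtube.com/watch?v=abc matches
# 'youtube.com' and is handled by yt_app. The suffix matching itself happens
# in site_dispatch below.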


def split_url(url):
    ''' Split https://sub.example.com/foo/bar.html into ('sub.example.com', '/foo/bar.html')'''
    # XXX: Is this regex safe from REDOS?
    # python STILL doesn't have a proper regular expression engine like grep uses built in...
    match = re.match(r'(?:https?://)?([\w-]+(?:\.[\w-]+)+?)(/.*|$)', url)
    if match is None:
        raise ValueError('Invalid or unsupported url: ' + url)

    return match.group(1), match.group(2)
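
# Illustrative examples (the URLs are hypothetical):
#   split_url('https://youtu.be/abc')         -> ('youtu.be', '/abc')
#   split_url('i.ytimg.com/vi/abc/hq720.jpg') -> ('i.ytimg.com', '/vi/abc/hq720.jpg')
#   split_url('not a url')                    raises ValueError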


def error_code(code, start_response):
    start_response(code, [])
    return code.encode()


def site_dispatch(env, start_response):
    client_address = env['REMOTE_ADDR']
    try:
        # correct malformed query string with ? separators instead of &
        env['QUERY_STRING'] = env['QUERY_STRING'].replace('?', '&')

        # Fix PATH_INFO for UWSGI
        if 'REQUEST_URI' in env:
            env['PATH_INFO'] = urllib.parse.unquote(
                env['REQUEST_URI'].split('?')[0]
            )

        method = env['REQUEST_METHOD']
        path = env['PATH_INFO']

        if (method == "POST"
                and client_address not in ('127.0.0.1', '::1')
                and not settings.allow_foreign_post_requests):
            yield error_code('403 Forbidden', start_response)
            return

        # redirect the bare root path to /https://youtube.com
        if path == '' or path == '/':
            start_response('302 Found', [('Location', '/https://youtube.com')])
            return

        try:
            env['SERVER_NAME'], env['PATH_INFO'] = split_url(path[1:])
        except ValueError:
            yield error_code('404 Not Found', start_response)
            return

        # Walk the domain from the TLD inward ('com', then 'ytimg.com', then
        # 'i.ytimg.com', ...) and dispatch to the first registered handler
        # whose key matches one of these suffixes.
        base_name = ''
        for domain in reversed(env['SERVER_NAME'].split('.')):
            if base_name == '':
                base_name = domain
            else:
                base_name = domain + '.' + base_name

            try:
                handler = site_handlers[base_name]
            except KeyError:
                continue
            else:
                yield from handler(env, start_response)
                break
        else:   # did not break
            yield error_code('404 Not Found', start_response)
            return
    except Exception:
        start_response('500 Internal Server Error', [])
        yield b'500 Internal Server Error'
        raise
    return


class FilteredRequestLog:
    '''Don't log noisy thumbnail and avatar requests'''
    filter_re = re.compile(r'''(?x)
                            "GET\ /https://(
                            i[.]ytimg[.]com/|
                            www[.]youtube[.]com/data/subscription_thumbnails/|
                            yt3[.]ggpht[.]com/|
                            www[.]youtube[.]com/api/timedtext|
                            [-\w]+[.]googlevideo[.]com/).*"\ (200|206)
                            ''')

    def __init__(self):
        pass

    def write(self, s):
        if not self.filter_re.search(s):
            sys.stderr.write(s)
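
# Illustrative filtering (the log lines are hypothetical): a successful
# thumbnail request such as
#   '... "GET /https://i.ytimg.com/vi/abc/hqdefault.jpg HTTP/1.1" 200 ...'
# is suppressed, while watch-page requests and non-200/206 responses are
# still written to stderr.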


if __name__ == '__main__':
    if settings.allow_foreign_addresses:
        ip_server = '0.0.0.0'
    else:
        ip_server = '127.0.0.1'
    server = WSGIServer((ip_server, settings.port_number), site_dispatch,
                        log=FilteredRequestLog())

    print('Starting HTTP server at http://%s:%s/' %
          (ip_server, settings.port_number))
    server.serve_forever()

# for uwsgi, gunicorn, etc.
application = site_dispatch