Diffstat (limited to 'youtube')
-rw-r--r--   youtube/playlist.py   121
1 file changed, 0 insertions(+), 121 deletions(-)
diff --git a/youtube/playlist.py b/youtube/playlist.py
index 19f825f..3951b24 100644
--- a/youtube/playlist.py
+++ b/youtube/playlist.py
@@ -15,44 +15,6 @@ with open("yt_playlist_template.html", "r") as file:
-def youtube_obfuscated_endian(offset):
-    if offset < 128:
-        return bytes((offset,))
-    first_byte = 255 & offset
-    second_byte = 255 & (offset >> 7)
-    second_byte = second_byte | 1
-
-    # The next 2 bytes encode the offset in little endian order, BUT the least
-    # significant bit of the second byte is not part of the offset -- it is always
-    # set to 1. To decode, drop that bit and read the remaining 15 bits
-    # (8 from the first byte, 7 from the second) as a little endian number.
-
-    return bytes((first_byte, second_byte))
-
-
-
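A couple of concrete values, worked out from the function above (illustration only, not part of the module):

# Worked examples for youtube_obfuscated_endian: offsets below 128 are a single
# byte; larger offsets become two bytes whose second byte always has its lowest bit set.
assert youtube_obfuscated_endian(100) == b'\x64'        # 100 < 128: one byte
assert youtube_obfuscated_endian(200) == b'\xc8\x01'    # 200 & 0xff = 0xc8, (200 >> 7) | 1 = 1
assert youtube_obfuscated_endian(300) == b'\x2c\x03'    # 300 & 0xff = 0x2c, (300 >> 7) | 1 = 3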
-# just some garbage that's required, don't know what it means, if it means anything.
-ctoken_header = b'\xe2\xa9\x85\xb2\x02' # e2 a9 85 b2 02
-
-def byte(x):
-    return bytes((x,))
-
-# TL;DR: the offset is hidden inside 3 nested base 64 encodes with random junk data added on the side periodically
-def create_ctoken(playlist_id, offset):
-    obfuscated_offset = b'\x08' + youtube_obfuscated_endian(offset) # 0x08 slapped on for no apparent reason
-    obfuscated_offset = b'PT:' + base64.urlsafe_b64encode(obfuscated_offset).replace(b'=', b'')
-    obfuscated_offset = b'z' + byte(len(obfuscated_offset)) + obfuscated_offset
-    obfuscated_offset = base64.urlsafe_b64encode(obfuscated_offset).replace(b'=', b'%3D')
-
-    playlist_bytes = b'VL' + bytes(playlist_id, 'ascii')
-    main_info = b'\x12' + byte(len(playlist_bytes)) + playlist_bytes + b'\x1a' + byte(len(obfuscated_offset)) + obfuscated_offset
-
-    ctoken = base64.urlsafe_b64encode(ctoken_header + byte(len(main_info)) + main_info)
-
-    return ctoken.decode('ascii')
-
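To make the three nested encodes concrete, here is a sketch of a decoder that peels them back in reverse order. It is illustrative only and never lived in playlist.py; it assumes the same header constant and the two-byte offset scheme described above.

import base64

CTOKEN_HEADER = b'\xe2\xa9\x85\xb2\x02'   # same unexplained prefix as ctoken_header above

def b64decode_padded(data):
    # create_ctoken strips '=' padding, so restore it before decoding
    return base64.urlsafe_b64decode(data + b'=' * (-len(data) % 4))

def parse_ctoken(ctoken):
    # Layer 1: outer base64 -> header, one length byte, main_info
    outer = b64decode_padded(ctoken.encode('ascii'))
    main_info = outer[len(CTOKEN_HEADER) + 1:]

    # main_info: 0x12, length, b'VL' + playlist id, 0x1a, length, offset blob
    id_len = main_info[1]
    playlist_id = main_info[2:2 + id_len][2:].decode('ascii')   # drop the 'VL' prefix
    offset_blob = main_info[2 + id_len + 2:]

    # Layer 2: percent-escaped base64 -> b'z', length byte, b'PT:', inner base64
    middle = b64decode_padded(offset_blob.replace(b'%3D', b'='))

    # Layer 3: inner base64 -> 0x08 followed by the obfuscated offset bytes
    raw = b64decode_padded(middle[5:])
    payload = raw[1:]
    if len(payload) == 1:
        offset = payload[0]
    else:
        # the second byte's lowest bit is padding; the rest is the high part of the offset
        offset = payload[0] | (payload[1] >> 1) << 8
    return playlist_id, offset

# e.g. parse_ctoken(create_ctoken('PLxxxxxxxxxxxxxxxxxx', 300)) == ('PLxxxxxxxxxxxxxxxxxx', 300)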
def playlist_ctoken(playlist_id, offset):
    offset = proto.uint(1, offset)
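The replacement visible in the context line above builds the offset field with proto.uint(1, offset). The proto helpers are not shown in this diff, but the call pattern looks like a standard protobuf varint field encoder, which would also explain the 0x08 the old code prepended by hand: it is the protobuf key byte for field number 1 with wire type 0 (varint). A minimal sketch under that assumption; the real proto module in this repository may differ:

def varint(value):
    # standard protobuf base-128 varint, least significant group first
    out = b''
    while value > 0x7f:
        out += bytes(((value & 0x7f) | 0x80,))
        value >>= 7
    return out + bytes((value,))

def uint(field_number, value):
    # key byte = (field_number << 3) | wire_type, with wire type 0 = varint,
    # so field 1 gives 0x08 -- the byte create_ctoken prepended by hand
    return varint(field_number << 3 | 0) + varint(value)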
@@ -91,32 +53,6 @@ def playlist_first_page(playlist_id):
    content = content[4:]
    content = json.loads(common.uppercase_escape(content.decode('utf-8')))
    return content
-
-ajax_info_dispatch = {
-    'view_count_text': ('views', common.get_text),
-    'num_videos_text': ('size', lambda node: common.get_text(node).split(' ')[0]),
-    'thumbnail': ('thumbnail', lambda node: node.url),
-    'title': ('title', common.get_text),
-    'owner_text': ('author', common.get_text),
-    'owner_endpoint': ('author_url', lambda node: node.url),
-    'description': ('description', common.get_formatted_text),
-
-}
-def metadata_info(ajax_json):
-    info = {}
-    try:
-        for key, node in ajax_json.items():
-            try:
-                simple_key, function = ajax_info_dispatch[key]
-            except KeyError:
-                continue
-            info[simple_key] = function(node)
-        return info
-    except (KeyError, IndexError):
-        print(ajax_json)
-        raise
-
-
#https://m.youtube.com/playlist?itct=CBMQybcCIhMIptj9xJaJ2wIV2JKcCh3Idwu-&ctoken=4qmFsgI2EiRWTFBMT3kwajlBdmxWWlB0bzZJa2pLZnB1MFNjeC0tN1BHVEMaDmVnWlFWRHBEUWxFJTNE&pbj=1
@@ -140,63 +76,6 @@ def get_videos_ajax(playlist_id, page):
    info = json.loads(common.uppercase_escape(content.decode('utf-8')))
    return info
-def get_playlist_videos(ajax_json):
-    videos = []
-    #info = get_bloated_playlist_videos(playlist_id, page)
-    #print(info)
-    video_list = ajax_json['content']['continuation_contents']['contents']
-
-
-    for video_json_crap in video_list:
-        try:
-            videos.append({
-                "title": video_json_crap["title"]['runs'][0]['text'],
-                "id": video_json_crap["video_id"],
-                "views": "",
-                "duration": common.default_multi_get(video_json_crap, 'length', 'runs', 0, 'text', default=''), # livestreams don't have a length
-                "author": video_json_crap['short_byline']['runs'][0]['text'],
-                "author_url": '',
-                "published": '',
-                'playlist_index': '',
-
-            })
-        except (KeyError, IndexError):
-            print(video_json_crap)
-            raise
-    return videos
-
-def get_playlist_videos_format2(playlist_id, page):
-    videos = []
-    info = get_bloated_playlist_videos(playlist_id, page)
-    video_list = info['response']['continuationContents']['playlistVideoListContinuation']['contents']
-
-    for video_json_crap in video_list:
-
-        video_json_crap = video_json_crap['videoRenderer']
-
-        try:
-            videos.append({
-                "title": video_json_crap["title"]['runs'][0]['text'],
-                "video_id": video_json_crap["videoId"],
-                "views": "",
-                "duration": common.default_multi_get(video_json_crap, 'lengthText', 'runs', 0, 'text', default=''), # livestreams don't have a length
-                "uploader": video_json_crap['shortBylineText']['runs'][0]['text'],
-                "uploader_url": common.ORIGIN_URL + video_json_crap['shortBylineText']['runs'][0]['navigationEndpoint']['commandMetadata']['webCommandMetadata']['url'],
-                "published": common.default_multi_get(video_json_crap, 'publishedTimeText', 'simpleText', default=''),
-                'playlist_index': video_json_crap['index']['runs'][0]['text'],
-
-            })
-        except (KeyError, IndexError):
-            print(video_json_crap)
-            raise
-    return videos
-
-
-def playlist_videos_html(ajax_json):
-    result = ''
-    for info in get_playlist_videos(ajax_json):
-        result += common.small_video_item_html(info)
-    return result
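For reference, the key lookups in get_playlist_videos above imply an input shaped roughly like the dict below. It is inferred from those lookups, not captured from YouTube; real responses carry many more fields, and the exact nesting consumed by common.default_multi_get is assumed.

# Minimal input shape accepted by get_playlist_videos (illustration only)
example_ajax_json = {
    'content': {
        'continuation_contents': {
            'contents': [
                {
                    'title': {'runs': [{'text': 'Some video title'}]},
                    'video_id': 'XXXXXXXXXXX',
                    'length': {'runs': [{'text': '3:32'}]},    # absent for livestreams
                    'short_byline': {'runs': [{'text': 'Some channel'}]},
                },
            ],
        },
    },
}
# get_playlist_videos(example_ajax_json) would yield one entry with title, id,
# duration and author filled in, and 'views', 'author_url', 'published',
# 'playlist_index' left as empty strings.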
playlist_stat_template = Template('''
<div>$stat</div>''')