about summary refs log tree commit diff stats
path: root/youtube/channel.py
diff options
context:
space:
mode:
Diffstat (limited to 'youtube/channel.py')
-rw-r--r--  youtube/channel.py  63
1 files changed, 49 insertions, 14 deletions
diff --git a/youtube/channel.py b/youtube/channel.py
index 3352ca2..a139bc1 100644
--- a/youtube/channel.py
+++ b/youtube/channel.py
@@ -274,6 +274,8 @@ def get_channel_tab(channel_id, page="1", sort=3, tab='videos', view=1,
# cache entries expire after 30 minutes
number_of_videos_cache = cachetools.TTLCache(128, 30*60)
+# Cache for continuation tokens (shorts/streams pagination)
+continuation_token_cache = cachetools.TTLCache(512, 15*60)
@cachetools.cached(number_of_videos_cache)
def get_number_of_videos_channel(channel_id):
if channel_id is None:
@@ -487,10 +489,46 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
if not channel_id:
channel_id = get_channel_id(base_url)
- # Use youtubei browse API with continuation token for all pages
- page_call = (get_channel_tab, channel_id, str(page_number), sort,
- tab, int(view))
- continuation = True
+ # For shorts/streams, use continuation token from cache or request
+ if tab in ('shorts', 'streams'):
+ if ctoken:
+ # Use ctoken directly from request (passed via pagination)
+ polymer_json = util.call_youtube_api('web', 'browse', {
+ 'continuation': ctoken,
+ })
+ continuation = True
+ elif page_number > 1:
+ # For page 2+, get ctoken from cache
+ cache_key = (channel_id, tab, sort, page_number - 1)
+ cached_ctoken = continuation_token_cache.get(cache_key)
+ if cached_ctoken:
+ polymer_json = util.call_youtube_api('web', 'browse', {
+ 'continuation': cached_ctoken,
+ })
+ continuation = True
+ else:
+ # Fallback: generate fresh ctoken
+ page_call = (get_channel_tab, channel_id, str(page_number), sort, tab, int(view))
+ continuation = True
+ polymer_json = gevent.spawn(*page_call)
+ polymer_json.join()
+ if polymer_json.exception:
+ raise polymer_json.exception
+ polymer_json = polymer_json.value
+ else:
+ # Page 1: generate fresh ctoken
+ page_call = (get_channel_tab, channel_id, str(page_number), sort, tab, int(view))
+ continuation = True
+ polymer_json = gevent.spawn(*page_call)
+ polymer_json.join()
+ if polymer_json.exception:
+ raise polymer_json.exception
+ polymer_json = polymer_json.value
+ else:
+ # videos tab - original logic
+ page_call = (get_channel_tab, channel_id, str(page_number), sort,
+ tab, int(view))
+ continuation = True
if tab == 'videos':
# Only need video count for the videos tab
@@ -505,14 +543,7 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
gevent.joinall(tasks)
util.check_gevent_exceptions(*tasks)
number_of_videos, polymer_json = tasks[0].value, tasks[1].value
- else:
- # For shorts/streams, item count is used instead
- polymer_json = gevent.spawn(*page_call)
- polymer_json.join()
- if polymer_json.exception:
- raise polymer_json.exception
- polymer_json = polymer_json.value
- number_of_videos = 0 # will be replaced by actual item count later
+ # For shorts/streams, polymer_json is already set above, nothing to do here
elif tab == 'about':
# polymer_json = util.fetch_url(base_url + '/about?pbj=1', headers_desktop, debug_name='gen_channel_about')
@@ -580,9 +611,13 @@ def get_channel_page_general_url(base_url, tab, request, channel_id=None):
if tab in ('videos', 'shorts', 'streams'):
if tab in ('shorts', 'streams'):
- # For shorts/streams, use the actual item count since
- # get_number_of_videos_channel counts regular uploads only
+ # For shorts/streams, use ctoken to determine pagination
+ info['is_last_page'] = (info.get('ctoken') is None)
number_of_videos = len(info.get('items', []))
+ # Cache the ctoken for next page
+ if info.get('ctoken'):
+ cache_key = (channel_id, tab, sort, page_number)
+ continuation_token_cache[cache_key] = info['ctoken']
info['number_of_videos'] = number_of_videos
info['number_of_pages'] = math.ceil(number_of_videos/page_size) if number_of_videos else 1
info['header_playlist_names'] = local_playlist.get_playlist_names()