 yt_dlp/downloader/common.py       |  47
 yt_dlp/downloader/external.py     |   4
 yt_dlp/downloader/fragment.py     |   4
 yt_dlp/extractor/arcpublishing.py |   3
 yt_dlp/extractor/dailymotion.py   |   4
 yt_dlp/extractor/extractors.py    |   8
 yt_dlp/extractor/imggaming.py     |   2
 yt_dlp/extractor/manyvids.py      |   1
 yt_dlp/extractor/mirrativ.py      |  83
 yt_dlp/extractor/niconico.py      | 187
 yt_dlp/extractor/rokfin.py        | 256
 yt_dlp/extractor/threeqsdn.py     |   3
 yt_dlp/extractor/tv2.py           |   8
 yt_dlp/extractor/tvnet.py         |   4
 yt_dlp/extractor/vgtv.py          |   4
 yt_dlp/extractor/vimeo.py         |   3
 yt_dlp/extractor/youtube.py       |  25
 yt_dlp/options.py                 |   2
 yt_dlp/utils.py                   |  55
19 files changed, 553 insertions(+), 150 deletions(-)
diff --git a/yt_dlp/downloader/common.py b/yt_dlp/downloader/common.py
index 37321e34b..3a949d38a 100644
--- a/yt_dlp/downloader/common.py
+++ b/yt_dlp/downloader/common.py
@@ -210,28 +210,41 @@ class FileDownloader(object):
     def ytdl_filename(self, filename):
         return filename + '.ytdl'
 
+    def wrap_file_access(action, *, fatal=False):
+        def outer(func):
+            def inner(self, *args, **kwargs):
+                file_access_retries = self.params.get('file_access_retries', 0)
+                retry = 0
+                while True:
+                    try:
+                        return func(self, *args, **kwargs)
+                    except (IOError, OSError) as err:
+                        retry = retry + 1
+                        if retry > file_access_retries or err.errno not in (errno.EACCES, errno.EINVAL):
+                            if not fatal:
+                                self.report_error(f'unable to {action} file: {err}')
+                                return
+                            raise
+                        self.to_screen(
+                            f'[download] Unable to {action} file due to file access error. '
+                            f'Retrying (attempt {retry} of {self.format_retries(file_access_retries)}) ...')
+                        time.sleep(0.01)
+            return inner
+        return outer
+
+    @wrap_file_access('open', fatal=True)
     def sanitize_open(self, filename, open_mode):
-        file_access_retries = self.params.get('file_access_retries', 10)
-        retry = 0
-        while True:
-            try:
-                return sanitize_open(filename, open_mode)
-            except (IOError, OSError) as err:
-                retry = retry + 1
-                if retry > file_access_retries or err.errno not in (errno.EACCES,):
-                    raise
-                self.to_screen(
-                    '[download] Got file access error. Retrying (attempt %d of %s) ...'
-                    % (retry, self.format_retries(file_access_retries)))
-                time.sleep(0.01)
+        return sanitize_open(filename, open_mode)
+
+    @wrap_file_access('remove')
+    def try_remove(self, filename):
+        os.remove(filename)
 
+    @wrap_file_access('rename')
     def try_rename(self, old_filename, new_filename):
         if old_filename == new_filename:
             return
-        try:
-            os.replace(old_filename, new_filename)
-        except (IOError, OSError) as err:
-            self.report_error(f'unable to rename file: {err}')
+        os.replace(old_filename, new_filename)
 
     def try_utime(self, filename, last_modified_hdr):
         """Try to set the last-modified time of the given file."""
diff --git a/yt_dlp/downloader/external.py b/yt_dlp/downloader/external.py
index 03ae3a00e..be6202eef 100644
--- a/yt_dlp/downloader/external.py
+++ b/yt_dlp/downloader/external.py
@@ -159,9 +159,9 @@ class ExternalFD(FragmentFD):
                 dest.write(decrypt_fragment(fragment, src.read()))
                 src.close()
                 if not self.params.get('keep_fragments', False):
-                    os.remove(encodeFilename(fragment_filename))
+                    self.try_remove(encodeFilename(fragment_filename))
             dest.close()
-            os.remove(encodeFilename('%s.frag.urls' % tmpfilename))
+            self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
         return 0
 
diff --git a/yt_dlp/downloader/fragment.py b/yt_dlp/downloader/fragment.py
index 83a9f81b6..95fb2f9e7 100644
--- a/yt_dlp/downloader/fragment.py
+++ b/yt_dlp/downloader/fragment.py
@@ -159,7 +159,7 @@ class FragmentFD(FileDownloader):
         if self.__do_ytdl_file(ctx):
             self._write_ytdl_file(ctx)
         if not self.params.get('keep_fragments', False):
-            os.remove(encodeFilename(ctx['fragment_filename_sanitized']))
+            self.try_remove(encodeFilename(ctx['fragment_filename_sanitized']))
             del ctx['fragment_filename_sanitized']
 
     def _prepare_frag_download(self, ctx):
@@ -305,7 +305,7 @@ class FragmentFD(FileDownloader):
         if self.__do_ytdl_file(ctx):
             ytdl_filename = encodeFilename(self.ytdl_filename(ctx['filename']))
             if os.path.isfile(ytdl_filename):
-                os.remove(ytdl_filename)
+                self.try_remove(ytdl_filename)
 
         elapsed = time.time() - ctx['started']
         if ctx['tmpfilename'] == '-':
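Note: the new wrap_file_access decorator factors the retry loop out of sanitize_open so that remove and rename get the same policy. A minimal standalone sketch of the pattern — the names, defaults and the printed message here are illustrative, not yt-dlp's actual API:

    import errno
    import functools
    import os
    import time

    def retry_file_access(action, retries=3, fatal=False):
        # Decorator factory: retry the wrapped call on errors that are usually
        # transient (e.g. an antivirus briefly holding a file open on Windows).
        def outer(func):
            @functools.wraps(func)
            def inner(*args, **kwargs):
                for attempt in range(retries + 1):
                    try:
                        return func(*args, **kwargs)
                    except OSError as err:
                        if attempt >= retries or err.errno not in (errno.EACCES, errno.EINVAL):
                            if fatal:
                                raise
                            print(f'unable to {action} file: {err}')
                            return None
                        time.sleep(0.01)
            return inner
        return outer

    @retry_file_access('remove')
    def try_remove(filename):
        os.remove(filename)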
diff --git a/yt_dlp/extractor/arcpublishing.py b/yt_dlp/extractor/arcpublishing.py
index 1943fd5f8..8880e5c95 100644
--- a/yt_dlp/extractor/arcpublishing.py
+++ b/yt_dlp/extractor/arcpublishing.py
@@ -124,8 +124,7 @@ class ArcPublishingIE(InfoExtractor):
                     formats.extend(smil_formats)
                 elif stream_type in ('ts', 'hls'):
                     m3u8_formats = self._extract_m3u8_formats(
-                        s_url, uuid, 'mp4', 'm3u8' if is_live else 'm3u8_native',
-                        m3u8_id='hls', fatal=False)
+                        s_url, uuid, 'mp4', live=is_live, m3u8_id='hls', fatal=False)
                     if all([f.get('acodec') == 'none' for f in m3u8_formats]):
                         continue
                     for f in m3u8_formats:
diff --git a/yt_dlp/extractor/dailymotion.py b/yt_dlp/extractor/dailymotion.py
index e71462061..95589d53a 100644
--- a/yt_dlp/extractor/dailymotion.py
+++ b/yt_dlp/extractor/dailymotion.py
@@ -259,9 +259,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor):
                 continue
             if media_type == 'application/x-mpegURL':
                 formats.extend(self._extract_m3u8_formats(
-                    media_url, video_id, 'mp4',
-                    'm3u8' if is_live else 'm3u8_native',
-                    m3u8_id='hls', fatal=False))
+                    media_url, video_id, 'mp4', live=is_live, m3u8_id='hls', fatal=False))
             else:
                 f = {
                     'url': media_url,
diff --git a/yt_dlp/extractor/extractors.py b/yt_dlp/extractor/extractors.py
index ef1d6c14d..0f26dc24f 100644
--- a/yt_dlp/extractor/extractors.py
+++ b/yt_dlp/extractor/extractors.py
@@ -1011,11 +1011,12 @@ from .nick import (
     NickNightIE,
     NickRuIE,
 )
-
 from .niconico import (
     NiconicoIE,
     NiconicoPlaylistIE,
     NiconicoUserIE,
+    NiconicoSeriesIE,
+    NiconicoHistoryIE,
     NicovideoSearchDateIE,
     NicovideoSearchIE,
     NicovideoSearchURLIE,
@@ -1333,6 +1334,11 @@ from .reverbnation import ReverbNationIE
 from .rice import RICEIE
 from .rmcdecouverte import RMCDecouverteIE
 from .rockstargames import RockstarGamesIE
+from .rokfin import (
+    RokfinIE,
+    RokfinStackIE,
+    RokfinChannelIE,
+)
 from .roosterteeth import RoosterTeethIE, RoosterTeethSeriesIE
 from .rottentomatoes import RottenTomatoesIE
 from .rozhlas import RozhlasIE
diff --git a/yt_dlp/extractor/imggaming.py b/yt_dlp/extractor/imggaming.py
index bae74b290..230dc86d3 100644
--- a/yt_dlp/extractor/imggaming.py
+++ b/yt_dlp/extractor/imggaming.py
@@ -96,7 +96,7 @@ class ImgGamingBaseIE(InfoExtractor):
                 continue
             if proto == 'hls':
                 m3u8_formats = self._extract_m3u8_formats(
-                    media_url, media_id, 'mp4', 'm3u8' if is_live else 'm3u8_native',
+                    media_url, media_id, 'mp4', live=is_live,
                     m3u8_id='hls', fatal=False, headers=self._MANIFEST_HEADERS)
                 for f in m3u8_formats:
                     f.setdefault('http_headers', {}).update(self._MANIFEST_HEADERS)
diff --git a/yt_dlp/extractor/manyvids.py b/yt_dlp/extractor/manyvids.py
index e8d7163e4..bd24f8853 100644
--- a/yt_dlp/extractor/manyvids.py
+++ b/yt_dlp/extractor/manyvids.py
@@ -89,4 +89,5 @@ class ManyVidsIE(InfoExtractor):
         'view_count': view_count,
         'like_count': like_count,
         'formats': formats,
+        'uploader': self._html_search_regex(r'<meta[^>]+name="author"[^>]*>([^<]+)', webpage, 'uploader'),
     }
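Note: the arcpublishing, dailymotion and imggaming hunks above (and several extractors further down) replace the hand-rolled `'m3u8' if is_live else 'm3u8_native'` entry-protocol argument with the `live=is_live` keyword, letting the base helper pick the protocol itself. The choice it encapsulates, roughly — a sketch of the rationale, not the helper's real internals:

    def entry_protocol(is_live):
        # The native HLS downloader cannot follow a playlist that is still
        # being appended to, so live streams need the ffmpeg-based 'm3u8'
        # protocol; finished VODs can use the faster 'm3u8_native'.
        return 'm3u8' if is_live else 'm3u8_native'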
diff --git a/yt_dlp/extractor/mirrativ.py b/yt_dlp/extractor/mirrativ.py
index 81aea54f6..2111de615 100644
--- a/yt_dlp/extractor/mirrativ.py
+++ b/yt_dlp/extractor/mirrativ.py
@@ -19,9 +19,25 @@ class MirrativBaseIE(InfoExtractor):
 class MirrativIE(MirrativBaseIE):
     IE_NAME = 'mirrativ'
     _VALID_URL = r'https?://(?:www\.)?mirrativ\.com/live/(?P<id>[^/?#&]+)'
-    LIVE_API_URL = 'https://www.mirrativ.com/api/live/live?live_id=%s'
 
     _TESTS = [{
+        'url': 'https://mirrativ.com/live/UQomuS7EMgHoxRHjEhNiHw',
+        'info_dict': {
+            'id': 'UQomuS7EMgHoxRHjEhNiHw',
+            'title': 'ねむいぃ、。『参加型』🔰jcが初めてやるCOD✨初見さん大歓迎💗',
+            'is_live': True,
+            'description': 'md5:bfcd8f77f2fab24c3c672e5620f3f16e',
+            'thumbnail': r're:https?://.+',
+            'uploader': '# あ ち ゅ 。💡',
+            'uploader_id': '118572165',
+            'duration': None,
+            'view_count': 1241,
+            'release_timestamp': 1646229192,
+            'timestamp': 1646229167,
+            'was_live': False,
+        },
+        'skip': 'livestream',
+    }, {
         'url': 'https://mirrativ.com/live/POxyuG1KmW2982lqlDTuPw',
         'only_matching': True,
     }]
@@ -29,12 +45,11 @@ class MirrativIE(MirrativBaseIE):
     def _real_extract(self, url):
         video_id = self._match_id(url)
         webpage = self._download_webpage('https://www.mirrativ.com/live/%s' % video_id, video_id)
-        live_response = self._download_json(self.LIVE_API_URL % video_id, video_id)
+        live_response = self._download_json(f'https://www.mirrativ.com/api/live/live?live_id={video_id}', video_id)
         self.assert_error(live_response)
 
         hls_url = dict_get(live_response, ('archive_url_hls', 'streaming_url_hls'))
         is_live = bool(live_response.get('is_live'))
-        was_live = bool(live_response.get('is_archive'))
         if not hls_url:
             raise ExtractorError('Neither archive nor live is available.', expected=True)
 
@@ -42,55 +57,29 @@ class MirrativIE(MirrativBaseIE):
             hls_url, video_id,
             ext='mp4', entry_protocol='m3u8_native',
             m3u8_id='hls', live=is_live)
-        rtmp_url = live_response.get('streaming_url_edge')
-        if rtmp_url:
-            keys_to_copy = ('width', 'height', 'vcodec', 'acodec', 'tbr')
-            fmt = {
-                'format_id': 'rtmp',
-                'url': rtmp_url,
-                'protocol': 'rtmp',
-                'ext': 'mp4',
-            }
-            fmt.update({k: traverse_obj(formats, (0, k)) for k in keys_to_copy})
-            formats.append(fmt)
         self._sort_formats(formats)
 
-        title = self._og_search_title(webpage, default=None) or self._search_regex(
-            r'<title>\s*(.+?) - Mirrativ\s*</title>', webpage) or live_response.get('title')
-        description = live_response.get('description')
-        thumbnail = live_response.get('image_url')
-
-        duration = try_get(live_response, lambda x: x['ended_at'] - x['started_at'])
-        view_count = live_response.get('total_viewer_num')
-        release_timestamp = live_response.get('started_at')
-        timestamp = live_response.get('created_at')
-
-        owner = live_response.get('owner', {})
-        uploader = owner.get('name')
-        uploader_id = owner.get('user_id')
-
         return {
             'id': video_id,
-            'title': title,
+            'title': self._og_search_title(webpage, default=None) or self._search_regex(
+                r'<title>\s*(.+?) - Mirrativ\s*</title>', webpage) or live_response.get('title'),
             'is_live': is_live,
-            'description': description,
+            'description': live_response.get('description'),
             'formats': formats,
-            'thumbnail': thumbnail,
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'duration': duration,
-            'view_count': view_count,
-            'release_timestamp': release_timestamp,
-            'timestamp': timestamp,
-            'was_live': was_live,
+            'thumbnail': live_response.get('image_url'),
+            'uploader': traverse_obj(live_response, ('owner', 'name')),
+            'uploader_id': traverse_obj(live_response, ('owner', 'user_id')),
+            'duration': try_get(live_response, lambda x: x['ended_at'] - x['started_at']) if not is_live else None,
+            'view_count': live_response.get('total_viewer_num'),
+            'release_timestamp': live_response.get('started_at'),
+            'timestamp': live_response.get('created_at'),
+            'was_live': bool(live_response.get('is_archive')),
         }
 
 
 class MirrativUserIE(MirrativBaseIE):
     IE_NAME = 'mirrativ:user'
     _VALID_URL = r'https?://(?:www\.)?mirrativ\.com/user/(?P<id>\d+)'
-    LIVE_HISTORY_API_URL = 'https://www.mirrativ.com/api/live/live_history?user_id=%s&page=%d'
-    USER_INFO_API_URL = 'https://www.mirrativ.com/api/user/profile?user_id=%s'
 
     _TESTS = [{
         # Live archive is available up to 3 days
@@ -104,8 +93,8 @@ class MirrativUserIE(MirrativBaseIE):
         page = 1
         while page is not None:
             api_response = self._download_json(
-                self.LIVE_HISTORY_API_URL % (user_id, page), user_id,
-                note='Downloading page %d' % page)
+                f'https://www.mirrativ.com/api/live/live_history?user_id={user_id}&page={page}', user_id,
+                note=f'Downloading page {page}')
             self.assert_error(api_response)
             lives = api_response.get('lives')
             if not lives:
@@ -123,12 +112,10 @@ class MirrativUserIE(MirrativBaseIE):
     def _real_extract(self, url):
         user_id = self._match_id(url)
         user_info = self._download_json(
-            self.USER_INFO_API_URL % user_id, user_id,
+            f'https://www.mirrativ.com/api/user/profile?user_id={user_id}', user_id,
             note='Downloading user info', fatal=False)
         self.assert_error(user_info)
 
-        uploader = user_info.get('name')
-        description = user_info.get('description')
-
-        entries = self._entries(user_id)
-        return self.playlist_result(entries, user_id, uploader, description)
+        return self.playlist_result(
+            self._entries(user_id), user_id,
+            user_info.get('name'), user_info.get('description'))
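Note: the rewritten mirrativ extractor leans on traverse_obj instead of chained .get() calls. A quick illustration of its behavior, assuming yt-dlp is importable:

    from yt_dlp.utils import traverse_obj

    live_response = {'owner': {'name': 'あちゅ', 'user_id': '118572165'}}
    print(traverse_obj(live_response, ('owner', 'name')))           # 'あちゅ'
    # Missing branches yield None instead of raising, so no try/except
    # or .get() chaining is needed around optional metadata:
    print(traverse_obj(live_response, ('owner', 'avatar', 'url')))  # None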
diff --git a/yt_dlp/extractor/niconico.py b/yt_dlp/extractor/niconico.py
index 6e561bee5..8f56fc95b 100644
--- a/yt_dlp/extractor/niconico.py
+++ b/yt_dlp/extractor/niconico.py
@@ -3,6 +3,7 @@ from __future__ import unicode_literals
 
 import datetime
 import itertools
+import functools
 import json
 import re
 
@@ -12,6 +13,7 @@ from ..compat import (
     compat_str,
     compat_parse_qs,
     compat_urllib_parse_urlparse,
+    compat_HTTPError,
 )
 from ..utils import (
     ExtractorError,
@@ -24,7 +26,9 @@ from ..utils import (
     PostProcessingError,
     remove_start,
     str_or_none,
+    traverse_obj,
     try_get,
+    unescapeHTML,
     unified_timestamp,
     urlencode_postdata,
     xpath_text,
@@ -606,8 +610,61 @@ class NiconicoIE(InfoExtractor):
         }
 
 
-class NiconicoPlaylistIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.)?nicovideo\.jp/(?:user/\d+/|my/)?mylist/(?P<id>\d+)'
+class NiconicoPlaylistBaseIE(InfoExtractor):
+    _PAGE_SIZE = 100
+
+    _API_HEADERS = {
+        'X-Frontend-ID': '6',
+        'X-Frontend-Version': '0',
+        'X-Niconico-Language': 'en-us'
+    }
+
+    def _call_api(self, list_id, resource, query):
+        "Implement this in child class"
+        pass
+
+    @staticmethod
+    def _parse_owner(item):
+        return {
+            'uploader': traverse_obj(item, ('owner', 'name')),
+            'uploader_id': traverse_obj(item, ('owner', 'id')),
+        }
+
+    def _fetch_page(self, list_id, page):
+        page += 1
+        resp = self._call_api(list_id, 'page %d' % page, {
+            'page': page,
+            'pageSize': self._PAGE_SIZE,
+        })
+        # this is needed to support both mylist and user
+        for video in traverse_obj(resp, ('items', ..., ('video', None))) or []:
+            video_id = video.get('id')
+            if not video_id:
+                # skip {"video": {"id": "blablabla", ...}}
+                continue
+            count = video.get('count') or {}
+            get_count = lambda x: int_or_none(count.get(x))
+            yield {
+                '_type': 'url',
+                'id': video_id,
+                'title': video.get('title'),
+                'url': f'https://www.nicovideo.jp/watch/{video_id}',
+                'description': video.get('shortDescription'),
+                'duration': int_or_none(video.get('duration')),
+                'view_count': get_count('view'),
+                'comment_count': get_count('comment'),
+                'thumbnail': traverse_obj(video, ('thumbnail', ('nHdUrl', 'largeUrl', 'listingUrl', 'url'))),
+                'ie_key': NiconicoIE.ie_key(),
+                **self._parse_owner(video),
+            }
+
+    def _entries(self, list_id):
+        return OnDemandPagedList(functools.partial(self._fetch_page, list_id), self._PAGE_SIZE)
+
+
+class NiconicoPlaylistIE(NiconicoPlaylistBaseIE):
+    IE_NAME = 'niconico:playlist'
+    _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/(?:user/\d+/)?(?:my/)?mylist/(?:#/)?(?P<id>\d+)'
 
     _TESTS = [{
         'url': 'http://www.nicovideo.jp/mylist/27411728',
@@ -618,48 +675,110 @@ class NiconicoPlaylistIE(InfoExtractor):
             'uploader': 'のっく',
             'uploader_id': '805442',
         },
-        'playlist_mincount': 225,
+        'playlist_mincount': 291,
     }, {
         'url': 'https://www.nicovideo.jp/user/805442/mylist/27411728',
         'only_matching': True,
+    }, {
+        'url': 'https://www.nicovideo.jp/my/mylist/#/68048635',
+        'only_matching': True,
     }]
 
-    _API_HEADERS = {
-        'X-Frontend-ID': '6',
-        'X-Frontend-Version': '0'
-    }
+    def _call_api(self, list_id, resource, query):
+        return self._download_json(
+            f'https://nvapi.nicovideo.jp/v2/mylists/{list_id}', list_id,
+            f'Downloading {resource}', query=query,
+            headers=self._API_HEADERS)['data']['mylist']
 
     def _real_extract(self, url):
         list_id = self._match_id(url)
+        mylist = self._call_api(list_id, 'list', {
+            'pageSize': 1,
+        })
+        return self.playlist_result(
+            self._entries(list_id), list_id,
+            mylist.get('name'), mylist.get('description'), **self._parse_owner(mylist))
 
-        def get_page_data(pagenum, pagesize):
-            return self._download_json(
-                'http://nvapi.nicovideo.jp/v2/mylists/' + list_id, list_id,
-                query={'page': 1 + pagenum, 'pageSize': pagesize},
-                headers=self._API_HEADERS).get('data').get('mylist')
-
-        data = get_page_data(0, 1)
-        title = data.get('name')
-        description = data.get('description')
-        uploader = data.get('owner').get('name')
-        uploader_id = data.get('owner').get('id')
-
-        def pagefunc(pagenum):
-            data = get_page_data(pagenum, 25)
-            return ({
-                '_type': 'url',
-                'url': 'http://www.nicovideo.jp/watch/' + item.get('watchId'),
-            } for item in data.get('items'))
-
-        return {
-            '_type': 'playlist',
-            'id': list_id,
-            'title': title,
-            'description': description,
-            'uploader': uploader,
-            'uploader_id': uploader_id,
-            'entries': OnDemandPagedList(pagefunc, 25),
-        }
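Note: NiconicoPlaylistBaseIE binds the list id into the page fetcher with functools.partial and hands it to OnDemandPagedList, so API pages are only requested as they are sliced. A toy version of the same wiring, assuming yt-dlp is importable:

    import functools
    from yt_dlp.utils import OnDemandPagedList

    def fetch_page(list_id, page):
        # Stand-in for the API call; only pages a slice touches are fetched
        return [f'{list_id}-video-{page * 2 + i}' for i in range(2)]

    entries = OnDemandPagedList(functools.partial(fetch_page, '27411728'), 2)
    print(entries.getslice(0, 3))  # fetches pages 0 and 1 only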
+class NiconicoSeriesIE(InfoExtractor):
+    IE_NAME = 'niconico:series'
+    _VALID_URL = r'https?://(?:(?:www\.|sp\.)?nicovideo\.jp|nico\.ms)/series/(?P<id>\d+)'
+
+    _TESTS = [{
+        'url': 'https://www.nicovideo.jp/series/110226',
+        'info_dict': {
+            'id': '110226',
+            'title': 'ご立派ァ!のシリーズ',
+        },
+        'playlist_mincount': 10,  # as of 2021/03/17
+    }, {
+        'url': 'https://www.nicovideo.jp/series/12312/',
+        'info_dict': {
+            'id': '12312',
+            'title': 'バトルスピリッツ お勧めカード紹介(調整中)',
+        },
+        'playlist_mincount': 97,  # as of 2021/03/17
+    }, {
+        'url': 'https://nico.ms/series/203559',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        list_id = self._match_id(url)
+        webpage = self._download_webpage(f'https://www.nicovideo.jp/series/{list_id}', list_id)
+
+        title = self._search_regex(
+            (r'<title>「(.+)(全',
+             r'<div class="TwitterShareButton"\s+data-text="(.+)\s+https:'),
+            webpage, 'title', fatal=False)
+        if title:
+            title = unescapeHTML(title)
+        playlist = [
+            self.url_result(f'https://www.nicovideo.jp/watch/{v_id}', video_id=v_id)
+            for v_id in re.findall(r'href="/watch/([a-z0-9]+)" data-href="/watch/\1', webpage)]
+        return self.playlist_result(playlist, list_id, title)
+
+
+class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
+    IE_NAME = 'niconico:history'
+    IE_DESC = 'NicoNico user history. Requires cookies.'
+    _VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/history'
+
+    _TESTS = [{
+        'note': 'PC page, with /video',
+        'url': 'https://www.nicovideo.jp/my/history/video',
+        'only_matching': True,
+    }, {
+        'note': 'PC page, without /video',
+        'url': 'https://www.nicovideo.jp/my/history',
+        'only_matching': True,
+    }, {
+        'note': 'mobile page, with /video',
+        'url': 'https://sp.nicovideo.jp/my/history/video',
+        'only_matching': True,
+    }, {
+        'note': 'mobile page, without /video',
+        'url': 'https://sp.nicovideo.jp/my/history',
+        'only_matching': True,
+    }]
+
+    def _call_api(self, list_id, resource, query):
+        return self._download_json(
+            'https://nvapi.nicovideo.jp/v1/users/me/watch/history', 'history',
+            f'Downloading {resource}', query=query,
+            headers=self._API_HEADERS)['data']
+
+    def _real_extract(self, url):
+        list_id = 'history'
+        try:
+            mylist = self._call_api(list_id, 'list', {
+                'pageSize': 1,
+            })
+        except ExtractorError as e:
+            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+                self.raise_login_required('You have to be logged in to get your watch history')
+            raise
+        return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))
 
 
 class NicovideoSearchBaseIE(InfoExtractor):
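Note: NiconicoSeriesIE collects video ids with a regex backreference — the `\1` forces the `href` and `data-href` attributes to carry the same id, so unrelated attribute pairs never match. A toy demonstration:

    import re

    webpage = '<a href="/watch/sm123" data-href="/watch/sm123">x</a>'
    print(re.findall(r'href="/watch/([a-z0-9]+)" data-href="/watch/\1', webpage))
    # ['sm123']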
diff --git a/yt_dlp/extractor/rokfin.py b/yt_dlp/extractor/rokfin.py
new file mode 100644
index 000000000..79a5b2336
--- /dev/null
+++ b/yt_dlp/extractor/rokfin.py
@@ -0,0 +1,256 @@
+# coding: utf-8
+import itertools
+from datetime import datetime
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    ExtractorError,
+    float_or_none,
+    format_field,
+    int_or_none,
+    str_or_none,
+    traverse_obj,
+    unified_timestamp,
+    url_or_none,
+)
+
+_API_BASE_URL = 'https://prod-api-v2.production.rokfin.com/api/v2/public/'
+
+
+class RokfinIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?P<id>(?P<type>post|stream)/\d+)'
+    _TESTS = [{
+        'url': 'https://www.rokfin.com/post/57548/Mitt-Romneys-Crazy-Solution-To-Climate-Change',
+        'info_dict': {
+            'id': 'post/57548',
+            'ext': 'mp4',
+            'title': 'Mitt Romney\'s Crazy Solution To Climate Change',
+            'thumbnail': r're:https://img\.production\.rokfin\.com/.+',
+            'upload_date': '20211023',
+            'timestamp': 1634998029,
+            'channel': 'Jimmy Dore',
+            'channel_id': 65429,
+            'channel_url': 'https://rokfin.com/TheJimmyDoreShow',
+            'duration': 213.0,
+            'availability': 'public',
+            'live_status': 'not_live',
+            'dislike_count': int,
+            'like_count': int,
+        }
+    }, {
+        'url': 'https://rokfin.com/post/223/Julian-Assange-Arrested-Streaming-In-Real-Time',
+        'info_dict': {
+            'id': 'post/223',
+            'ext': 'mp4',
+            'title': 'Julian Assange Arrested: Streaming In Real Time',
+            'thumbnail': r're:https://img\.production\.rokfin\.com/.+',
+            'upload_date': '20190412',
+            'timestamp': 1555052644,
+            'channel': 'Ron Placone',
+            'channel_id': 10,
+            'channel_url': 'https://rokfin.com/RonPlacone',
+            'availability': 'public',
+            'live_status': 'not_live',
+            'dislike_count': int,
+            'like_count': int,
+            'tags': ['FreeThinkingMedia^', 'RealProgressives^'],
+        }
+    }, {
+        'url': 'https://www.rokfin.com/stream/10543/Its-A-Crazy-Mess-Regional-Director-Blows-Whistle-On-Pfizers-Vaccine-Trial-Data',
+        'info_dict': {
+            'id': 'stream/10543',
+            'ext': 'mp4',
+            'title': '"It\'s A Crazy Mess" Regional Director Blows Whistle On Pfizer\'s Vaccine Trial Data',
+            'thumbnail': r're:https://img\.production\.rokfin\.com/.+',
+            'description': 'md5:324ce2d3e3b62e659506409e458b9d8e',
+            'channel': 'Ryan Cristián',
+            'channel_id': 53856,
+            'channel_url': 'https://rokfin.com/TLAVagabond',
+            'availability': 'public',
+            'is_live': False,
+            'was_live': True,
+            'live_status': 'was_live',
+            'timestamp': 1635874720,
+            'release_timestamp': 1635874720,
+            'release_date': '20211102',
+            'upload_date': '20211102',
+            'dislike_count': int,
+            'like_count': int,
+            'tags': ['FreeThinkingMedia^'],
+        }
+    }]
+
+    def _real_extract(self, url):
+        video_id, video_type = self._match_valid_url(url).group('id', 'type')
+
+        metadata = self._download_json(f'{_API_BASE_URL}{video_id}', video_id)
+
+        scheduled = unified_timestamp(metadata.get('scheduledAt'))
+        live_status = ('was_live' if metadata.get('stoppedAt')
+                       else 'is_upcoming' if scheduled
+                       else 'is_live' if video_type == 'stream'
+                       else 'not_live')
+
+        video_url = traverse_obj(metadata, 'url', ('content', 'contentUrl'), expected_type=url_or_none)
+        formats, subtitles = [{'url': video_url}] if video_url else [], {}
+        if determine_ext(video_url) == 'm3u8':
+            formats, subtitles = self._extract_m3u8_formats_and_subtitles(
+                video_url, video_id, fatal=False, live=live_status == 'is_live')
+
+        if not formats:
+            if metadata.get('premiumPlan'):
+                self.raise_login_required('This video is only available to premium users', True, method='cookies')
+            elif scheduled:
+                self.raise_no_formats(
+                    f'Stream is offline; sheduled for {datetime.fromtimestamp(scheduled).strftime("%Y-%m-%d %H:%M:%S")}',
+                    video_id=video_id, expected=True)
+        self._sort_formats(formats)
+
+        uploader = traverse_obj(metadata, ('createdBy', 'username'), ('creator', 'username'))
+        timestamp = (scheduled or float_or_none(metadata.get('postedAtMilli'), 1000)
+                     or unified_timestamp(metadata.get('creationDateTime')))
+        return {
+            'id': video_id,
+            'formats': formats,
+            'subtitles': subtitles,
+            'title': str_or_none(traverse_obj(metadata, 'title', ('content', 'contentTitle'))),
+            'duration': float_or_none(traverse_obj(metadata, ('content', 'duration'))),
+            'thumbnail': url_or_none(traverse_obj(metadata, 'thumbnail', ('content', 'thumbnailUrl1'))),
+            'description': str_or_none(traverse_obj(metadata, 'description', ('content', 'contentDescription'))),
+            'like_count': int_or_none(metadata.get('likeCount')),
+            'dislike_count': int_or_none(metadata.get('dislikeCount')),
+            'channel': str_or_none(traverse_obj(metadata, ('createdBy', 'name'), ('creator', 'name'))),
+            'channel_id': traverse_obj(metadata, ('createdBy', 'id'), ('creator', 'id')),
+            'channel_url': url_or_none(f'https://rokfin.com/{uploader}') if uploader else None,
+            'timestamp': timestamp,
+            'release_timestamp': timestamp if live_status != 'not_live' else None,
+            'tags': traverse_obj(metadata, ('tags', ..., 'title'), expected_type=str_or_none),
+            'live_status': live_status,
+            'availability': self._availability(
+                needs_premium=bool(metadata.get('premiumPlan')),
+                is_private=False, needs_subscription=False, needs_auth=False, is_unlisted=False),
+            # 'comment_count': metadata.get('numComments'),  # Data provided by website is wrong
+            '__post_extractor': self.extract_comments(video_id) if video_type == 'post' else None,
+        }
+
+    def _get_comments(self, video_id):
+        pages_total = None
+        for page_n in itertools.count():
+            raw_comments = self._download_json(
+                f'{_API_BASE_URL}comment?postId={video_id[5:]}&page={page_n}&size=50',
+                video_id, note=f'Downloading viewer comments page {page_n + 1}{format_field(pages_total, template=" of %s")}',
+                fatal=False) or {}
+
+            for comment in raw_comments.get('content') or []:
+                yield {
+                    'text': str_or_none(comment.get('comment')),
+                    'author': str_or_none(comment.get('name')),
+                    'id': comment.get('commentId'),
+                    'author_id': comment.get('userId'),
+                    'parent': 'root',
+                    'like_count': int_or_none(comment.get('numLikes')),
+                    'dislike_count': int_or_none(comment.get('numDislikes')),
+                    'timestamp': unified_timestamp(comment.get('postedAt'))
+                }
+
+            pages_total = int_or_none(raw_comments.get('totalPages')) or None
+            is_last = raw_comments.get('last')
+            if not raw_comments.get('content') or is_last or (page_n > pages_total if pages_total else is_last is not False):
+                return
+
+
+class RokfinPlaylistBaseIE(InfoExtractor):
+    _TYPES = {
+        'video': 'post',
+        'audio': 'post',
+        'stream': 'stream',
+        'dead_stream': 'stream',
+        'stack': 'stack',
+    }
+
+    def _get_video_data(self, metadata):
+        for content in metadata.get('content') or []:
+            media_type = self._TYPES.get(content.get('mediaType'))
+            video_id = content.get('id') if media_type == 'post' else content.get('mediaId')
+            if not media_type or not video_id:
+                continue
+
+            yield self.url_result(f'https://rokfin.com/{media_type}/{video_id}', video_id=f'{media_type}/{video_id}',
+                                  video_title=str_or_none(traverse_obj(content, ('content', 'contentTitle'))))
+
+
+class RokfinStackIE(RokfinPlaylistBaseIE):
+    IE_NAME = 'rokfin:stack'
+    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/stack/(?P<id>[^/]+)'
+    _TESTS = [{
+        'url': 'https://www.rokfin.com/stack/271/Tulsi-Gabbard-Portsmouth-Townhall-FULL--Feb-9-2020',
+        'playlist_count': 8,
+        'info_dict': {
+            'id': '271',
+        },
+    }]
+
+    def _real_extract(self, url):
+        list_id = self._match_id(url)
+        return self.playlist_result(self._get_video_data(
+            self._download_json(f'{_API_BASE_URL}stack/{list_id}', list_id)), list_id)
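Note: both _get_comments above and RokfinChannelIE._entries below page through the API with itertools.count and the same three stop conditions. The termination logic in isolation — the response shape ('content', 'totalPages', 'last') is taken from the diff, everything else is illustrative:

    import itertools

    def iter_all_pages(get_page):
        # Stop when a page comes back empty, when the API flags it as the
        # 'last' page, or when we have run past the reported 'totalPages'.
        pages_total = None
        for page_n in itertools.count():
            page = get_page(page_n) or {}
            yield from page.get('content') or []
            pages_total = page.get('totalPages') or None
            is_last = page.get('last')
            if not page.get('content') or is_last or (
                    page_n > pages_total if pages_total else is_last is not False):
                return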
+
+
+class RokfinChannelIE(RokfinPlaylistBaseIE):
+    IE_NAME = 'rokfin:channel'
+    _VALID_URL = r'https?://(?:www\.)?rokfin\.com/(?!((feed/?)|(discover/?)|(channels/?))$)(?P<id>[^/]+)/?$'
+    _TESTS = [{
+        'url': 'https://rokfin.com/TheConvoCouch',
+        'playlist_mincount': 100,
+        'info_dict': {
+            'id': '12071-new',
+            'title': 'TheConvoCouch - New',
+            'description': 'md5:bb622b1bca100209b91cd685f7847f06',
+        },
+    }]
+
+    _TABS = {
+        'new': 'posts',
+        'top': 'top',
+        'videos': 'video',
+        'podcasts': 'audio',
+        'streams': 'stream',
+        'stacks': 'stack',
+    }
+
+    def _real_initialize(self):
+        self._validate_extractor_args()
+
+    def _validate_extractor_args(self):
+        requested_tabs = self._configuration_arg('tab', None)
+        if requested_tabs is not None and (len(requested_tabs) > 1 or requested_tabs[0] not in self._TABS):
+            raise ExtractorError(f'Invalid extractor-arg "tab". Must be one of {", ".join(self._TABS)}', expected=True)
+
+    def _entries(self, channel_id, channel_name, tab):
+        pages_total = None
+        for page_n in itertools.count(0):
+            if tab in ('posts', 'top'):
+                data_url = f'{_API_BASE_URL}user/{channel_name}/{tab}?page={page_n}&size=50'
+            else:
+                data_url = f'{_API_BASE_URL}post/search/{tab}?page={page_n}&size=50&creator={channel_id}'
+            metadata = self._download_json(
+                data_url, channel_name,
+                note=f'Downloading video metadata page {page_n + 1}{format_field(pages_total, template=" of %s")}')
+
+            yield from self._get_video_data(metadata)
+            pages_total = int_or_none(metadata.get('totalPages')) or None
+            is_last = metadata.get('last')
+            if is_last or (page_n > pages_total if pages_total else is_last is not False):
+                return
+
+    def _real_extract(self, url):
+        channel_name = self._match_id(url)
+        channel_info = self._download_json(f'{_API_BASE_URL}user/{channel_name}', channel_name)
+        channel_id = channel_info['id']
+        tab = self._configuration_arg('tab', default=['new'])[0]
+
+        return self.playlist_result(
+            self._entries(channel_id, channel_name, self._TABS[tab]),
+            f'{channel_id}-{tab}', f'{channel_name} - {tab.title()}', str_or_none(channel_info.get('description')))
diff --git a/yt_dlp/extractor/threeqsdn.py b/yt_dlp/extractor/threeqsdn.py
index 22b4fe7c8..00a51dccd 100644
--- a/yt_dlp/extractor/threeqsdn.py
+++ b/yt_dlp/extractor/threeqsdn.py
@@ -111,8 +111,7 @@ class ThreeQSDNIE(InfoExtractor):
                 subtitles = self._merge_subtitles(subtitles, subs)
             elif source_type == 'hls':
                 fmts, subs = self._extract_m3u8_formats_and_subtitles(
-                    source, video_id, 'mp4', 'm3u8' if live else 'm3u8_native',
-                    m3u8_id='hls', fatal=False)
+                    source, video_id, 'mp4', live=live, m3u8_id='hls', fatal=False)
                 formats.extend(fmts)
                 subtitles = self._merge_subtitles(subtitles, subs)
             elif source_type == 'progressive':
diff --git a/yt_dlp/extractor/tv2.py b/yt_dlp/extractor/tv2.py
index b48dfe389..977da30fe 100644
--- a/yt_dlp/extractor/tv2.py
+++ b/yt_dlp/extractor/tv2.py
@@ -81,9 +81,7 @@ class TV2IE(InfoExtractor):
             elif ext == 'm3u8':
                 if not data.get('drmProtected'):
                     formats.extend(self._extract_m3u8_formats(
-                        video_url, video_id, 'mp4',
-                        'm3u8' if is_live else 'm3u8_native',
-                        m3u8_id=format_id, fatal=False))
+                        video_url, video_id, 'mp4', live=is_live, m3u8_id=format_id, fatal=False))
             elif ext == 'mpd':
                 formats.extend(self._extract_mpd_formats(
                     video_url, video_id, format_id, fatal=False))
@@ -244,9 +242,7 @@ class KatsomoIE(InfoExtractor):
             elif ext == 'm3u8':
                 if not data.get('drmProtected'):
                     formats.extend(self._extract_m3u8_formats(
-                        video_url, video_id, 'mp4',
-                        'm3u8' if is_live else 'm3u8_native',
-                        m3u8_id=format_id, fatal=False))
+                        video_url, video_id, 'mp4', live=is_live, m3u8_id=format_id, fatal=False))
             elif ext == 'mpd':
                 formats.extend(self._extract_mpd_formats(
                     video_url, video_id, format_id, fatal=False))
diff --git a/yt_dlp/extractor/tvnet.py b/yt_dlp/extractor/tvnet.py
index 4fe8dfb6c..aa1e9d923 100644
--- a/yt_dlp/extractor/tvnet.py
+++ b/yt_dlp/extractor/tvnet.py
@@ -111,9 +111,7 @@ class TVNetIE(InfoExtractor):
                 continue
             stream_urls.add(stream_url)
             formats.extend(self._extract_m3u8_formats(
-                stream_url, video_id, 'mp4',
-                entry_protocol='m3u8' if is_live else 'm3u8_native',
-                m3u8_id='hls', fatal=False))
+                stream_url, video_id, 'mp4', live=is_live, m3u8_id='hls', fatal=False))
         self._sort_formats(formats)
 
         # better support for radio streams
diff --git a/yt_dlp/extractor/vgtv.py b/yt_dlp/extractor/vgtv.py
index 10083cd24..9d6090b08 100644
--- a/yt_dlp/extractor/vgtv.py
+++ b/yt_dlp/extractor/vgtv.py
@@ -195,9 +195,7 @@ class VGTVIE(XstreamIE):
         hls_url = streams.get('hls')
         if hls_url:
             formats.extend(self._extract_m3u8_formats(
-                hls_url, video_id, 'mp4',
-                entry_protocol='m3u8' if is_live else 'm3u8_native',
-                m3u8_id='hls', fatal=False))
+                hls_url, video_id, 'mp4', live=is_live, m3u8_id='hls', fatal=False))
 
         hds_url = streams.get('hds')
         if hds_url:
diff --git a/yt_dlp/extractor/vimeo.py b/yt_dlp/extractor/vimeo.py
index c2dec244f..1a9fd00e4 100644
--- a/yt_dlp/extractor/vimeo.py
+++ b/yt_dlp/extractor/vimeo.py
@@ -166,8 +166,7 @@ class VimeoBaseInfoExtractor(InfoExtractor):
         for f_id, m_url in sep_manifest_urls:
             if files_type == 'hls':
                 fmts, subs = self._extract_m3u8_formats_and_subtitles(
-                    m_url, video_id, 'mp4',
-                    'm3u8' if is_live else 'm3u8_native', m3u8_id=f_id,
+                    m_url, video_id, 'mp4', live=is_live, m3u8_id=f_id,
                     note='Downloading %s m3u8 information' % cdn_name,
                     fatal=False)
                 formats.extend(fmts)
diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 47b3c5a85..602d48e3c 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -3950,13 +3950,14 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
                     break
 
     @staticmethod
-    def _extract_selected_tab(tabs):
+    def _extract_selected_tab(tabs, fatal=True):
         for tab in tabs:
             renderer = dict_get(tab, ('tabRenderer', 'expandableTabRenderer')) or {}
             if renderer.get('selected') is True:
                 return renderer
         else:
-            raise ExtractorError('Unable to find selected tab')
+            if fatal:
+                raise ExtractorError('Unable to find selected tab')
 
     @classmethod
     def _extract_uploader(cls, data):
@@ -4229,7 +4230,7 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
                     self.report_warning(error_to_compat_str(e))
                     break
 
-            if dict_get(data, ('contents', 'currentVideoEndpoint')):
+            if dict_get(data, ('contents', 'currentVideoEndpoint', 'onResponseReceivedActions')):
                 break
 
             last_error = 'Incomplete yt initial data received'
@@ -4248,7 +4249,7 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
         ytcfg = ytcfg or self.extract_ytcfg(item_id, webpage)
         # Reject webpage data if redirected to home page without explicitly requesting
         selected_tab = self._extract_selected_tab(traverse_obj(
-            data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[])) or {}
+            data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list, default=[]), fatal=False) or {}
         if (url != 'https://www.youtube.com/feed/recommended'
                 and selected_tab.get('tabIdentifier') == 'FEwhat_to_watch'  # Home page
                 and 'no-youtube-channel-redirect' not in self.get_param('compat_opts', [])):
@@ -4280,7 +4281,7 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
             return self._extract_response(
                 item_id=item_id, query=params, ep=ep, headers=headers,
                 ytcfg=ytcfg, fatal=fatal, default_client=default_client,
-                check_get_keys=('contents', 'currentVideoEndpoint'))
+                check_get_keys=('contents', 'currentVideoEndpoint', 'onResponseReceivedActions'))
         err_note = 'Failed to resolve url (does the playlist exist?)'
         if fatal:
             raise ExtractorError(err_note, expected=True)
@@ -4981,6 +4982,10 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
             'skip_download': True,
             'extractor_args': {'youtubetab': {'skip': ['webpage']}}
         },
+    }, {
+        'note': 'non-standard redirect to regional channel',
+        'url': 'https://www.youtube.com/channel/UCwVVpHQ2Cs9iGJfpdFngePQ',
+        'only_matching': True
     }]
 
     @classmethod
@@ -5053,6 +5058,16 @@ class YoutubeTabIE(YoutubeTabBaseInfoExtractor):
         data, ytcfg = self._extract_data(url, item_id)
 
+        # YouTube may provide a non-standard redirect to the regional channel
+        # See: https://github.com/yt-dlp/yt-dlp/issues/2694
+        redirect_url = traverse_obj(
+            data, ('onResponseReceivedActions', ..., 'navigateAction', 'endpoint', 'commandMetadata', 'webCommandMetadata', 'url'), get_all=False)
+        if redirect_url and 'no-youtube-channel-redirect' not in compat_opts:
+            redirect_url = ''.join((
+                urljoin('https://www.youtube.com', redirect_url), mobj['tab'], mobj['post']))
+            self.to_screen(f'This playlist is likely not available in your region. Following redirect to regional playlist {redirect_url}')
+            return self.url_result(redirect_url, ie=YoutubeTabIE.ie_key())
+
         tabs = traverse_obj(data, ('contents', 'twoColumnBrowseResultsRenderer', 'tabs'), expected_type=list)
         if tabs:
             selected_tab = self._extract_selected_tab(tabs)
diff --git a/yt_dlp/options.py b/yt_dlp/options.py
index f0bc3c09c..bee8e3637 100644
--- a/yt_dlp/options.py
+++ b/yt_dlp/options.py
@@ -723,7 +723,7 @@ def create_parser():
         help='Number of retries (default is %default), or "infinite"')
     downloader.add_option(
         '--file-access-retries',
-        dest='file_access_retries', metavar='RETRIES', default=10,
+        dest='file_access_retries', metavar='RETRIES', default=3,
         help='Number of times to retry on file access error (default is %default), or "infinite"')
     downloader.add_option(
         '--fragment-retries',
diff --git a/yt_dlp/utils.py b/yt_dlp/utils.py
index cc08bd130..5eb049ab7 100644
--- a/yt_dlp/utils.py
+++ b/yt_dlp/utils.py
@@ -2122,37 +2122,47 @@ if sys.platform == 'win32':
     whole_low = 0xffffffff
     whole_high = 0x7fffffff
 
-    def _lock_file(f, exclusive, block):  # todo: block unused on win32
+    def _lock_file(f, exclusive, block):
         overlapped = OVERLAPPED()
         overlapped.Offset = 0
         overlapped.OffsetHigh = 0
         overlapped.hEvent = 0
         f._lock_file_overlapped_p = ctypes.pointer(overlapped)
-        handle = msvcrt.get_osfhandle(f.fileno())
-        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
-                          whole_low, whole_high, f._lock_file_overlapped_p):
-            raise OSError('Locking file failed: %r' % ctypes.FormatError())
+
+        if not LockFileEx(msvcrt.get_osfhandle(f.fileno()),
+                          (0x2 if exclusive else 0x0) | (0x0 if block else 0x1),
+                          0, whole_low, whole_high, f._lock_file_overlapped_p):
+            raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError())
 
     def _unlock_file(f):
         assert f._lock_file_overlapped_p
         handle = msvcrt.get_osfhandle(f.fileno())
-        if not UnlockFileEx(handle, 0,
-                            whole_low, whole_high, f._lock_file_overlapped_p):
+        if not UnlockFileEx(handle, 0, whole_low, whole_high, f._lock_file_overlapped_p):
             raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
 
 else:
-    # Some platforms, such as Jython, is missing fcntl
     try:
         import fcntl
 
         def _lock_file(f, exclusive, block):
-            fcntl.flock(f,
-                        fcntl.LOCK_SH if not exclusive
-                        else fcntl.LOCK_EX if block
-                        else fcntl.LOCK_EX | fcntl.LOCK_NB)
+            try:
+                fcntl.flock(f,
+                            fcntl.LOCK_SH if not exclusive
+                            else fcntl.LOCK_EX if block
+                            else fcntl.LOCK_EX | fcntl.LOCK_NB)
+            except BlockingIOError:
+                raise
+            except OSError:  # AOSP does not have flock()
+                fcntl.lockf(f,
+                            fcntl.LOCK_SH if not exclusive
+                            else fcntl.LOCK_EX if block
+                            else fcntl.LOCK_EX | fcntl.LOCK_NB)
 
         def _unlock_file(f):
-            fcntl.flock(f, fcntl.LOCK_UN)
+            try:
+                fcntl.flock(f, fcntl.LOCK_UN)
+            except OSError:
+                fcntl.lockf(f, fcntl.LOCK_UN)
 
     except ImportError:
         UNSUPPORTED_MSG = 'file locking is not supported on this platform'
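Note: the utils.py hunk above makes a contended non-blocking lock raise BlockingIOError on both platforms, and falls back from flock() to POSIX lockf() where flock() itself is unavailable (the comment cites AOSP builds). The fallback in isolation — a POSIX-only sketch with an illustrative function name:

    import fcntl

    def lock_exclusive(f, block=True):
        flags = fcntl.LOCK_EX if block else fcntl.LOCK_EX | fcntl.LOCK_NB
        try:
            fcntl.flock(f, flags)
        except BlockingIOError:
            raise  # the lock is genuinely held elsewhere; do not retry via lockf()
        except OSError:
            # flock() is unsupported on this system; emulate with lockf()
            fcntl.lockf(f, flags)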
@@ -2165,6 +2175,8 @@ else:
 class locked_file(object):
+    _closed = False
+
     def __init__(self, filename, mode, block=True, encoding=None):
         assert mode in ['r', 'rb', 'a', 'ab', 'w', 'wb']
         self.f = io.open(filename, mode, encoding=encoding)
@@ -2182,9 +2194,11 @@ class locked_file(object):
     def __exit__(self, etype, value, traceback):
         try:
-            _unlock_file(self.f)
+            if not self._closed:
+                _unlock_file(self.f)
         finally:
             self.f.close()
+            self._closed = True
 
     def __iter__(self):
         return iter(self.f)
@@ -2799,13 +2813,14 @@ class PagedList:
     def __init__(self, pagefunc, pagesize, use_cache=True):
         self._pagefunc = pagefunc
         self._pagesize = pagesize
+        self._pagecount = float('inf')
         self._use_cache = use_cache
         self._cache = {}
 
     def getpage(self, pagenum):
         page_results = self._cache.get(pagenum)
         if page_results is None:
-            page_results = list(self._pagefunc(pagenum))
+            page_results = [] if pagenum > self._pagecount else list(self._pagefunc(pagenum))
         if self._use_cache:
             self._cache[pagenum] = page_results
         return page_results
@@ -2817,7 +2832,7 @@ class PagedList:
         raise NotImplementedError('This method must be implemented by subclasses')
 
     def __getitem__(self, idx):
-        # NOTE: cache must be enabled if this is used
+        assert self._use_cache, 'Indexing PagedList requires cache'
         if not isinstance(idx, int) or idx < 0:
             raise TypeError('indices must be non-negative integers')
         entries = self.getslice(idx, idx + 1)
@@ -2843,7 +2858,11 @@ class OnDemandPagedList(PagedList):
                 if (end is not None and firstid <= end <= nextfirstid)
                 else None)
 
-            page_results = self.getpage(pagenum)
+            try:
+                page_results = self.getpage(pagenum)
+            except Exception:
+                self._pagecount = pagenum - 1
+                raise
             if startv != 0 or endv is not None:
                 page_results = page_results[startv:endv]
             yield from page_results
@@ -2863,8 +2882,8 @@ class OnDemandPagedList(PagedList):
 
 class InAdvancePagedList(PagedList):
     def __init__(self, pagefunc, pagecount, pagesize):
-        self._pagecount = pagecount
         PagedList.__init__(self, pagefunc, pagesize, True)
+        self._pagecount = pagecount
 
     def _getslice(self, start, end):
         start_page = start // self._pagesize
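Note: with the PagedList changes above, a page fetch that raises records the last good page in _pagecount, so subsequent slices treat everything past it as empty rather than re-hitting the failing page. A toy check of that behavior, assuming a yt-dlp build containing this patch:

    from yt_dlp.utils import OnDemandPagedList

    def pagefunc(pagenum):
        if pagenum >= 2:
            raise OSError('page does not exist')
        return [f'entry{pagenum}']

    pl = OnDemandPagedList(pagefunc, 1)
    try:
        pl.getslice()      # unbounded slice runs off the end and raises at page 2
    except OSError:
        pass
    print(pl.getslice())   # ['entry0', 'entry1'] — page 2 is now known to be past the end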