author    | Jesús <heckyel@hyperbola.info> | 2021-10-18 15:24:21 -0500
committer | Jesús <heckyel@hyperbola.info> | 2021-10-18 15:24:21 -0500
commit    | 5122028a4bcac4ae577ef7fbd55ccad5cb34ef5e (patch)
tree      | 65209bc739db35e31f1c9b5b868eb5df4fe12ae3 /hypervideo_dl/extractor/common.py
parent    | 27fe903c511691c078942bef5ee9a05a43b15c8f (diff)
download  | hypervideo-5122028a4bcac4ae577ef7fbd55ccad5cb34ef5e.tar.lz
          | hypervideo-5122028a4bcac4ae577ef7fbd55ccad5cb34ef5e.tar.xz
          | hypervideo-5122028a4bcac4ae577ef7fbd55ccad5cb34ef5e.zip
update from upstream
Diffstat (limited to 'hypervideo_dl/extractor/common.py')
-rw-r--r-- | hypervideo_dl/extractor/common.py | 1653
1 file changed, 1127 insertions, 526 deletions
diff --git a/hypervideo_dl/extractor/common.py b/hypervideo_dl/extractor/common.py
index 8b622be..df74c75 100644
--- a/hypervideo_dl/extractor/common.py
+++ b/hypervideo_dl/extractor/common.py
@@ -4,13 +4,12 @@ from __future__ import unicode_literals
 import base64
 import datetime
 import hashlib
+import itertools
 import json
 import netrc
 import os
 import random
 import re
-import socket
-import ssl
 import sys
 import time
 import math
@@ -20,8 +19,8 @@ from ..compat import (
     compat_cookies_SimpleCookie,
     compat_etree_Element,
     compat_etree_fromstring,
+    compat_expanduser,
     compat_getpass,
-    compat_integer_types,
     compat_http_client,
     compat_os_name,
     compat_str,
@@ -32,12 +31,12 @@ from ..compat import (
     compat_urlparse,
     compat_xml_parse_error,
 )
+from ..downloader import FileDownloader
 from ..downloader.f4m import (
     get_base_url,
     remove_encrypted_media,
 )
 from ..utils import (
-    NO_DEFAULT,
     age_restricted,
     base_url,
     bug_reports_message,
@@ -47,16 +46,19 @@ from ..utils import (
     determine_protocol,
     dict_get,
     error_to_compat_str,
-    ExtractorError,
     extract_attributes,
+    ExtractorError,
     fix_xml_ampersands,
     float_or_none,
+    format_field,
     GeoRestrictedError,
     GeoUtils,
     int_or_none,
     js_to_json,
     JSON_LD_RE,
     mimetype2ext,
+    network_exceptions,
+    NO_DEFAULT,
     orderedSet,
     parse_bitrate,
     parse_codecs,
@@ -65,19 +67,21 @@ from ..utils import (
     parse_m3u8_attributes,
     parse_resolution,
     RegexNotFoundError,
-    sanitized_Request,
     sanitize_filename,
+    sanitized_Request,
     str_or_none,
     str_to_int,
     strip_or_none,
+    traverse_obj,
     unescapeHTML,
     unified_strdate,
     unified_timestamp,
     update_Request,
     update_url_query,
-    urljoin,
     url_basename,
     url_or_none,
+    urljoin,
+    variadic,
     xpath_element,
     xpath_text,
     xpath_with_ns,
@@ -143,6 +147,8 @@ class InfoExtractor(object):
                     * width      Width of the video, if known
                     * height     Height of the video, if known
                     * resolution Textual description of width and height
+                    * dynamic_range The dynamic range of the video. One of:
+                      "SDR" (None), "HDR10", "HDR10+", "HDR12", "HLG", "DV"
                     * tbr        Average bitrate of audio and video in KBit/s
                     * abr        Average audio bitrate in KBit/s
                     * acodec     Name of the audio codec in use
@@ -156,7 +162,7 @@ class InfoExtractor(object):
                     * player_url SWF Player URL (used for rtmpdump).
                     * protocol   The protocol that will be used for the actual
                                  download, lower-case.
-                                 "http", "https", "rtsp", "rtmp", "rtmpe",
+                                 "http", "https", "rtsp", "rtmp", "rtmp_ffmpeg", "rtmpe",
                                  "m3u8", "m3u8_native" or "http_dash_segments".
                     * fragment_base_url
                                  Base URL for fragments. Each fragment's path
@@ -201,8 +207,12 @@ class InfoExtractor(object):
                                  width : height ratio as float.
                     * no_resume  The server does not support resuming the
                                  (HTTP or RTMP) download. Boolean.
+                    * has_drm    The format has DRM and cannot be downloaded. Boolean
                     * downloader_options  A dictionary of downloader options as
                                  described in FileDownloader
+                    RTMP formats can also have the additional fields: page_url,
+                    app, play_path, tc_url, flash_version, rtmp_live, rtmp_conn,
+                    rtmp_protocol, rtmp_real_time
 
     url:            Final video URL.
     ext:            Video filename extension.
@@ -232,8 +242,7 @@ class InfoExtractor(object):
     creator:        The creator of the video.
     release_timestamp: UNIX timestamp of the moment the video was released.
     release_date:   The date (YYYYMMDD) when the video was released.
-    timestamp:      UNIX timestamp of the moment the video became available
-                    (uploaded).
+    timestamp:      UNIX timestamp of the moment the video was uploaded
     upload_date:    Video upload date (YYYYMMDD).
                     If not explicitly set, calculated from timestamp.
     uploader_id:    Nickname or id of the video uploader.
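
# A hedged sketch (not part of this commit) of a format dict using the fields
# documented above, including the newly documented "dynamic_range" and
# "has_drm" keys. All values and the URL are invented for illustration.
def build_example_format():
    return {
        'format_id': 'hls-1080',
        'url': 'https://example.com/video_1080.m3u8',  # hypothetical URL
        'ext': 'mp4',
        'protocol': 'm3u8_native',
        'width': 1920,
        'height': 1080,
        'dynamic_range': 'HDR10',  # one of: SDR, HDR10, HDR10+, HDR12, HLG, DV
        'vcodec': 'hev1.2.4.L153',
        'acodec': 'mp4a.40.2',
        'tbr': 5500,               # average bitrate of audio+video, in KBit/s
        'has_drm': False,          # True would mark the format undownloadable
    }
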
@@ -251,9 +260,11 @@ class InfoExtractor(object):
                     entry and one of:
                         * "data": The subtitles file contents
                         * "url": A URL pointing to the subtitles file
+                    It can optionally also have:
+                        * "name": Name or description of the subtitles
                     "ext" will be calculated from URL if missing
-    automatic_captions: Like 'subtitles', used by the YoutubeIE for
-                    automatically generated captions
+    automatic_captions: Like 'subtitles'; contains automatically generated
+                    captions instead of normal subtitles
     duration:       Length of the video in seconds, as an integer or float.
     view_count:     How many users have watched the video on the platform.
     like_count:     Number of positive ratings of the video
@@ -265,6 +276,7 @@ class InfoExtractor(object):
                     properties (all but one of text or html optional):
                         * "author" - human-readable name of the comment author
                         * "author_id" - user ID of the comment author
+                        * "author_thumbnail" - The thumbnail of the comment author
                         * "id" - Comment ID
                         * "html" - Comment as HTML
                         * "text" - Plain text of the comment
@@ -272,6 +284,12 @@ class InfoExtractor(object):
                         * "parent" - ID of the comment this one is replying to.
                                      Set to "root" to indicate that this is a
                                      comment to the original video.
+                        * "like_count" - Number of positive ratings of the comment
+                        * "dislike_count" - Number of negative ratings of the comment
+                        * "is_favorited" - Whether the comment is marked as
+                                           favorite by the video uploader
+                        * "author_is_uploader" - Whether the comment is made by
+                                                 the video uploader
     age_limit:      Age restriction for the video, as an integer (years)
     webpage_url:    The URL to the video webpage, if given to hypervideo it
                     should allow to get the same result again. (It will be set
@@ -279,8 +297,13 @@ class InfoExtractor(object):
     categories:     A list of categories that the video falls in, for example
                     ["Sports", "Berlin"]
     tags:           A list of tags assigned to the video, e.g. ["sweden", "pop music"]
+    cast:           A list of the video cast
     is_live:        True, False, or None (=unknown). Whether this video is a
                     live stream that goes on instead of a fixed-length video.
+    was_live:       True, False, or None (=unknown). Whether this video was
+                    originally a live stream.
+    live_status:    'is_live', 'is_upcoming', 'was_live', 'not_live' or None (=unknown)
+                    If absent, automatically set from is_live, was_live
     start_time:     Time in seconds where the reproduction should start, as
                     specified in the URL.
     end_time:       Time in seconds where the reproduction should end, as
@@ -289,6 +312,22 @@ class InfoExtractor(object):
                         * "start_time" - The start time of the chapter in seconds
                         * "end_time" - The end time of the chapter in seconds
                         * "title" (optional, string)
+    playable_in_embed: Whether this video is allowed to play in embedded
+                    players on other sites. Can be True (=always allowed),
+                    False (=never allowed), None (=unknown), or a string
+                    specifying the criteria for embedability (Eg: 'whitelist')
+    availability:   Under what condition the video is available. One of
+                    'private', 'premium_only', 'subscriber_only', 'needs_auth',
+                    'unlisted' or 'public'. Use 'InfoExtractor._availability'
+                    to set it
+    __post_extractor: A function to be called just before the metadata is
+                    written to either disk, logger or console. The function
+                    must return a dict which will be added to the info_dict.
+                    This is useful for additional information that is
+                    time-consuming to extract. Note that the fields thus
+                    extracted will not be available to output template and
+                    match_filter. So, only "comments" and "comment_count" are
+                    currently allowed to be extracted via this method.
 
     The following fields should only be used when the video belongs to some logical
     chapter or section:
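
# A hedged sketch (not part of this diff) of the "__post_extractor" contract
# described above: a zero-argument callable whose returned dict is merged into
# the info_dict just before it is written out. Only comments/comment_count are
# permitted this way; "fetch_comments" is an invented stand-in for slow work.
def attach_post_extractor(info_dict, fetch_comments):
    def _post_extractor():
        comments = fetch_comments()  # deferred, potentially time-consuming
        return {'comments': comments, 'comment_count': len(comments)}
    info_dict['__post_extractor'] = _post_extractor
    return info_dict

attach_post_extractor({'id': 'x', 'title': 'Example'}, lambda: [])
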
So, only "comments" and "comment_count" are + currently allowed to be extracted via this method. The following fields should only be used when the video belongs to some logical chapter or section: @@ -337,9 +376,8 @@ class InfoExtractor(object): There must be a key "entries", which is a list, an iterable, or a PagedList object, each element of which is a valid dictionary by this specification. - Additionally, playlists can have "id", "title", "description", "uploader", - "uploader_id", "uploader_url", "duration" attributes with the same semantics - as videos (see above). + Additionally, playlists can have "id", "title", and any other relevent + attributes with the same semantics as videos (see above). _type "multi_video" indicates that there are multiple videos that @@ -370,6 +408,10 @@ class InfoExtractor(object): _real_extract() methods and define a _VALID_URL regexp. Probably, they should also be added to the list of extractors. + Subclasses may also override suitable() if necessary, but ensure the function + signature is preserved and that this function imports everything it needs + (except other extractors), so that lazy_extractors works correctly + _GEO_BYPASS attribute may be set to False in order to disable geo restriction bypass mechanisms for a particular extractor. Though it won't disable explicit geo restriction bypass based on @@ -385,7 +427,7 @@ class InfoExtractor(object): will be used by geo restriction bypass mechanism similarly to _GEO_COUNTRIES. - Finally, the _WORKING attribute should be set to False for broken IEs + The _WORKING attribute should be set to False for broken IEs in order to warn the users and skip the tests. """ @@ -397,30 +439,47 @@ class InfoExtractor(object): _GEO_IP_BLOCKS = None _WORKING = True + _LOGIN_HINTS = { + 'any': 'Use --cookies, --username and --password or --netrc to provide account credentials', + 'cookies': ( + 'Use --cookies-from-browser or --cookies for the authentication. ' + 'See https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl for how to manually pass cookies'), + 'password': 'Use --username and --password or --netrc to provide account credentials', + } + def __init__(self, downloader=None): """Constructor. 
     def __init__(self, downloader=None):
         """Constructor. Receives an optional downloader."""
         self._ready = False
         self._x_forwarded_for_ip = None
+        self._printed_messages = set()
         self.set_downloader(downloader)
 
     @classmethod
-    def suitable(cls, url):
-        """Receives a URL and returns True if suitable for this IE."""
-
+    def _match_valid_url(cls, url):
         # This does not use has/getattr intentionally - we want to know whether
         # we have cached the regexp for *this* class, whereas getattr would also
         # match the superclass
         if '_VALID_URL_RE' not in cls.__dict__:
             cls._VALID_URL_RE = re.compile(cls._VALID_URL)
-        return cls._VALID_URL_RE.match(url) is not None
+        return cls._VALID_URL_RE.match(url)
+
+    @classmethod
+    def suitable(cls, url):
+        """Receives a URL and returns True if suitable for this IE."""
+        # This function must import everything it needs (except other extractors),
+        # so that lazy_extractors works correctly
+        return cls._match_valid_url(url) is not None
 
     @classmethod
     def _match_id(cls, url):
-        if '_VALID_URL_RE' not in cls.__dict__:
-            cls._VALID_URL_RE = re.compile(cls._VALID_URL)
-        m = cls._VALID_URL_RE.match(url)
-        assert m
-        return compat_str(m.group('id'))
+        return cls._match_valid_url(url).group('id')
+
+    @classmethod
+    def get_temp_id(cls, url):
+        try:
+            return cls._match_id(url)
+        except (IndexError, AttributeError):
+            return None
 
     @classmethod
     def working(cls):
@@ -429,6 +488,7 @@ class InfoExtractor(object):
 
     def initialize(self):
         """Initializes an instance (authentication, etc)."""
+        self._printed_messages = set()
         self._initialize_geo_bypass({
             'countries': self._GEO_COUNTRIES,
             'ip_blocks': self._GEO_IP_BLOCKS,
@@ -466,7 +526,7 @@ class InfoExtractor(object):
         if not self._x_forwarded_for_ip:
 
             # Geo bypass mechanism is explicitly disabled by user
-            if not self._downloader.params.get('geo_bypass', True):
+            if not self.get_param('geo_bypass', True):
                 return
 
             if not geo_bypass_context:
@@ -488,7 +548,7 @@ class InfoExtractor(object):
 
             # Explicit IP block specified by user, use it right away
             # regardless of whether extractor is geo bypassable or not
-            ip_block = self._downloader.params.get('geo_bypass_ip_block', None)
+            ip_block = self.get_param('geo_bypass_ip_block', None)
 
             # Otherwise use random IP block from geo bypass context but only
             # if extractor is known as geo bypassable
@@ -499,17 +559,15 @@ class InfoExtractor(object):
 
             if ip_block:
                 self._x_forwarded_for_ip = GeoUtils.random_ipv4(ip_block)
-                if self._downloader.params.get('verbose', False):
-                    self._downloader.to_screen(
-                        '[debug] Using fake IP %s as X-Forwarded-For.'
-                        % self._x_forwarded_for_ip)
+                self._downloader.write_debug(
+                    '[debug] Using fake IP %s as X-Forwarded-For' % self._x_forwarded_for_ip)
                 return
 
             # Path 2: bypassing based on country code
 
             # Explicit country code specified by user, use it right away
             # regardless of whether extractor is geo bypassable or not
-            country = self._downloader.params.get('geo_bypass_country', None)
+            country = self.get_param('geo_bypass_country', None)
 
             # Otherwise use random country code from geo bypass context but
             # only if extractor is known as geo bypassable
@@ -520,10 +578,8 @@ class InfoExtractor(object):
 
             if country:
                 self._x_forwarded_for_ip = GeoUtils.random_ipv4(country)
-                if self._downloader.params.get('verbose', False):
-                    self._downloader.to_screen(
-                        '[debug] Using fake IP %s (%s) as X-Forwarded-For.'
- % (self._x_forwarded_for_ip, country.upper())) + self._downloader.write_debug( + 'Using fake IP %s (%s) as X-Forwarded-For' % (self._x_forwarded_for_ip, country.upper())) def extract(self, url): """Extracts URL information and returns it in list of dicts.""" @@ -531,25 +587,34 @@ class InfoExtractor(object): for _ in range(2): try: self.initialize() + self.write_debug('Extracting URL: %s' % url) ie_result = self._real_extract(url) + if ie_result is None: + return None if self._x_forwarded_for_ip: ie_result['__x_forwarded_for_ip'] = self._x_forwarded_for_ip + subtitles = ie_result.get('subtitles') + if (subtitles and 'live_chat' in subtitles + and 'no-live-chat' in self.get_param('compat_opts', [])): + del subtitles['live_chat'] return ie_result except GeoRestrictedError as e: if self.__maybe_fake_ip_and_retry(e.countries): continue raise - except ExtractorError: - raise + except ExtractorError as e: + video_id = e.video_id or self.get_temp_id(url) + raise ExtractorError( + e.msg, video_id=video_id, ie=self.IE_NAME, tb=e.traceback, expected=e.expected, cause=e.cause) except compat_http_client.IncompleteRead as e: - raise ExtractorError('A network error has occurred.', cause=e, expected=True) + raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url)) except (KeyError, StopIteration) as e: - raise ExtractorError('An extractor error has occurred.', cause=e) + raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url)) def __maybe_fake_ip_and_retry(self, countries): - if (not self._downloader.params.get('geo_bypass_country', None) + if (not self.get_param('geo_bypass_country', None) and self._GEO_BYPASS - and self._downloader.params.get('geo_bypass', True) + and self.get_param('geo_bypass', True) and not self._x_forwarded_for_ip and countries): country_code = random.choice(countries) @@ -576,7 +641,7 @@ class InfoExtractor(object): @classmethod def ie_key(cls): """A string for getting the InfoExtractor with get_info_extractor""" - return compat_str(cls.__name__[:-2]) + return cls.__name__[:-2] @property def IE_NAME(self): @@ -587,14 +652,10 @@ class InfoExtractor(object): assert isinstance(err, compat_urllib_error.HTTPError) if expected_status is None: return False - if isinstance(expected_status, compat_integer_types): - return err.code == expected_status - elif isinstance(expected_status, (list, tuple)): - return err.code in expected_status elif callable(expected_status): return expected_status(err.code) is True else: - assert False + return err.code in variadic(expected_status) def _request_webpage(self, url_or_request, video_id, note=None, errnote=None, fatal=True, data=None, headers={}, query={}, expected_status=None): """ @@ -602,6 +663,14 @@ class InfoExtractor(object): See _download_webpage docstring for arguments specification. """ + if not self._downloader._first_webpage_request: + sleep_interval = float_or_none(self.get_param('sleep_interval_requests')) or 0 + if sleep_interval > 0: + self.to_screen('Sleeping %s seconds ...' 
% sleep_interval) + time.sleep(sleep_interval) + else: + self._downloader._first_webpage_request = False + if note is None: self.report_download_webpage(video_id) elif note is not False: @@ -627,12 +696,9 @@ class InfoExtractor(object): url_or_request = update_url_query(url_or_request, query) if data is not None or headers: url_or_request = sanitized_Request(url_or_request, data, headers) - exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error] - if hasattr(ssl, 'CertificateError'): - exceptions.append(ssl.CertificateError) try: return self._downloader.urlopen(url_or_request) - except tuple(exceptions) as err: + except network_exceptions as err: if isinstance(err, compat_urllib_error.HTTPError): if self.__can_accept_status_code(err, expected_status): # Retain reference to error to prevent file object from @@ -651,7 +717,7 @@ class InfoExtractor(object): if fatal: raise ExtractorError(errmsg, sys.exc_info()[2], cause=err) else: - self._downloader.report_warning(errmsg) + self.report_warning(errmsg) return False def _download_webpage_handle(self, url_or_request, video_id, note=None, errnote=None, fatal=True, encoding=None, data=None, headers={}, query={}, expected_status=None): @@ -723,15 +789,16 @@ class InfoExtractor(object): webpage_bytes = prefix + webpage_bytes if not encoding: encoding = self._guess_encoding_from_content(content_type, webpage_bytes) - if self._downloader.params.get('dump_intermediate_pages', False): + if self.get_param('dump_intermediate_pages', False): self.to_screen('Dumping request to ' + urlh.geturl()) dump = base64.b64encode(webpage_bytes).decode('ascii') self._downloader.to_screen(dump) - if self._downloader.params.get('write_pages', False): + if self.get_param('write_pages', False): basen = '%s_%s' % (video_id, urlh.geturl()) - if len(basen) > 240: + trim_length = self.get_param('trim_file_name') or 240 + if len(basen) > trim_length: h = '___' + hashlib.md5(basen.encode('utf-8')).hexdigest() - basen = basen[:240 - len(h)] + h + basen = basen[:trim_length - len(h)] + h raw_filename = basen + '.dump' filename = sanitize_filename(raw_filename, restricted=True) self.to_screen('Saving request to ' + filename) @@ -911,14 +978,72 @@ class InfoExtractor(object): else: self.report_warning(errmsg + str(ve)) - def report_warning(self, msg, video_id=None): - idstr = '' if video_id is None else '%s: ' % video_id - self._downloader.report_warning( - '[%s] %s%s' % (self.IE_NAME, idstr, msg)) + def _parse_socket_response_as_json(self, data, video_id, transform_source=None, fatal=True): + return self._parse_json( + data[data.find('{'):data.rfind('}') + 1], + video_id, transform_source, fatal) + + def _download_socket_json_handle( + self, url_or_request, video_id, note='Polling socket', + errnote='Unable to poll socket', transform_source=None, + fatal=True, encoding=None, data=None, headers={}, query={}, + expected_status=None): + """ + Return a tuple (JSON object, URL handle). + + See _download_webpage docstring for arguments specification. 
+ """ + res = self._download_webpage_handle( + url_or_request, video_id, note, errnote, fatal=fatal, + encoding=encoding, data=data, headers=headers, query=query, + expected_status=expected_status) + if res is False: + return res + webpage, urlh = res + return self._parse_socket_response_as_json( + webpage, video_id, transform_source=transform_source, + fatal=fatal), urlh + + def _download_socket_json( + self, url_or_request, video_id, note='Polling socket', + errnote='Unable to poll socket', transform_source=None, + fatal=True, encoding=None, data=None, headers={}, query={}, + expected_status=None): + """ + Return the JSON object as a dict. + + See _download_webpage docstring for arguments specification. + """ + res = self._download_socket_json_handle( + url_or_request, video_id, note=note, errnote=errnote, + transform_source=transform_source, fatal=fatal, encoding=encoding, + data=data, headers=headers, query=query, + expected_status=expected_status) + return res if res is False else res[0] + + def report_warning(self, msg, video_id=None, *args, only_once=False, **kwargs): + idstr = format_field(video_id, template='%s: ') + msg = f'[{self.IE_NAME}] {idstr}{msg}' + if only_once: + if f'WARNING: {msg}' in self._printed_messages: + return + self._printed_messages.add(f'WARNING: {msg}') + self._downloader.report_warning(msg, *args, **kwargs) - def to_screen(self, msg): + def to_screen(self, msg, *args, **kwargs): """Print msg to screen, prefixing it with '[ie_name]'""" - self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg)) + self._downloader.to_screen('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs) + + def write_debug(self, msg, *args, **kwargs): + self._downloader.write_debug('[%s] %s' % (self.IE_NAME, msg), *args, **kwargs) + + def get_param(self, name, default=None, *args, **kwargs): + if self._downloader: + return self._downloader.params.get(name, default, *args, **kwargs) + return default + + def report_drm(self, video_id, partial=False): + self.raise_no_formats('This video is DRM protected', expected=True, video_id=video_id) def report_extraction(self, id_or_name): """Report information extraction.""" @@ -936,24 +1061,40 @@ class InfoExtractor(object): """Report attempt to log in.""" self.to_screen('Logging in') - @staticmethod - def raise_login_required(msg='This video is only available for registered users'): - raise ExtractorError( - '%s. Use --username and --password or --netrc to provide account credentials.' % msg, - expected=True) + def raise_login_required( + self, msg='This video is only available for registered users', + metadata_available=False, method='any'): + if metadata_available and self.get_param('ignore_no_formats_error'): + self.report_warning(msg) + if method is not None: + msg = '%s. 
%s' % (msg, self._LOGIN_HINTS[method]) + raise ExtractorError(msg, expected=True) + + def raise_geo_restricted( + self, msg='This video is not available from your location due to geo restriction', + countries=None, metadata_available=False): + if metadata_available and self.get_param('ignore_no_formats_error'): + self.report_warning(msg) + else: + raise GeoRestrictedError(msg, countries=countries) - @staticmethod - def raise_geo_restricted(msg='This video is not available from your location due to geo restriction', countries=None): - raise GeoRestrictedError(msg, countries=countries) + def raise_no_formats(self, msg, expected=False, video_id=None): + if expected and self.get_param('ignore_no_formats_error'): + self.report_warning(msg, video_id) + elif isinstance(msg, ExtractorError): + raise msg + else: + raise ExtractorError(msg, expected=expected, video_id=video_id) # Methods for following #608 @staticmethod - def url_result(url, ie=None, video_id=None, video_title=None): + def url_result(url, ie=None, video_id=None, video_title=None, **kwargs): """Returns a URL that points to a page that should be processed""" # TODO: ie should be the class used for getting the info video_info = {'_type': 'url', 'url': url, 'ie_key': ie} + video_info.update(kwargs) if video_id is not None: video_info['id'] = video_id if video_title is not None: @@ -968,15 +1109,16 @@ class InfoExtractor(object): urls, playlist_id=playlist_id, playlist_title=playlist_title) @staticmethod - def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None): + def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None, **kwargs): """Returns a playlist""" video_info = {'_type': 'playlist', 'entries': entries} + video_info.update(kwargs) if playlist_id: video_info['id'] = playlist_id if playlist_title: video_info['title'] = playlist_title - if playlist_description: + if playlist_description is not None: video_info['description'] = playlist_description return video_info @@ -995,15 +1137,14 @@ class InfoExtractor(object): if mobj: break - if not self._downloader.params.get('no_color') and compat_os_name != 'nt' and sys.stderr.isatty(): - _name = '\033[0;34m%s\033[0m' % name - else: - _name = name + _name = self._downloader._color_text(name, 'blue') if mobj: if group is None: # return the first matching group return next(g for g in mobj.groups() if g is not None) + elif isinstance(group, (list, tuple)): + return tuple(mobj.group(g) for g in group) else: return mobj.group(group) elif default is not NO_DEFAULT: @@ -1011,7 +1152,7 @@ class InfoExtractor(object): elif fatal: raise RegexNotFoundError('Unable to extract %s' % _name) else: - self._downloader.report_warning('unable to extract %s' % _name + bug_reports_message()) + self.report_warning('unable to extract %s' % _name + bug_reports_message()) return None def _html_search_regex(self, pattern, string, name, default=NO_DEFAULT, fatal=True, flags=0, group=None): @@ -1029,9 +1170,12 @@ class InfoExtractor(object): password = None netrc_machine = netrc_machine or self._NETRC_MACHINE - if self._downloader.params.get('usenetrc', False): + if self.get_param('usenetrc', False): try: - info = netrc.netrc().authenticators(netrc_machine) + netrc_file = compat_expanduser(self.get_param('netrc_location') or '~') + if os.path.isdir(netrc_file): + netrc_file = os.path.join(netrc_file, '.netrc') + info = netrc.netrc(file=netrc_file).authenticators(netrc_machine) if info is not None: username = info[0] password = info[2] @@ 
-1039,7 +1183,7 @@ class InfoExtractor(object): raise netrc.NetrcParseError( 'No authenticators for %s' % netrc_machine) except (IOError, netrc.NetrcParseError) as err: - self._downloader.report_warning( + self.report_warning( 'parsing .netrc: %s' % error_to_compat_str(err)) return username, password @@ -1053,15 +1197,11 @@ class InfoExtractor(object): value. If there's no info available, return (None, None) """ - if self._downloader is None: - return (None, None) - - downloader_params = self._downloader.params # Attempt to use provided username and password or .netrc data - if downloader_params.get(username_option) is not None: - username = downloader_params[username_option] - password = downloader_params[password_option] + username = self.get_param(username_option) + if username is not None: + password = self.get_param(password_option) else: username, password = self._get_netrc_login_info(netrc_machine) @@ -1074,12 +1214,10 @@ class InfoExtractor(object): currently just uses the command line option If there's no info available, return None """ - if self._downloader is None: - return None - downloader_params = self._downloader.params - if downloader_params.get('twofactor') is not None: - return downloader_params['twofactor'] + tfa = self.get_param('twofactor') + if tfa is not None: + return tfa return compat_getpass('Type %s and press [Return]: ' % note) @@ -1102,8 +1240,7 @@ class InfoExtractor(object): [^>]+?content=(["\'])(?P<content>.*?)\2''' % re.escape(prop) def _og_search_property(self, prop, html, name=None, **kargs): - if not isinstance(prop, (list, tuple)): - prop = [prop] + prop = variadic(prop) if name is None: name = 'OpenGraph %s' % prop[0] og_regexes = [] @@ -1133,8 +1270,7 @@ class InfoExtractor(object): return self._og_search_property('url', html, **kargs) def _html_search_meta(self, name, html, display_name=None, fatal=False, **kwargs): - if not isinstance(name, (list, tuple)): - name = [name] + name = variadic(name) if display_name is None: display_name = name[0] return self._html_search_regex( @@ -1194,7 +1330,7 @@ class InfoExtractor(object): # JSON-LD may be malformed and thus `fatal` should be respected. # At the same time `default` may be passed that assumes `fatal=False` # for _search_regex. Let's simulate the same behavior here as well. - fatal = kwargs.get('fatal', True) if default == NO_DEFAULT else False + fatal = kwargs.get('fatal', True) if default is NO_DEFAULT else False json_ld = [] for mobj in json_ld_list: json_ld_item = self._parse_json( @@ -1214,7 +1350,7 @@ class InfoExtractor(object): elif fatal: raise RegexNotFoundError('Unable to extract JSON-LD') else: - self._downloader.report_warning('unable to extract JSON-LD %s' % bug_reports_message()) + self.report_warning('unable to extract JSON-LD %s' % bug_reports_message()) return {} def _json_ld(self, json_ld, video_id, fatal=True, expected_type=None): @@ -1369,81 +1505,283 @@ class InfoExtractor(object): html, '%s form' % form_id, group='form') return self._hidden_inputs(form) - def _sort_formats(self, formats, field_preference=None): - if not formats: - raise ExtractorError('No video formats found') + class FormatSort: + regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? 
*$' + + default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality', + 'res', 'fps', 'hdr:12', 'codec:vp9.2', 'size', 'br', 'asr', + 'proto', 'ext', 'hasaud', 'source', 'format_id') # These must not be aliases + ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr', + 'height', 'width', 'proto', 'vext', 'abr', 'aext', + 'fps', 'fs_approx', 'source', 'format_id') + + settings = { + 'vcodec': {'type': 'ordered', 'regex': True, + 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']}, + 'acodec': {'type': 'ordered', 'regex': True, + 'order': ['opus', 'vorbis', 'aac', 'mp?4a?', 'mp3', 'e?a?c-?3', 'dts', '', None, 'none']}, + 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range', + 'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]}, + 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol', + 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.+', '.*dash', 'ws|websocket', '', 'mms|rtsp', 'none', 'f4']}, + 'vext': {'type': 'ordered', 'field': 'video_ext', + 'order': ('mp4', 'webm', 'flv', '', 'none'), + 'order_free': ('webm', 'mp4', 'flv', '', 'none')}, + 'aext': {'type': 'ordered', 'field': 'audio_ext', + 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'webm', '', 'none'), + 'order_free': ('opus', 'ogg', 'webm', 'm4a', 'mp3', 'aac', '', 'none')}, + 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000}, + 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple', + 'field': ('vcodec', 'acodec'), + 'function': lambda it: int(any(v != 'none' for v in it))}, + 'ie_pref': {'priority': True, 'type': 'extractor'}, + 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)}, + 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)}, + 'lang': {'convert': 'ignore', 'field': 'language_preference'}, + 'quality': {'convert': 'float_none', 'default': -1}, + 'filesize': {'convert': 'bytes'}, + 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'}, + 'id': {'convert': 'string', 'field': 'format_id'}, + 'height': {'convert': 'float_none'}, + 'width': {'convert': 'float_none'}, + 'fps': {'convert': 'float_none'}, + 'tbr': {'convert': 'float_none'}, + 'vbr': {'convert': 'float_none'}, + 'abr': {'convert': 'float_none'}, + 'asr': {'convert': 'float_none'}, + 'source': {'convert': 'ignore', 'field': 'source_preference'}, + + 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')}, + 'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True}, + 'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')}, + 'ext': {'type': 'combined', 'field': ('vext', 'aext')}, + 'res': {'type': 'multiple', 'field': ('height', 'width'), + 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))}, + + # Most of these exist only for compatibility reasons + 'dimension': {'type': 'alias', 'field': 'res'}, + 'resolution': {'type': 'alias', 'field': 'res'}, + 'extension': {'type': 'alias', 'field': 'ext'}, + 'bitrate': {'type': 'alias', 'field': 'br'}, + 'total_bitrate': {'type': 'alias', 'field': 'tbr'}, + 'video_bitrate': {'type': 'alias', 'field': 'vbr'}, + 'audio_bitrate': {'type': 'alias', 'field': 'abr'}, + 'framerate': {'type': 'alias', 'field': 'fps'}, + 'language_preference': {'type': 'alias', 'field': 'lang'}, # not named as 'language' because such a field exists + 'protocol': {'type': 'alias', 'field': 'proto'}, + 
'source_preference': {'type': 'alias', 'field': 'source'}, + 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'}, + 'filesize_estimate': {'type': 'alias', 'field': 'size'}, + 'samplerate': {'type': 'alias', 'field': 'asr'}, + 'video_ext': {'type': 'alias', 'field': 'vext'}, + 'audio_ext': {'type': 'alias', 'field': 'aext'}, + 'video_codec': {'type': 'alias', 'field': 'vcodec'}, + 'audio_codec': {'type': 'alias', 'field': 'acodec'}, + 'video': {'type': 'alias', 'field': 'hasvid'}, + 'has_video': {'type': 'alias', 'field': 'hasvid'}, + 'audio': {'type': 'alias', 'field': 'hasaud'}, + 'has_audio': {'type': 'alias', 'field': 'hasaud'}, + 'extractor': {'type': 'alias', 'field': 'ie_pref'}, + 'preference': {'type': 'alias', 'field': 'ie_pref'}, + 'extractor_preference': {'type': 'alias', 'field': 'ie_pref'}, + 'format_id': {'type': 'alias', 'field': 'id'}, + } - for f in formats: - # Automatically determine tbr when missing based on abr and vbr (improves - # formats sorting in some cases) - if 'tbr' not in f and f.get('abr') is not None and f.get('vbr') is not None: - f['tbr'] = f['abr'] + f['vbr'] - - def _formats_key(f): - # TODO remove the following workaround - from ..utils import determine_ext - if not f.get('ext') and 'url' in f: - f['ext'] = determine_ext(f['url']) - - if isinstance(field_preference, (list, tuple)): - return tuple( - f.get(field) - if f.get(field) is not None - else ('' if field == 'format_id' else -1) - for field in field_preference) - - preference = f.get('preference') - if preference is None: - preference = 0 - if f.get('ext') in ['f4f', 'f4m']: # Not yet supported - preference -= 0.5 - - protocol = f.get('protocol') or determine_protocol(f) - proto_preference = 0 if protocol in ['http', 'https'] else (-0.5 if protocol == 'rtsp' else -0.1) - - if f.get('vcodec') == 'none': # audio only - preference -= 50 - if self._downloader.params.get('prefer_free_formats'): - ORDER = ['aac', 'mp3', 'm4a', 'webm', 'ogg', 'opus'] + _order = [] + + def _get_field_setting(self, field, key): + if field not in self.settings: + self.settings[field] = {} + propObj = self.settings[field] + if key not in propObj: + type = propObj.get('type') + if key == 'field': + default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field + elif key == 'convert': + default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore' else: - ORDER = ['webm', 'opus', 'ogg', 'mp3', 'aac', 'm4a'] - ext_preference = 0 - try: - audio_ext_preference = ORDER.index(f['ext']) - except ValueError: - audio_ext_preference = -1 + default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None) + propObj[key] = default + return propObj[key] + + def _resolve_field_value(self, field, value, convertNone=False): + if value is None: + if not convertNone: + return None + else: + value = value.lower() + conversion = self._get_field_setting(field, 'convert') + if conversion == 'ignore': + return None + if conversion == 'string': + return value + elif conversion == 'float_none': + return float_or_none(value) + elif conversion == 'bytes': + return FileDownloader.parse_bytes(value) + elif conversion == 'order': + order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order') + use_regex = self._get_field_setting(field, 'regex') + list_length = len(order_list) + empty_pos = order_list.index('') if '' in order_list else list_length + 1 + if use_regex and value is not None: + 
for i, regex in enumerate(order_list): + if regex and re.match(regex, value): + return list_length - i + return list_length - empty_pos # not in list + else: # not regex or value = None + return list_length - (order_list.index(value) if value in order_list else empty_pos) else: - if f.get('acodec') == 'none': # video only - preference -= 40 - if self._downloader.params.get('prefer_free_formats'): - ORDER = ['flv', 'mp4', 'webm'] + if value.isnumeric(): + return float(value) else: - ORDER = ['webm', 'flv', 'mp4'] - try: - ext_preference = ORDER.index(f['ext']) - except ValueError: - ext_preference = -1 - audio_ext_preference = 0 - - return ( - preference, - f.get('language_preference') if f.get('language_preference') is not None else -1, - f.get('quality') if f.get('quality') is not None else -1, - f.get('tbr') if f.get('tbr') is not None else -1, - f.get('filesize') if f.get('filesize') is not None else -1, - f.get('vbr') if f.get('vbr') is not None else -1, - f.get('height') if f.get('height') is not None else -1, - f.get('width') if f.get('width') is not None else -1, - proto_preference, - ext_preference, - f.get('abr') if f.get('abr') is not None else -1, - audio_ext_preference, - f.get('fps') if f.get('fps') is not None else -1, - f.get('filesize_approx') if f.get('filesize_approx') is not None else -1, - f.get('source_preference') if f.get('source_preference') is not None else -1, - f.get('format_id') if f.get('format_id') is not None else '', - ) - formats.sort(key=_formats_key) + self.settings[field]['convert'] = 'string' + return value + + def evaluate_params(self, params, sort_extractor): + self._use_free_order = params.get('prefer_free_formats', False) + self._sort_user = params.get('format_sort', []) + self._sort_extractor = sort_extractor + + def add_item(field, reverse, closest, limit_text): + field = field.lower() + if field in self._order: + return + self._order.append(field) + limit = self._resolve_field_value(field, limit_text) + data = { + 'reverse': reverse, + 'closest': False if limit is None else closest, + 'limit_text': limit_text, + 'limit': limit} + if field in self.settings: + self.settings[field].update(data) + else: + self.settings[field] = data + + sort_list = ( + tuple(field for field in self.default if self._get_field_setting(field, 'forced')) + + (tuple() if params.get('format_sort_force', False) + else tuple(field for field in self.default if self._get_field_setting(field, 'priority'))) + + tuple(self._sort_user) + tuple(sort_extractor) + self.default) + + for item in sort_list: + match = re.match(self.regex, item) + if match is None: + raise ExtractorError('Invalid format sort string "%s" given by extractor' % item) + field = match.group('field') + if field is None: + continue + if self._get_field_setting(field, 'type') == 'alias': + field = self._get_field_setting(field, 'field') + reverse = match.group('reverse') is not None + closest = match.group('separator') == '~' + limit_text = match.group('limit') + + has_limit = limit_text is not None + has_multiple_fields = self._get_field_setting(field, 'type') == 'combined' + has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit') + + fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,) + limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple() + limit_count = len(limits) + for (i, f) in enumerate(fields): + add_item(f, reverse, closest, + limits[i] if i < limit_count + else limits[0] 
if has_limit and not has_multiple_limits + else None) + + def print_verbose_info(self, write_debug): + if self._sort_user: + write_debug('Sort order given by user: %s' % ', '.join(self._sort_user)) + if self._sort_extractor: + write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor)) + write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % ( + '+' if self._get_field_setting(field, 'reverse') else '', field, + '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':', + self._get_field_setting(field, 'limit_text'), + self._get_field_setting(field, 'limit')) + if self._get_field_setting(field, 'limit_text') is not None else '') + for field in self._order if self._get_field_setting(field, 'visible')])) + + def _calculate_field_preference_from_value(self, format, field, type, value): + reverse = self._get_field_setting(field, 'reverse') + closest = self._get_field_setting(field, 'closest') + limit = self._get_field_setting(field, 'limit') + + if type == 'extractor': + maximum = self._get_field_setting(field, 'max') + if value is None or (maximum is not None and value >= maximum): + value = -1 + elif type == 'boolean': + in_list = self._get_field_setting(field, 'in_list') + not_in_list = self._get_field_setting(field, 'not_in_list') + value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1 + elif type == 'ordered': + value = self._resolve_field_value(field, value, True) + + # try to convert to number + val_num = float_or_none(value, default=self._get_field_setting(field, 'default')) + is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None + if is_num: + value = val_num + + return ((-10, 0) if value is None + else (1, value, 0) if not is_num # if a field has mixed strings and numbers, strings are sorted higher + else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest + else (0, value, 0) if not reverse and (limit is None or value <= limit) + else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit + else (-1, value, 0)) + + def _calculate_field_preference(self, format, field): + type = self._get_field_setting(field, 'type') # extractor, boolean, ordered, field, multiple + get_value = lambda f: format.get(self._get_field_setting(f, 'field')) + if type == 'multiple': + type = 'field' # Only 'field' is allowed in multiple for now + actual_fields = self._get_field_setting(field, 'field') + + value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields) + else: + value = get_value(field) + return self._calculate_field_preference_from_value(format, field, type, value) + + def calculate_preference(self, format): + # Determine missing protocol + if not format.get('protocol'): + format['protocol'] = determine_protocol(format) + + # Determine missing ext + if not format.get('ext') and 'url' in format: + format['ext'] = determine_ext(format['url']) + if format.get('vcodec') == 'none': + format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none' + format['video_ext'] = 'none' + else: + format['video_ext'] = format['ext'] + format['audio_ext'] = 'none' + # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported? 
+ # format['preference'] = -1000 + + # Determine missing bitrates + if format.get('tbr') is None: + if format.get('vbr') is not None and format.get('abr') is not None: + format['tbr'] = format.get('vbr', 0) + format.get('abr', 0) + else: + if format.get('vcodec') != 'none' and format.get('vbr') is None: + format['vbr'] = format.get('tbr') - format.get('abr', 0) + if format.get('acodec') != 'none' and format.get('abr') is None: + format['abr'] = format.get('tbr') - format.get('vbr', 0) + + return tuple(self._calculate_field_preference(format, field) for field in self._order) + + def _sort_formats(self, formats, field_preference=[]): + if not formats: + return + format_sort = self.FormatSort() # params and to_screen are taken from the downloader + format_sort.evaluate_params(self._downloader.params, field_preference) + if self.get_param('verbose', False): + format_sort.print_verbose_info(self._downloader.write_debug) + formats.sort(key=lambda f: format_sort.calculate_preference(f)) def _check_formats(self, formats, video_id): if formats: @@ -1481,7 +1819,7 @@ class InfoExtractor(object): """ Either "http:" or "https:", depending on the user's preferences """ return ( 'http:' - if self._downloader.params.get('prefer_insecure', False) + if self.get_param('prefer_insecure', False) else 'https:') def _proto_relative_url(self, url, scheme=None): @@ -1501,7 +1839,7 @@ class InfoExtractor(object): self.to_screen(msg) time.sleep(timeout) - def _extract_f4m_formats(self, manifest_url, video_id, preference=None, f4m_id=None, + def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None, transform_source=lambda s: fix_xml_ampersands(s).strip(), fatal=True, m3u8_id=None, data=None, headers={}, query={}): manifest = self._download_xml( @@ -1516,10 +1854,10 @@ class InfoExtractor(object): return [] return self._parse_f4m_formats( - manifest, manifest_url, video_id, preference=preference, f4m_id=f4m_id, + manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id, transform_source=transform_source, fatal=fatal, m3u8_id=m3u8_id) - def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, f4m_id=None, + def _parse_f4m_formats(self, manifest, manifest_url, video_id, preference=None, quality=None, f4m_id=None, transform_source=lambda s: fix_xml_ampersands(s).strip(), fatal=True, m3u8_id=None): if not isinstance(manifest, compat_etree_Element) and not fatal: @@ -1584,7 +1922,7 @@ class InfoExtractor(object): ext = determine_ext(manifest_url) if ext == 'f4m': f4m_formats = self._extract_f4m_formats( - manifest_url, video_id, preference=preference, f4m_id=f4m_id, + manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id, transform_source=transform_source, fatal=fatal) # Sometimes stream-level manifest contains single media entry that # does not contain any quality metadata (e.g. http://matchtv.ru/#live-player). 
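
# A hedged sketch (not part of this diff) of the bitrate back-filling that
# calculate_preference() above performs before sorting: when only two of
# tbr/vbr/abr are known, the third is derived with the same arithmetic.
def fill_bitrates(f):
    if f.get('tbr') is None:
        if f.get('vbr') is not None and f.get('abr') is not None:
            f['tbr'] = f['vbr'] + f['abr']
    else:
        if f.get('vcodec') != 'none' and f.get('vbr') is None:
            f['vbr'] = f['tbr'] - f.get('abr', 0)
        if f.get('acodec') != 'none' and f.get('abr') is None:
            f['abr'] = f['tbr'] - f.get('vbr', 0)
    return f

assert fill_bitrates({'vbr': 1500, 'abr': 128})['tbr'] == 1628
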
@@ -1604,7 +1942,7 @@ class InfoExtractor(object): elif ext == 'm3u8': formats.extend(self._extract_m3u8_formats( manifest_url, video_id, 'mp4', preference=preference, - m3u8_id=m3u8_id, fatal=fatal)) + quality=quality, m3u8_id=m3u8_id, fatal=fatal)) continue formats.append({ 'format_id': format_id, @@ -1617,56 +1955,88 @@ class InfoExtractor(object): 'height': height, 'vcodec': vcodec, 'preference': preference, + 'quality': quality, }) return formats - def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, m3u8_id=None): + def _m3u8_meta_format(self, m3u8_url, ext=None, preference=None, quality=None, m3u8_id=None): return { 'format_id': '-'.join(filter(None, [m3u8_id, 'meta'])), 'url': m3u8_url, 'ext': ext, 'protocol': 'm3u8', 'preference': preference - 100 if preference else -100, + 'quality': quality, 'resolution': 'multiple', 'format_note': 'Quality selection URL', } - def _extract_m3u8_formats(self, m3u8_url, video_id, ext=None, - entry_protocol='m3u8', preference=None, - m3u8_id=None, note=None, errnote=None, - fatal=True, live=False, data=None, headers={}, - query={}): + def _report_ignoring_subs(self, name): + self.report_warning(bug_reports_message( + f'Ignoring subtitle tracks found in the {name} manifest; ' + 'if any subtitle tracks are missing,' + ), only_once=True) + + def _extract_m3u8_formats(self, *args, **kwargs): + fmts, subs = self._extract_m3u8_formats_and_subtitles(*args, **kwargs) + if subs: + self._report_ignoring_subs('HLS') + return fmts + + def _extract_m3u8_formats_and_subtitles( + self, m3u8_url, video_id, ext=None, entry_protocol='m3u8_native', + preference=None, quality=None, m3u8_id=None, note=None, + errnote=None, fatal=True, live=False, data=None, headers={}, + query={}): + res = self._download_webpage_handle( m3u8_url, video_id, - note=note or 'Downloading m3u8 information', - errnote=errnote or 'Failed to download m3u8 information', + note='Downloading m3u8 information' if note is None else note, + errnote='Failed to download m3u8 information' if errnote is None else errnote, fatal=fatal, data=data, headers=headers, query=query) if res is False: - return [] + return [], {} m3u8_doc, urlh = res m3u8_url = urlh.geturl() - return self._parse_m3u8_formats( + return self._parse_m3u8_formats_and_subtitles( m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol, - preference=preference, m3u8_id=m3u8_id, live=live) + preference=preference, quality=quality, m3u8_id=m3u8_id, + note=note, errnote=errnote, fatal=fatal, live=live, data=data, + headers=headers, query=query, video_id=video_id) + + def _parse_m3u8_formats_and_subtitles( + self, m3u8_doc, m3u8_url, ext=None, entry_protocol='m3u8_native', + preference=None, quality=None, m3u8_id=None, live=False, note=None, + errnote=None, fatal=True, data=None, headers={}, query={}, + video_id=None): + formats, subtitles = [], {} - def _parse_m3u8_formats(self, m3u8_doc, m3u8_url, ext=None, - entry_protocol='m3u8', preference=None, - m3u8_id=None, live=False): if '#EXT-X-FAXS-CM:' in m3u8_doc: # Adobe Flash Access - return [] + return formats, subtitles - if re.search(r'#EXT-X-SESSION-KEY:.*?URI="skd://', m3u8_doc): # Apple FairPlay - return [] + has_drm = re.search(r'#EXT-X-(?:SESSION-)?KEY:.*?URI="skd://', m3u8_doc) - formats = [] + def format_url(url): + return url if re.match(r'^https?://', url) else compat_urlparse.urljoin(m3u8_url, url) - format_url = lambda u: ( - u - if re.match(r'^https?://', u) - else compat_urlparse.urljoin(m3u8_url, u)) + if self.get_param('hls_split_discontinuity', False): + def 
_extract_m3u8_playlist_indices(manifest_url=None, m3u8_doc=None): + if not m3u8_doc: + if not manifest_url: + return [] + m3u8_doc = self._download_webpage( + manifest_url, video_id, fatal=fatal, data=data, headers=headers, + note=False, errnote='Failed to download m3u8 playlist information') + if m3u8_doc is False: + return [] + return range(1 + sum(line.startswith('#EXT-X-DISCONTINUITY') for line in m3u8_doc.splitlines())) + + else: + def _extract_m3u8_playlist_indices(*args, **kwargs): + return [None] # References: # 1. https://tools.ietf.org/html/draft-pantos-http-live-streaming-21 @@ -1685,13 +2055,18 @@ class InfoExtractor(object): # clearly detect media playlist with this criterion. if '#EXT-X-TARGETDURATION' in m3u8_doc: # media playlist, return as is - return [{ + formats = [{ + 'format_id': '-'.join(map(str, filter(None, [m3u8_id, idx]))), + 'format_index': idx, 'url': m3u8_url, - 'format_id': m3u8_id, 'ext': ext, 'protocol': entry_protocol, 'preference': preference, - }] + 'quality': quality, + 'has_drm': has_drm, + } for idx in _extract_m3u8_playlist_indices(m3u8_doc=m3u8_doc)] + + return formats, subtitles groups = {} last_stream_inf = {} @@ -1703,26 +2078,45 @@ class InfoExtractor(object): if not (media_type and group_id and name): return groups.setdefault(group_id, []).append(media) + # <https://tools.ietf.org/html/rfc8216#section-4.3.4.1> + if media_type == 'SUBTITLES': + # According to RFC 8216 §4.3.4.2.1, URI is REQUIRED in the + # EXT-X-MEDIA tag if the media type is SUBTITLES. + # However, lack of URI has been spotted in the wild. + # e.g. NebulaIE; see https://github.com/hypervideo/hypervideo/issues/339 + if not media.get('URI'): + return + url = format_url(media['URI']) + sub_info = { + 'url': url, + 'ext': determine_ext(url), + } + if sub_info['ext'] == 'm3u8': + # Per RFC 8216 §3.1, the only possible subtitle format m3u8 + # files may contain is WebVTT: + # <https://tools.ietf.org/html/rfc8216#section-3.1> + sub_info['ext'] = 'vtt' + sub_info['protocol'] = 'm3u8_native' + lang = media.get('LANGUAGE') or 'und' + subtitles.setdefault(lang, []).append(sub_info) if media_type not in ('VIDEO', 'AUDIO'): return media_url = media.get('URI') if media_url: - format_id = [] - for v in (m3u8_id, group_id, name): - if v: - format_id.append(v) - f = { - 'format_id': '-'.join(format_id), - 'url': format_url(media_url), + manifest_url = format_url(media_url) + formats.extend({ + 'format_id': '-'.join(map(str, filter(None, (m3u8_id, group_id, name, idx)))), + 'format_note': name, + 'format_index': idx, + 'url': manifest_url, 'manifest_url': m3u8_url, 'language': media.get('LANGUAGE'), 'ext': ext, 'protocol': entry_protocol, 'preference': preference, - } - if media_type == 'AUDIO': - f['vcodec'] = 'none' - formats.append(f) + 'quality': quality, + 'vcodec': 'none' if media_type == 'AUDIO' else None, + } for idx in _extract_m3u8_playlist_indices(manifest_url)) def build_stream_name(): # Despite specification does not mention NAME attribute for @@ -1759,76 +2153,99 @@ class InfoExtractor(object): tbr = float_or_none( last_stream_inf.get('AVERAGE-BANDWIDTH') or last_stream_inf.get('BANDWIDTH'), scale=1000) - format_id = [] - if m3u8_id: - format_id.append(m3u8_id) - stream_name = build_stream_name() - # Bandwidth of live streams may differ over time thus making - # format_id unpredictable. So it's better to keep provided - # format_id intact. 
- if not live: - format_id.append(stream_name if stream_name else '%d' % (tbr if tbr else len(formats))) manifest_url = format_url(line.strip()) - f = { - 'format_id': '-'.join(format_id), - 'url': manifest_url, - 'manifest_url': m3u8_url, - 'tbr': tbr, - 'ext': ext, - 'fps': float_or_none(last_stream_inf.get('FRAME-RATE')), - 'protocol': entry_protocol, - 'preference': preference, - } - resolution = last_stream_inf.get('RESOLUTION') - if resolution: - mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution) + + for idx in _extract_m3u8_playlist_indices(manifest_url): + format_id = [m3u8_id, None, idx] + # Bandwidth of live streams may differ over time thus making + # format_id unpredictable. So it's better to keep provided + # format_id intact. + if not live: + stream_name = build_stream_name() + format_id[1] = stream_name if stream_name else '%d' % (tbr if tbr else len(formats)) + f = { + 'format_id': '-'.join(map(str, filter(None, format_id))), + 'format_index': idx, + 'url': manifest_url, + 'manifest_url': m3u8_url, + 'tbr': tbr, + 'ext': ext, + 'fps': float_or_none(last_stream_inf.get('FRAME-RATE')), + 'protocol': entry_protocol, + 'preference': preference, + 'quality': quality, + } + resolution = last_stream_inf.get('RESOLUTION') + if resolution: + mobj = re.search(r'(?P<width>\d+)[xX](?P<height>\d+)', resolution) + if mobj: + f['width'] = int(mobj.group('width')) + f['height'] = int(mobj.group('height')) + # Unified Streaming Platform + mobj = re.search( + r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url']) if mobj: - f['width'] = int(mobj.group('width')) - f['height'] = int(mobj.group('height')) - # Unified Streaming Platform - mobj = re.search( - r'audio.*?(?:%3D|=)(\d+)(?:-video.*?(?:%3D|=)(\d+))?', f['url']) - if mobj: - abr, vbr = mobj.groups() - abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000) - f.update({ - 'vbr': vbr, - 'abr': abr, - }) - codecs = parse_codecs(last_stream_inf.get('CODECS')) - f.update(codecs) - audio_group_id = last_stream_inf.get('AUDIO') - # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which - # references a rendition group MUST have a CODECS attribute. - # However, this is not always respected, for example, [2] - # contains EXT-X-STREAM-INF tag which references AUDIO - # rendition group but does not have CODECS and despite - # referencing an audio group it represents a complete - # (with audio and video) format. So, for such cases we will - # ignore references to rendition groups and treat them - # as complete formats. - if audio_group_id and codecs and f.get('vcodec') != 'none': - audio_group = groups.get(audio_group_id) - if audio_group and audio_group[0].get('URI'): - # TODO: update acodec for audio only formats with - # the same GROUP-ID - f['acodec'] = 'none' - formats.append(f) - - # for DailyMotion - progressive_uri = last_stream_inf.get('PROGRESSIVE-URI') - if progressive_uri: - http_f = f.copy() - del http_f['manifest_url'] - http_f.update({ - 'format_id': f['format_id'].replace('hls-', 'http-'), - 'protocol': 'http', - 'url': progressive_uri, - }) - formats.append(http_f) + abr, vbr = mobj.groups() + abr, vbr = float_or_none(abr, 1000), float_or_none(vbr, 1000) + f.update({ + 'vbr': vbr, + 'abr': abr, + }) + codecs = parse_codecs(last_stream_inf.get('CODECS')) + f.update(codecs) + audio_group_id = last_stream_inf.get('AUDIO') + # As per [1, 4.3.4.1.1] any EXT-X-STREAM-INF tag which + # references a rendition group MUST have a CODECS attribute. 
+ # However, this is not always respected, for example, [2] + # contains EXT-X-STREAM-INF tag which references AUDIO + # rendition group but does not have CODECS and despite + # referencing an audio group it represents a complete + # (with audio and video) format. So, for such cases we will + # ignore references to rendition groups and treat them + # as complete formats. + if audio_group_id and codecs and f.get('vcodec') != 'none': + audio_group = groups.get(audio_group_id) + if audio_group and audio_group[0].get('URI'): + # TODO: update acodec for audio only formats with + # the same GROUP-ID + f['acodec'] = 'none' + if not f.get('ext'): + f['ext'] = 'm4a' if f.get('vcodec') == 'none' else 'mp4' + formats.append(f) + + # for DailyMotion + progressive_uri = last_stream_inf.get('PROGRESSIVE-URI') + if progressive_uri: + http_f = f.copy() + del http_f['manifest_url'] + http_f.update({ + 'format_id': f['format_id'].replace('hls-', 'http-'), + 'protocol': 'http', + 'url': progressive_uri, + }) + formats.append(http_f) last_stream_inf = {} - return formats + return formats, subtitles + + def _extract_m3u8_vod_duration( + self, m3u8_vod_url, video_id, note=None, errnote=None, data=None, headers={}, query={}): + + m3u8_vod = self._download_webpage( + m3u8_vod_url, video_id, + note='Downloading m3u8 VOD manifest' if note is None else note, + errnote='Failed to download VOD manifest' if errnote is None else errnote, + fatal=False, data=data, headers=headers, query=query) + + return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id) + + def _parse_m3u8_vod_duration(self, m3u8_vod, video_id): + if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod: + return None + + return int(sum( + float(line[len('#EXTINF:'):].split(',')[0]) + for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None @staticmethod def _xpath_ns(path, namespace=None): @@ -1842,7 +2259,7 @@ class InfoExtractor(object): out.append('{%s}%s' % (namespace, c)) return '/'.join(out) - def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None): + def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None): smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source) if smil is False: @@ -1851,8 +2268,18 @@ class InfoExtractor(object): namespace = self._parse_smil_namespace(smil) - return self._parse_smil_formats( + fmts = self._parse_smil_formats( smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params) + subs = self._parse_smil_subtitles( + smil, namespace=namespace) + + return fmts, subs + + def _extract_smil_formats(self, *args, **kwargs): + fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs) + if subs: + self._report_ignoring_subs('SMIL') + return fmts def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None): smil = self._download_smil(smil_url, video_id, fatal=fatal) @@ -1921,14 +2348,15 @@ class InfoExtractor(object): rtmp_count = 0 http_count = 0 m3u8_count = 0 + imgs_count = 0 - srcs = [] + srcs = set() media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace)) for medium in media: src = medium.get('src') if not src or src in srcs: continue - srcs.append(src) + srcs.add(src) bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000) filesize = int_or_none(medium.get('size') or medium.get('fileSize')) @@ -2002,6 +2430,24 @@ class 
     @staticmethod
     def _xpath_ns(path, namespace=None):
@@ -1842,7 +2259,7 @@ class InfoExtractor(object):
                 out.append('{%s}%s' % (namespace, c))
         return '/'.join(out)

-    def _extract_smil_formats(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
+    def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
         smil = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)

         if smil is False:
@@ -1851,8 +2268,18 @@ class InfoExtractor(object):

         namespace = self._parse_smil_namespace(smil)

-        return self._parse_smil_formats(
+        fmts = self._parse_smil_formats(
             smil, smil_url, video_id, namespace=namespace, f4m_params=f4m_params)
+        subs = self._parse_smil_subtitles(
+            smil, namespace=namespace)
+
+        return fmts, subs
+
+    def _extract_smil_formats(self, *args, **kwargs):
+        fmts, subs = self._extract_smil_formats_and_subtitles(*args, **kwargs)
+        if subs:
+            self._report_ignoring_subs('SMIL')
+        return fmts
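This rename pattern recurs throughout the commit (HLS, SMIL, DASH, ISM, Akamai): the real worker now returns a `(formats, subtitles)` pair, and a thin wrapper preserves the old formats-only API while warning when subtitles are silently dropped. A minimal sketch of the pattern, assuming a hypothetical "foo" manifest type; the warning here stands in for `_report_ignoring_subs`:

    def extract_foo_formats_and_subtitles(url, video_id):
        # ... real manifest parsing would happen here ...
        return [], {}  # (formats, subtitles)

    def extract_foo_formats(*args, **kwargs):
        # Legacy single-value API kept alive on top of the new tuple API.
        fmts, subs = extract_foo_formats_and_subtitles(*args, **kwargs)
        if subs:
            print('[warning] Ignoring subtitle tracks found in the foo manifest')
        return fmts

    print(extract_foo_formats('https://example.com/master.foo', 'xyz'))  # []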

     def _extract_smil_info(self, smil_url, video_id, fatal=True, f4m_params=None):
         smil = self._download_smil(smil_url, video_id, fatal=fatal)
@@ -1921,14 +2348,15 @@ class InfoExtractor(object):
         rtmp_count = 0
         http_count = 0
         m3u8_count = 0
+        imgs_count = 0

-        srcs = []
+        srcs = set()
         media = smil.findall(self._xpath_ns('.//video', namespace)) + smil.findall(self._xpath_ns('.//audio', namespace))
         for medium in media:
             src = medium.get('src')
             if not src or src in srcs:
                 continue
-            srcs.append(src)
+            srcs.add(src)

             bitrate = float_or_none(medium.get('system-bitrate') or medium.get('systemBitrate'), 1000)
             filesize = int_or_none(medium.get('size') or medium.get('fileSize'))
@@ -2002,6 +2430,24 @@ class InfoExtractor(object):
                 'height': height,
             })

+        for medium in smil.findall(self._xpath_ns('.//imagestream', namespace)):
+            src = medium.get('src')
+            if not src or src in srcs:
+                continue
+            srcs.add(src)
+
+            imgs_count += 1
+            formats.append({
+                'format_id': 'imagestream-%d' % (imgs_count),
+                'url': src,
+                'ext': mimetype2ext(medium.get('type')),
+                'acodec': 'none',
+                'vcodec': 'none',
+                'width': int_or_none(medium.get('width')),
+                'height': int_or_none(medium.get('height')),
+                'format_note': 'SMIL storyboards',
+            })
+
         return formats

     def _parse_smil_subtitles(self, smil, namespace=None, subtitles_lang='en'):
@@ -2071,23 +2517,38 @@ class InfoExtractor(object):
             })
         return entries

-    def _extract_mpd_formats(self, mpd_url, video_id, mpd_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
+    def _extract_mpd_formats(self, *args, **kwargs):
+        fmts, subs = self._extract_mpd_formats_and_subtitles(*args, **kwargs)
+        if subs:
+            self._report_ignoring_subs('DASH')
+        return fmts
+
+    def _extract_mpd_formats_and_subtitles(
+            self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
+            fatal=True, data=None, headers={}, query={}):
         res = self._download_xml_handle(
             mpd_url, video_id,
-            note=note or 'Downloading MPD manifest',
-            errnote=errnote or 'Failed to download MPD manifest',
+            note='Downloading MPD manifest' if note is None else note,
+            errnote='Failed to download MPD manifest' if errnote is None else errnote,
             fatal=fatal, data=data, headers=headers, query=query)
         if res is False:
-            return []
+            return [], {}
         mpd_doc, urlh = res
         if mpd_doc is None:
-            return []
+            return [], {}
         mpd_base_url = base_url(urlh.geturl())

-        return self._parse_mpd_formats(
+        return self._parse_mpd_formats_and_subtitles(
             mpd_doc, mpd_id, mpd_base_url, mpd_url)

-    def _parse_mpd_formats(self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
+    def _parse_mpd_formats(self, *args, **kwargs):
+        fmts, subs = self._parse_mpd_formats_and_subtitles(*args, **kwargs)
+        if subs:
+            self._report_ignoring_subs('DASH')
+        return fmts
+
+    def _parse_mpd_formats_and_subtitles(
+            self, mpd_doc, mpd_id=None, mpd_base_url='', mpd_url=None):
         """
         Parse formats from MPD manifest.
         References:
         1.
@@ -2095,8 +2556,9 @@ class InfoExtractor(object):
            http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
         2. https://en.wikipedia.org/wiki/Dynamic_Adaptive_Streaming_over_HTTP
         """
-        if mpd_doc.get('type') == 'dynamic':
-            return []
+        if not self.get_param('dynamic_mpd', True):
+            if mpd_doc.get('type') == 'dynamic':
+                return [], {}

         namespace = self._search_regex(r'(?i)^{([^}]+)?}MPD$', mpd_doc.tag, 'namespace', default=None)
@@ -2165,7 +2627,8 @@ class InfoExtractor(object):
             return ms_info

         mpd_duration = parse_duration(mpd_doc.get('mediaPresentationDuration'))
-        formats = []
+        formats, subtitles = [], {}
+        stream_numbers = {'audio': 0, 'video': 0}
         for period in mpd_doc.findall(_add_ns('Period')):
             period_duration = parse_duration(period.get('duration')) or mpd_duration
             period_ms_info = extract_multisegment_info(period, {
@@ -2173,39 +2636,53 @@ class InfoExtractor(object):
                 'timescale': 1,
             })
             for adaptation_set in period.findall(_add_ns('AdaptationSet')):
-                if is_drm_protected(adaptation_set):
-                    continue
                 adaption_set_ms_info = extract_multisegment_info(adaptation_set, period_ms_info)
                 for representation in adaptation_set.findall(_add_ns('Representation')):
-                    if is_drm_protected(representation):
-                        continue
                     representation_attrib = adaptation_set.attrib.copy()
                     representation_attrib.update(representation.attrib)
                     # According to [1, 5.3.7.2, Table 9, page 41], @mimeType is mandatory
                     mime_type = representation_attrib['mimeType']
-                    content_type = mime_type.split('/')[0]
-                    if content_type == 'text':
-                        # TODO implement WebVTT downloading
-                        pass
-                    elif content_type in ('video', 'audio'):
-                        base_url = ''
-                        for element in (representation, adaptation_set, period, mpd_doc):
-                            base_url_e = element.find(_add_ns('BaseURL'))
-                            if base_url_e is not None:
-                                base_url = base_url_e.text + base_url
-                                if re.match(r'^https?://', base_url):
-                                    break
-                        if mpd_base_url and not re.match(r'^https?://', base_url):
-                            if not mpd_base_url.endswith('/') and not base_url.startswith('/'):
-                                mpd_base_url += '/'
-                            base_url = mpd_base_url + base_url
-                        representation_id = representation_attrib.get('id')
-                        lang = representation_attrib.get('lang')
-                        url_el = representation.find(_add_ns('BaseURL'))
-                        filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
-                        bandwidth = int_or_none(representation_attrib.get('bandwidth'))
+                    content_type = representation_attrib.get('contentType', mime_type.split('/')[0])
+
+                    codecs = representation_attrib.get('codecs', '')
+                    if content_type not in ('video', 'audio', 'text'):
+                        if mime_type == 'image/jpeg':
+                            content_type = mime_type
+                        elif codecs.split('.')[0] == 'stpp':
+                            content_type = 'text'
+                        elif mimetype2ext(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
+                            content_type = 'text'
+                        else:
+                            self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
+                            continue
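The new content-type detection no longer trusts `mimeType` alone: it falls back to the `contentType` attribute, then sniffs subtitle representations by a `codecs` value starting with `stpp` (TTML-in-MP4) or by a text-like extension. Roughly, as a standalone function; the extension table below is a tiny stand-in for `utils.mimetype2ext`, covering only the cases needed here:

    def guess_content_type(mime_type, content_type=None, codecs=''):
        # Stand-in for mimetype2ext, reduced to the text-like cases.
        mime_to_ext = {'application/ttml+xml': 'ttml', 'application/json': 'json'}
        content_type = content_type or mime_type.split('/')[0]
        if content_type in ('video', 'audio', 'text'):
            return content_type
        if mime_type == 'image/jpeg':       # DASH storyboards
            return mime_type
        if codecs.split('.')[0] == 'stpp':  # e.g. codecs='stpp.ttml.im1t'
            return 'text'
        if mime_to_ext.get(mime_type) in ('tt', 'dfxp', 'ttml', 'xml', 'json'):
            return 'text'
        return None  # caller warns and skips the representation

    print(guess_content_type('application/mp4', codecs='stpp.ttml.im1t'))  # text
    print(guess_content_type('image/jpeg'))                                # image/jpeg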
+
+                    base_url = ''
+                    for element in (representation, adaptation_set, period, mpd_doc):
+                        base_url_e = element.find(_add_ns('BaseURL'))
+                        if base_url_e is not None:
+                            base_url = base_url_e.text + base_url
+                            if re.match(r'^https?://', base_url):
+                                break
+                    if mpd_base_url and base_url.startswith('/'):
+                        base_url = compat_urlparse.urljoin(mpd_base_url, base_url)
+                    elif mpd_base_url and not re.match(r'^https?://', base_url):
+                        if not mpd_base_url.endswith('/'):
+                            mpd_base_url += '/'
+                        base_url = mpd_base_url + base_url
+                    representation_id = representation_attrib.get('id')
+                    lang = representation_attrib.get('lang')
+                    url_el = representation.find(_add_ns('BaseURL'))
+                    filesize = int_or_none(url_el.attrib.get('{http://youtube.com/yt/2012/10/10}contentLength') if url_el is not None else None)
+                    bandwidth = int_or_none(representation_attrib.get('bandwidth'))
+                    if representation_id is not None:
+                        format_id = representation_id
+                    else:
+                        format_id = content_type
+                    if mpd_id:
+                        format_id = mpd_id + '-' + format_id
+                    if content_type in ('video', 'audio'):
                         f = {
-                            'format_id': '%s-%s' % (mpd_id, representation_id) if mpd_id else representation_id,
+                            'format_id': format_id,
                             'manifest_url': mpd_url,
                             'ext': mimetype2ext(mime_type),
                             'width': int_or_none(representation_attrib.get('width')),
@@ -2217,198 +2694,230 @@ class InfoExtractor(object):
                             'format_note': 'DASH %s' % content_type,
                             'filesize': filesize,
                             'container': mimetype2ext(mime_type) + '_dash',
+                            'manifest_stream_number': stream_numbers[content_type]
+                        }
+                        f.update(parse_codecs(codecs))
+                        stream_numbers[content_type] += 1
+                    elif content_type == 'text':
+                        f = {
+                            'ext': mimetype2ext(mime_type),
+                            'manifest_url': mpd_url,
+                            'filesize': filesize,
                         }
-                        f.update(parse_codecs(representation_attrib.get('codecs')))
-                        representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
-
-                        def prepare_template(template_name, identifiers):
-                            tmpl = representation_ms_info[template_name]
-                            # First of, % characters outside $...$ templates
-                            # must be escaped by doubling for proper processing
-                            # by % operator string formatting used further (see
-                            # https://github.com/ytdl-org/youtube-dl/issues/16867).
-                            t = ''
-                            in_template = False
-                            for c in tmpl:
+                    elif content_type == 'image/jpeg':
+                        # See test case in VikiIE
+                        # https://www.viki.com/videos/1175236v-choosing-spouse-by-lottery-episode-1
+                        f = {
+                            'format_id': format_id,
+                            'ext': 'mhtml',
+                            'manifest_url': mpd_url,
+                            'format_note': 'DASH storyboards (jpeg)',
+                            'acodec': 'none',
+                            'vcodec': 'none',
+                        }
+                    if is_drm_protected(adaptation_set) or is_drm_protected(representation):
+                        f['has_drm'] = True
+                    representation_ms_info = extract_multisegment_info(representation, adaption_set_ms_info)
+
+                    def prepare_template(template_name, identifiers):
+                        tmpl = representation_ms_info[template_name]
+                        # First of, % characters outside $...$ templates
+                        # must be escaped by doubling for proper processing
+                        # by % operator string formatting used further (see
+                        # https://github.com/ytdl-org/youtube-dl/issues/16867).
+                        t = ''
+                        in_template = False
+                        for c in tmpl:
+                            t += c
+                            if c == '$':
+                                in_template = not in_template
+                            elif c == '%' and not in_template:
                                 t += c
-                                if c == '$':
-                                    in_template = not in_template
-                                elif c == '%' and not in_template:
-                                    t += c
-                            # Next, $...$ templates are translated to their
-                            # %(...) counterparts to be used with % operator
+                        # Next, $...$ templates are translated to their
+                        # %(...) counterparts to be used with % operator
+                        if representation_id is not None:
                             t = t.replace('$RepresentationID$', representation_id)
-                            t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
-                            t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
-                            t.replace('$$', '$')
-                            return t
-
-                        # @initialization is a regular template like @media one
-                        # so it should be handled just the same way (see
-                        # https://github.com/ytdl-org/youtube-dl/issues/11605)
-                        if 'initialization' in representation_ms_info:
-                            initialization_template = prepare_template(
-                                'initialization',
-                                # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
-                                # $Time$ shall not be included for @initialization thus
-                                # only $Bandwidth$ remains
-                                ('Bandwidth', ))
-                            representation_ms_info['initialization_url'] = initialization_template % {
-                                'Bandwidth': bandwidth,
-                            }
+                        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
+                        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
+                        t.replace('$$', '$')
+                        return t
+
+                    # @initialization is a regular template like @media one
+                    # so it should be handled just the same way (see
+                    # https://github.com/ytdl-org/youtube-dl/issues/11605)
+                    if 'initialization' in representation_ms_info:
+                        initialization_template = prepare_template(
+                            'initialization',
+                            # As per [1, 5.3.9.4.2, Table 15, page 54] $Number$ and
+                            # $Time$ shall not be included for @initialization thus
+                            # only $Bandwidth$ remains
+                            ('Bandwidth', ))
+                        representation_ms_info['initialization_url'] = initialization_template % {
+                            'Bandwidth': bandwidth,
+                        }
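prepare_template turns a DASH `$...$` URL template into a Python %-format string: literal `%` is doubled, `$Number$`-style identifiers become `%(Number)d`, and width-padded forms like `$Number%05d$` become `%(Number)05d`. A worked example with a made-up template (the sketch folds the final `$$` unescaping into the return value):

    import re

    def prepare(tmpl, identifiers=('Number', 'Bandwidth', 'Time')):
        # Escape raw % signs outside $...$ so the % operator can't choke on them.
        t, in_template = '', False
        for c in tmpl:
            t += c
            if c == '$':
                in_template = not in_template
            elif c == '%' and not in_template:
                t += c
        t = re.sub(r'\$(%s)\$' % '|'.join(identifiers), r'%(\1)d', t)
        t = re.sub(r'\$(%s)%%([^$]+)\$' % '|'.join(identifiers), r'%(\1)\2', t)
        return t.replace('$$', '$')

    tmpl = prepare('seg-$Number%05d$.m4s?gen=100%')
    print(tmpl)                   # seg-%(Number)05d.m4s?gen=100%%
    print(tmpl % {'Number': 42})  # seg-00042.m4s?gen=100%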

-                        def location_key(location):
-                            return 'url' if re.match(r'^https?://', location) else 'path'
-
-                        if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
-
-                            media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
-                            media_location_key = location_key(media_template)
-
-                            # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
-                            # can't be used at the same time
-                            if '%(Number' in media_template and 's' not in representation_ms_info:
-                                segment_duration = None
-                                if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
-                                    segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
-                                    representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
-                                representation_ms_info['fragments'] = [{
-                                    media_location_key: media_template % {
-                                        'Number': segment_number,
-                                        'Bandwidth': bandwidth,
-                                    },
-                                    'duration': segment_duration,
-                                } for segment_number in range(
-                                    representation_ms_info['start_number'],
-                                    representation_ms_info['total_number'] + representation_ms_info['start_number'])]
-                            else:
-                                # $Number*$ or $Time$ in media template with S list available
-                                # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
-                                # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
-                                representation_ms_info['fragments'] = []
-                                segment_time = 0
-                                segment_d = None
-                                segment_number = representation_ms_info['start_number']
-
-                                def add_segment_url():
-                                    segment_url = media_template % {
-                                        'Time': segment_time,
-                                        'Bandwidth': bandwidth,
-                                        'Number': segment_number,
-                                    }
-                                    representation_ms_info['fragments'].append({
-                                        media_location_key: segment_url,
-                                        'duration': float_or_none(segment_d, representation_ms_info['timescale']),
-                                    })
-
-                                for num, s in enumerate(representation_ms_info['s']):
-                                    segment_time = s.get('t') or segment_time
-                                    segment_d = s['d']
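Both the deleted branch above and its replacement below walk a DASH SegmentTimeline: each `S` element carries an optional start `t`, a duration `d`, and a repeat count `r`, so one node can stand for `r + 1` consecutive segments. A compact worked example; the timescale and values are invented:

    # Expand S elements (t=start, d=duration, r=extra repeats) into segment times.
    s_list = [{'t': 0, 'd': 90000, 'r': 2}, {'d': 45000}]  # timescale 90000 -> 1 s segments
    timescale = 90000

    segments, time = [], 0
    for s in s_list:
        time = s.get('t', time)
        for _ in range(s.get('r', 0) + 1):
            segments.append({'start': time / timescale, 'duration': s['d'] / timescale})
            time += s['d']

    print(segments)
    # [{'start': 0.0, 'duration': 1.0}, {'start': 1.0, 'duration': 1.0},
    #  {'start': 2.0, 'duration': 1.0}, {'start': 3.0, 'duration': 0.5}]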
+                    def location_key(location):
+                        return 'url' if re.match(r'^https?://', location) else 'path'
+
+                    if 'segment_urls' not in representation_ms_info and 'media' in representation_ms_info:
+
+                        media_template = prepare_template('media', ('Number', 'Bandwidth', 'Time'))
+                        media_location_key = location_key(media_template)
+
+                        # As per [1, 5.3.9.4.4, Table 16, page 55] $Number$ and $Time$
+                        # can't be used at the same time
+                        if '%(Number' in media_template and 's' not in representation_ms_info:
+                            segment_duration = None
+                            if 'total_number' not in representation_ms_info and 'segment_duration' in representation_ms_info:
+                                segment_duration = float_or_none(representation_ms_info['segment_duration'], representation_ms_info['timescale'])
+                                representation_ms_info['total_number'] = int(math.ceil(float(period_duration) / segment_duration))
+                            representation_ms_info['fragments'] = [{
+                                media_location_key: media_template % {
+                                    'Number': segment_number,
+                                    'Bandwidth': bandwidth,
+                                },
+                                'duration': segment_duration,
+                            } for segment_number in range(
+                                representation_ms_info['start_number'],
+                                representation_ms_info['total_number'] + representation_ms_info['start_number'])]
+                        else:
+                            # $Number*$ or $Time$ in media template with S list available
+                            # Example $Number*$: http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg
+                            # Example $Time$: https://play.arkena.com/embed/avp/v2/player/media/b41dda37-d8e7-4d3f-b1b5-9a9db578bdfe/1/129411
+                            representation_ms_info['fragments'] = []
+                            segment_time = 0
+                            segment_d = None
+                            segment_number = representation_ms_info['start_number']
+
+                            def add_segment_url():
+                                segment_url = media_template % {
+                                    'Time': segment_time,
+                                    'Bandwidth': bandwidth,
+                                    'Number': segment_number,
+                                }
+                                representation_ms_info['fragments'].append({
+                                    media_location_key: segment_url,
+                                    'duration': float_or_none(segment_d, representation_ms_info['timescale']),
+                                })
+
+                            for num, s in enumerate(representation_ms_info['s']):
+                                segment_time = s.get('t') or segment_time
+                                segment_d = s['d']
+                                add_segment_url()
+                                segment_number += 1
+                                for r in range(s.get('r', 0)):
+                                    segment_time += segment_d
                                     add_segment_url()
                                     segment_number += 1
-                                    for r in range(s.get('r', 0)):
-                                        segment_time += segment_d
-                                        add_segment_url()
-                                        segment_number += 1
-                                    segment_time += segment_d
-                            elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
-                                # No media template
-                                # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
-                                # or any YouTube dashsegments video
-                                fragments = []
-                                segment_index = 0
-                                timescale = representation_ms_info['timescale']
-                                for s in representation_ms_info['s']:
-                                    duration = float_or_none(s['d'], timescale)
-                                    for r in range(s.get('r', 0) + 1):
-                                        segment_uri = representation_ms_info['segment_urls'][segment_index]
-                                        fragments.append({
-                                            location_key(segment_uri): segment_uri,
-                                            'duration': duration,
-                                        })
-                                        segment_index += 1
-                                representation_ms_info['fragments'] = fragments
-                            elif 'segment_urls' in representation_ms_info:
-                                # Segment URLs with no SegmentTimeline
-                                # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
-                                # https://github.com/ytdl-org/youtube-dl/pull/14844
-                                fragments = []
-                                segment_duration = float_or_none(
-                                    representation_ms_info['segment_duration'],
-                                    representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
-                                for segment_url in representation_ms_info['segment_urls']:
-                                    fragment = {
-                                        location_key(segment_url): segment_url,
-                                    }
-                                    if segment_duration:
-                                        fragment['duration'] = segment_duration
-                                    fragments.append(fragment)
-                                representation_ms_info['fragments'] = fragments
-                            # If there is a fragments key available then we correctly recognized fragmented media.
-                            # Otherwise we will assume unfragmented media with direct access. Technically, such
-                            # assumption is not necessarily correct since we may simply have no support for
-                            # some forms of fragmented media renditions yet, but for now we'll use this fallback.
-                            if 'fragments' in representation_ms_info:
-                                f.update({
-                                    # NB: mpd_url may be empty when MPD manifest is parsed from a string
-                                    'url': mpd_url or base_url,
-                                    'fragment_base_url': base_url,
-                                    'fragments': [],
-                                    'protocol': 'http_dash_segments',
-                                })
-                                if 'initialization_url' in representation_ms_info:
-                                    initialization_url = representation_ms_info['initialization_url']
-                                    if not f.get('url'):
-                                        f['url'] = initialization_url
-                                    f['fragments'].append({location_key(initialization_url): initialization_url})
-                                f['fragments'].extend(representation_ms_info['fragments'])
-                            else:
-                                # Assuming direct URL to unfragmented media.
-                                f['url'] = base_url
-                            formats.append(f)
+                                segment_time += segment_d
+                    elif 'segment_urls' in representation_ms_info and 's' in representation_ms_info:
+                        # No media template
+                        # Example: https://www.youtube.com/watch?v=iXZV5uAYMJI
+                        # or any YouTube dashsegments video
+                        fragments = []
+                        segment_index = 0
+                        timescale = representation_ms_info['timescale']
+                        for s in representation_ms_info['s']:
+                            duration = float_or_none(s['d'], timescale)
+                            for r in range(s.get('r', 0) + 1):
+                                segment_uri = representation_ms_info['segment_urls'][segment_index]
+                                fragments.append({
+                                    location_key(segment_uri): segment_uri,
+                                    'duration': duration,
+                                })
+                                segment_index += 1
+                        representation_ms_info['fragments'] = fragments
+                    elif 'segment_urls' in representation_ms_info:
+                        # Segment URLs with no SegmentTimeline
+                        # Example: https://www.seznam.cz/zpravy/clanek/cesko-zasahne-vitr-o-sile-vichrice-muze-byt-i-zivotu-nebezpecny-39091
+                        # https://github.com/ytdl-org/youtube-dl/pull/14844
+                        fragments = []
+                        segment_duration = float_or_none(
+                            representation_ms_info['segment_duration'],
+                            representation_ms_info['timescale']) if 'segment_duration' in representation_ms_info else None
+                        for segment_url in representation_ms_info['segment_urls']:
+                            fragment = {
+                                location_key(segment_url): segment_url,
+                            }
+                            if segment_duration:
+                                fragment['duration'] = segment_duration
+                            fragments.append(fragment)
+                        representation_ms_info['fragments'] = fragments
+                    # If there is a fragments key available then we correctly recognized fragmented media.
+                    # Otherwise we will assume unfragmented media with direct access. Technically, such
+                    # assumption is not necessarily correct since we may simply have no support for
+                    # some forms of fragmented media renditions yet, but for now we'll use this fallback.
+                    if 'fragments' in representation_ms_info:
+                        f.update({
+                            # NB: mpd_url may be empty when MPD manifest is parsed from a string
+                            'url': mpd_url or base_url,
+                            'fragment_base_url': base_url,
+                            'fragments': [],
+                            'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
+                        })
+                        if 'initialization_url' in representation_ms_info:
+                            initialization_url = representation_ms_info['initialization_url']
+                            if not f.get('url'):
+                                f['url'] = initialization_url
+                            f['fragments'].append({location_key(initialization_url): initialization_url})
+                        f['fragments'].extend(representation_ms_info['fragments'])
                     else:
-                        self.report_warning('Unknown MIME type %s in DASH manifest' % mime_type)
-        return formats
+                        # Assuming direct URL to unfragmented media.
+                        f['url'] = base_url
+                    if content_type in ('video', 'audio') or mime_type == 'image/jpeg':
+                        formats.append(f)
+                    elif content_type == 'text':
+                        subtitles.setdefault(lang or 'und', []).append(f)
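Once a representation has (or lacks) a `fragments` list, the code above either emits a fragmented `http_dash_segments`/`mhtml` format or falls back to a direct URL, then routes the entry into `formats` or, for text, into the language-keyed `subtitles` dict. A schematic sketch of that final dispatch, under the same assumptions:

    def finalize_representation(f, ms_info, content_type, mime_type, base_url,
                                mpd_url, lang, formats, subtitles):
        # Sketch of the dispatch at the end of the representation loop above.
        if 'fragments' in ms_info:
            f.update({
                'url': mpd_url or base_url,  # mpd_url may be '' for inline manifests
                'fragment_base_url': base_url,
                'fragments': ms_info['fragments'],
                'protocol': 'http_dash_segments' if mime_type != 'image/jpeg' else 'mhtml',
            })
        else:
            f['url'] = base_url  # unfragmented media: direct download
        if content_type in ('video', 'audio') or mime_type == 'image/jpeg':
            formats.append(f)
        elif content_type == 'text':
            subtitles.setdefault(lang or 'und', []).append(f)

    formats, subtitles = [], {}
    finalize_representation({'ext': 'ttml'}, {}, 'text', 'application/ttml+xml',
                            'https://example.com/subs.ttml', 'https://example.com/m.mpd',
                            'en', formats, subtitles)
    print(subtitles)  # {'en': [{'ext': 'ttml', 'url': 'https://example.com/subs.ttml'}]}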

-    def _extract_ism_formats(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
+        return formats, subtitles
+
+    def _extract_ism_formats(self, *args, **kwargs):
+        fmts, subs = self._extract_ism_formats_and_subtitles(*args, **kwargs)
+        if subs:
+            self._report_ignoring_subs('ISM')
+        return fmts
+
+    def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
         res = self._download_xml_handle(
             ism_url, video_id,
-            note=note or 'Downloading ISM manifest',
-            errnote=errnote or 'Failed to download ISM manifest',
+            note='Downloading ISM manifest' if note is None else note,
+            errnote='Failed to download ISM manifest' if errnote is None else errnote,
             fatal=fatal, data=data, headers=headers, query=query)
         if res is False:
-            return []
+            return [], {}
         ism_doc, urlh = res
         if ism_doc is None:
-            return []
+            return [], {}

-        return self._parse_ism_formats(ism_doc, urlh.geturl(), ism_id)
+        return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)

-    def _parse_ism_formats(self, ism_doc, ism_url, ism_id=None):
+    def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
         """
         Parse formats from ISM manifest.
         References:
         1. [MS-SSTR]: Smooth Streaming Protocol,
            https://msdn.microsoft.com/en-us/library/ff469518.aspx
         """
-        if ism_doc.get('IsLive') == 'TRUE' or ism_doc.find('Protection') is not None:
-            return []
+        if ism_doc.get('IsLive') == 'TRUE':
+            return [], {}

         duration = int(ism_doc.attrib['Duration'])
         timescale = int_or_none(ism_doc.get('TimeScale')) or 10000000

         formats = []
+        subtitles = {}
         for stream in ism_doc.findall('StreamIndex'):
             stream_type = stream.get('Type')
-            if stream_type not in ('video', 'audio'):
+            if stream_type not in ('video', 'audio', 'text'):
                 continue
             url_pattern = stream.attrib['Url']
             stream_timescale = int_or_none(stream.get('TimeScale')) or timescale
             stream_name = stream.get('Name')
+            stream_language = stream.get('Language', 'und')
             for track in stream.findall('QualityLevel'):
-                fourcc = track.get('FourCC', 'AACL' if track.get('AudioTag') == '255' else None)
+                fourcc = track.get('FourCC') or ('AACL' if track.get('AudioTag') == '255' else None)
                 # TODO: add support for WVC1 and WMAP
-                if fourcc not in ('H264', 'AVC1', 'AACL'):
+                if fourcc not in ('H264', 'AVC1', 'AACL', 'TTML'):
                     self.report_warning('%s is not a supported codec' % fourcc)
                     continue
                 tbr = int(track.attrib['Bitrate']) // 1000
@@ -2451,35 +2960,55 @@ class InfoExtractor(object):
                     format_id.append(stream_name)
                 format_id.append(compat_str(tbr))

-                formats.append({
-                    'format_id': '-'.join(format_id),
-                    'url': ism_url,
-                    'manifest_url': ism_url,
-                    'ext': 'ismv' if stream_type == 'video' else 'isma',
-                    'width': width,
-                    'height': height,
-                    'tbr': tbr,
-                    'asr': sampling_rate,
-                    'vcodec': 'none' if stream_type == 'audio' else fourcc,
-                    'acodec': 'none' if stream_type == 'video' else fourcc,
-                    'protocol': 'ism',
-                    'fragments': fragments,
-                    '_download_params': {
-                        'duration': duration,
-                        'timescale': stream_timescale,
-                        'width': width or 0,
-                        'height': height or 0,
-                        'fourcc': fourcc,
-                        'codec_private_data': track.get('CodecPrivateData'),
-                        'sampling_rate': sampling_rate,
-                        'channels': int_or_none(track.get('Channels', 2)),
-                        'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
-                        'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
-                    },
-                })
-        return formats
+                if stream_type == 'text':
+                    subtitles.setdefault(stream_language, []).append({
+                        'ext': 'ismt',
+                        'protocol': 'ism',
+                        'url': ism_url,
+                        'manifest_url': ism_url,
+                        'fragments': fragments,
+                        '_download_params': {
+                            'stream_type': stream_type,
+                            'duration': duration,
+                            'timescale': stream_timescale,
+                            'fourcc': fourcc,
+                            'language': stream_language,
+                            'codec_private_data': track.get('CodecPrivateData'),
+                        }
+                    })
+                elif stream_type in ('video', 'audio'):
+                    formats.append({
+                        'format_id': '-'.join(format_id),
+                        'url': ism_url,
+                        'manifest_url': ism_url,
+                        'ext': 'ismv' if stream_type == 'video' else 'isma',
+                        'width': width,
+                        'height': height,
+                        'tbr': tbr,
+                        'asr': sampling_rate,
+                        'vcodec': 'none' if stream_type == 'audio' else fourcc,
+                        'acodec': 'none' if stream_type == 'video' else fourcc,
+                        'protocol': 'ism',
+                        'fragments': fragments,
+                        'has_drm': ism_doc.find('Protection') is not None,
+                        '_download_params': {
+                            'stream_type': stream_type,
+                            'duration': duration,
+                            'timescale': stream_timescale,
+                            'width': width or 0,
+                            'height': height or 0,
+                            'fourcc': fourcc,
+                            'language': stream_language,
+                            'codec_private_data': track.get('CodecPrivateData'),
+                            'sampling_rate': sampling_rate,
+                            'channels': int_or_none(track.get('Channels', 2)),
+                            'bits_per_sample': int_or_none(track.get('BitsPerSample', 16)),
+                            'nal_unit_length_field': int_or_none(track.get('NALUnitLengthField', 4)),
+                        },
+                    })
+        return formats, subtitles
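In Smooth Streaming the `Url` pattern of a StreamIndex conventionally looks like `QualityLevels({bitrate})/Fragments(video={start time})`, so fragment URLs are produced by substituting the track bitrate and each fragment's start offset; with the change above, TTML text tracks now land in `subtitles` instead of being dropped. A rough sketch of the URL expansion — the pattern and numbers are a generic MS-SSTR-style example, not values taken from this diff:

    url_pattern = 'QualityLevels({bitrate})/Fragments(video={start time})'
    bitrate = 1500000
    fragment_starts = [0, 20000000, 40000000]  # in 100 ns ticks (timescale 10000000)

    fragment_urls = [
        url_pattern.replace('{bitrate}', str(bitrate)).replace('{start time}', str(t))
        for t in fragment_starts
    ]
    print(fragment_urls[1])
    # QualityLevels(1500000)/Fragments(video=20000000)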

-    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None):
+    def _parse_html5_media_entries(self, base_url, webpage, video_id, m3u8_id=None, m3u8_entry_protocol='m3u8', mpd_id=None, preference=None, quality=None):
         def absolute_url(item_url):
             return urljoin(base_url, item_url)
@@ -2502,7 +3031,7 @@ class InfoExtractor(object):
                     formats = self._extract_m3u8_formats(
                         full_url, video_id, ext='mp4',
                         entry_protocol=m3u8_entry_protocol, m3u8_id=m3u8_id,
-                        preference=preference, fatal=False)
+                        preference=preference, quality=quality, fatal=False)
                 elif ext == 'mpd':
                     is_plain_url = False
                     formats = self._extract_mpd_formats(
@@ -2602,7 +3131,13 @@ class InfoExtractor(object):
                 entries.append(media_info)
         return entries

-    def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
+    def _extract_akamai_formats(self, *args, **kwargs):
+        fmts, subs = self._extract_akamai_formats_and_subtitles(*args, **kwargs)
+        if subs:
+            self._report_ignoring_subs('akamai')
+        return fmts
+
+    def _extract_akamai_formats_and_subtitles(self, manifest_url, video_id, hosts={}):
         signed = 'hdnea=' in manifest_url
         if not signed:
             # https://learn.akamai.com/en-us/webhelp/media-services-on-demand/stream-packaging-user-guide/GUID-BE6C0F73-1E06-483B-B0EA-57984B91B7F9.html
@@ -2611,6 +3146,7 @@ class InfoExtractor(object):
                 '', manifest_url).strip('?')

         formats = []
+        subtitles = {}

         hdcore_sign = 'hdcore=3.7.0'
         f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
@@ -2629,10 +3165,11 @@ class InfoExtractor(object):
         hls_host = hosts.get('hls')
         if hls_host:
             m3u8_url = re.sub(r'(https?://)[^/]+', r'\1' + hls_host, m3u8_url)
-        m3u8_formats = self._extract_m3u8_formats(
+        m3u8_formats, m3u8_subtitles = self._extract_m3u8_formats_and_subtitles(
             m3u8_url, video_id, 'mp4', 'm3u8_native',
             m3u8_id='hls', fatal=False)
         formats.extend(m3u8_formats)
+        subtitles = self._merge_subtitles(subtitles, m3u8_subtitles)

         http_host = hosts.get('http')
         if http_host and m3u8_formats and not signed:
@@ -2656,7 +3193,7 @@ class InfoExtractor(object):
                     formats.append(http_f)
                     i += 1

-        return formats
+        return formats, subtitles
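The Akamai path above folds the HLS-derived subtitle dict into its own via `self._merge_subtitles(...)`: a per-language list merge with de-duplication (the variadic definition appears further down in this diff). A simplified stand-alone model, with de-duplication reduced to exact-dict comparison for brevity:

    def merge_subtitles(*dicts):
        # Simplified model of InfoExtractor._merge_subtitles: language by
        # language, keeping the first occurrence of duplicate entries.
        target = {}
        for d in dicts:
            for lang, subs in d.items():
                merged = target.setdefault(lang, [])
                merged.extend(s for s in subs if s not in merged)
        return target

    a = {'en': [{'url': 'https://example.com/en.vtt'}]}
    b = {'en': [{'url': 'https://example.com/en.vtt'}], 'de': [{'url': 'https://example.com/de.vtt'}]}
    print(merge_subtitles(a, b))
    # {'en': [{'url': 'https://example.com/en.vtt'}], 'de': [{'url': 'https://example.com/de.vtt'}]}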

     def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
         query = compat_urlparse.urlparse(url).query
@@ -2879,7 +3416,7 @@ class InfoExtractor(object):
             if fatal:
                 raise ExtractorError(msg)
             else:
-                self._downloader.report_warning(msg)
+                self.report_warning(msg)
         return res

     def _float(self, v, name, fatal=False, **kwargs):
@@ -2889,7 +3426,7 @@ class InfoExtractor(object):
             if fatal:
                 raise ExtractorError(msg)
             else:
-                self._downloader.report_warning(msg)
+                self.report_warning(msg)
         return res

     def _set_cookie(self, domain, name, value, expire_time=None, port=None,
@@ -2963,14 +3500,40 @@ class InfoExtractor(object):
         return not any_restricted

     def extract_subtitles(self, *args, **kwargs):
-        if (self._downloader.params.get('writesubtitles', False)
-                or self._downloader.params.get('listsubtitles')):
+        if (self.get_param('writesubtitles', False)
+                or self.get_param('listsubtitles')):
             return self._get_subtitles(*args, **kwargs)
         return {}

     def _get_subtitles(self, *args, **kwargs):
         raise NotImplementedError('This method must be implemented by subclasses')

+    def extract_comments(self, *args, **kwargs):
+        if not self.get_param('getcomments'):
+            return None
+        generator = self._get_comments(*args, **kwargs)
+
+        def extractor():
+            comments = []
+            try:
+                while True:
+                    comments.append(next(generator))
+            except KeyboardInterrupt:
+                interrupted = True
+                self.to_screen('Interrupted by user')
+            except StopIteration:
+                interrupted = False
+            comment_count = len(comments)
+            self.to_screen(f'Extracted {comment_count} comments')
+            return {
+                'comments': comments,
+                'comment_count': None if interrupted else comment_count
+            }
+        return extractor
+
+    def _get_comments(self, *args, **kwargs):
+        raise NotImplementedError('This method must be implemented by subclasses')
+
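extract_comments wraps a subclass-provided generator in a closure that is drained lazily at download time: Ctrl-C becomes a partial result, and `comment_count` is left as None when interrupted since the true total is then unknown. Usage looks roughly like this; the generator below is a stand-in for a real `_get_comments` implementation:

    def make_extractor(generator, log=print):
        # Mirrors the closure returned by extract_comments().
        def extractor():
            comments = []
            try:
                for comment in generator:
                    comments.append(comment)
                interrupted = False
            except KeyboardInterrupt:
                interrupted = True
                log('Interrupted by user')
            log(f'Extracted {len(comments)} comments')
            return {'comments': comments,
                    'comment_count': None if interrupted else len(comments)}
        return extractor

    fetch = make_extractor({'id': i, 'text': f'comment {i}'} for i in range(3))
    print(fetch()['comment_count'])  # 3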
     @staticmethod
     def _merge_subtitle_items(subtitle_list1, subtitle_list2):
         """ Merge subtitle items for one language. Items with duplicated URLs
@@ -2981,16 +3544,18 @@ class InfoExtractor(object):
         return ret

     @classmethod
-    def _merge_subtitles(cls, subtitle_dict1, subtitle_dict2):
-        """ Merge two subtitle dictionaries, language by language. """
-        ret = dict(subtitle_dict1)
-        for lang in subtitle_dict2:
-            ret[lang] = cls._merge_subtitle_items(subtitle_dict1.get(lang, []), subtitle_dict2[lang])
-        return ret
+    def _merge_subtitles(cls, *dicts, target=None):
+        """ Merge subtitle dictionaries, language by language. """
+        if target is None:
+            target = {}
+        for d in dicts:
+            for lang, subs in d.items():
+                target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs)
+        return target

     def extract_automatic_captions(self, *args, **kwargs):
-        if (self._downloader.params.get('writeautomaticsub', False)
-                or self._downloader.params.get('listsubtitles')):
+        if (self.get_param('writeautomaticsub', False)
+                or self.get_param('listsubtitles')):
             return self._get_automatic_captions(*args, **kwargs)
         return {}

@@ -2998,9 +3563,11 @@ class InfoExtractor(object):
         raise NotImplementedError('This method must be implemented by subclasses')

     def mark_watched(self, *args, **kwargs):
-        if (self._downloader.params.get('mark_watched', False)
-                and (self._get_login_info()[0] is not None
-                     or self._downloader.params.get('cookiefile') is not None)):
+        if not self.get_param('mark_watched', False):
+            return
+        if (self._get_login_info()[0] is not None
+                or self.get_param('cookiefile')
+                or self.get_param('cookiesfrombrowser')):
             self._mark_watched(*args, **kwargs)

     def _mark_watched(self, *args, **kwargs):
@@ -3008,7 +3575,7 @@ class InfoExtractor(object):

     def geo_verification_headers(self):
         headers = {}
-        geo_verification_proxy = self._downloader.params.get('geo_verification_proxy')
+        geo_verification_proxy = self.get_param('geo_verification_proxy')
         if geo_verification_proxy:
             headers['Ytdl-request-proxy'] = geo_verification_proxy
         return headers
@@ -3019,6 +3586,33 @@ class InfoExtractor(object):

     def _generic_title(self, url):
         return compat_urllib_parse_unquote(os.path.splitext(url_basename(url))[0])

+    @staticmethod
+    def _availability(is_private=None, needs_premium=None, needs_subscription=None, needs_auth=None, is_unlisted=None):
+        all_known = all(map(
+            lambda x: x is not None,
+            (is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)))
+        return (
+            'private' if is_private
+            else 'premium_only' if needs_premium
+            else 'subscriber_only' if needs_subscription
+            else 'needs_auth' if needs_auth
+            else 'unlisted' if is_unlisted
+            else 'public' if all_known
+            else None)
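_availability collapses five partially-known flags into a single label, most restrictive first, and only dares to say 'public' when every flag is known to be False, returning None otherwise. For instance:

    def availability(is_private=None, needs_premium=None, needs_subscription=None,
                     needs_auth=None, is_unlisted=None):
        flags = (is_private, needs_premium, needs_subscription, needs_auth, is_unlisted)
        all_known = all(f is not None for f in flags)
        return ('private' if is_private
                else 'premium_only' if needs_premium
                else 'subscriber_only' if needs_subscription
                else 'needs_auth' if needs_auth
                else 'unlisted' if is_unlisted
                else 'public' if all_known
                else None)

    print(availability(is_private=False, is_unlisted=True))  # unlisted
    print(availability(False, False, False, False, False))   # public
    print(availability(is_private=False))                    # None (rest unknown)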
+
+    def _configuration_arg(self, key, default=NO_DEFAULT, casesense=False):
+        '''
+        @returns    A list of values for the extractor argument given by "key"
+                    or "default" if no such key is present
+        @param default     The default value to return when the key is not present (default: [])
+        @param casesense   When false, the values are converted to lower case
+        '''
+        val = traverse_obj(
+            self._downloader.params, ('extractor_args', self.ie_key().lower(), key))
+        if val is None:
+            return [] if default is NO_DEFAULT else default
+        return list(val) if casesense else [x.lower() for x in val]
+

 class SearchInfoExtractor(InfoExtractor):
     """
@@ -3051,12 +3645,19 @@ class SearchInfoExtractor(InfoExtractor):
         if n <= 0:
             raise ExtractorError('invalid download number %s for query "%s"' % (n, query))
         elif n > self._MAX_RESULTS:
-            self._downloader.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
+            self.report_warning('%s returns max %i results (you requested %i)' % (self._SEARCH_KEY, self._MAX_RESULTS, n))
             n = self._MAX_RESULTS
         return self._get_n_results(query, n)

     def _get_n_results(self, query, n):
-        """Get a specified number of results for a query"""
+        """Get a specified number of results for a query.
+        Either this function or _search_results must be overridden by subclasses """
+        return self.playlist_result(
+            itertools.islice(self._search_results(query), 0, None if n == float('inf') else n),
+            query, query)
+
+    def _search_results(self, query):
+        """Returns an iterator of search results"""
         raise NotImplementedError('This method must be implemented by subclasses')

     @property
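With the refactored _get_n_results, a search extractor only has to yield results one by one from _search_results; `itertools.islice` then cuts the stream at n, and `n == float('inf')` (the "search all" case) disables the cut entirely. The mechanics in isolation, with an invented paged generator standing in for a real implementation:

    import itertools

    def search_results(query):
        # Stand-in for a paged _search_results implementation.
        for page in range(100):
            for i in range(10):
                yield {'title': f'{query} result {page * 10 + i}'}

    def get_n_results(query, n):
        limit = None if n == float('inf') else n
        return list(itertools.islice(search_results(query), 0, limit))

    # Laziness means only the first 25 items (2.5 "pages") are ever generated.
    print(len(get_n_results('cats', 25)))  # 25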