| | | |
|---|---|---|
| author | Jesús <heckyel@hyperbola.info> | 2022-12-01 23:33:30 +0800 |
| committer | Jesús <heckyel@hyperbola.info> | 2022-12-01 23:33:30 +0800 |
| commit | ef1a420d6de7876b7b6732abc8ab78351c5a2bfc (patch) | |
| tree | 9ba7d8409aa5baa696f5fb10db5d395c2f050276 /yt_dlp/utils.py | |
| parent | 16e8548f6a720a78679e417a20a300db2036bf6c (diff) | |
| parent | ddf1e22d48530819d60220d0bdc36e20f5b8483b (diff) | |
| download | hypervideo-pre-ef1a420d6de7876b7b6732abc8ab78351c5a2bfc.tar.lz, hypervideo-pre-ef1a420d6de7876b7b6732abc8ab78351c5a2bfc.tar.xz, hypervideo-pre-ef1a420d6de7876b7b6732abc8ab78351c5a2bfc.zip | |
update from upstream 2022-12-01 UTC+8
Diffstat (limited to 'yt_dlp/utils.py')

| | | |
|---|---|---|
| -rw-r--r-- | yt_dlp/utils.py | 1255 |

1 file changed, 992 insertions, 263 deletions
```diff
diff --git a/yt_dlp/utils.py b/yt_dlp/utils.py
index fba64be5a..51d74363b 100644
--- a/yt_dlp/utils.py
+++ b/yt_dlp/utils.py
@@ -1,11 +1,12 @@
+import asyncio
 import atexit
 import base64
 import binascii
 import calendar
 import codecs
 import collections
+import collections.abc
 import contextlib
-import ctypes
 import datetime
 import email.header
 import email.utils
@@ -18,6 +19,7 @@ import html.parser
 import http.client
 import http.cookiejar
 import importlib.util
+import inspect
 import io
 import itertools
 import json
@@ -39,13 +41,14 @@ import tempfile
 import time
 import traceback
 import types
+import unicodedata
 import urllib.error
 import urllib.parse
 import urllib.request
 import xml.etree.ElementTree
 import zlib
 
-from .compat import asyncio, functools  # isort: split
+from .compat import functools  # isort: split
 from .compat import (
     compat_etree_fromstring,
     compat_expanduser,
@@ -146,22 +149,22 @@ MONTH_NAMES = {
     'fr': [
         'janvier', 'février', 'mars', 'avril', 'mai', 'juin',
         'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'],
+    # these follow the genitive grammatical case (dopełniacz)
+    # some websites might be using nominative, which will require another month list
+    # https://en.wikibooks.org/wiki/Polish/Noun_cases
+    'pl': ['stycznia', 'lutego', 'marca', 'kwietnia', 'maja', 'czerwca',
+           'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'],
 }
 
-KNOWN_EXTENSIONS = (
-    'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac',
-    'flv', 'f4v', 'f4a', 'f4b',
-    'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus',
-    'mkv', 'mka', 'mk3d',
-    'avi', 'divx',
-    'mov',
-    'asf', 'wmv', 'wma',
-    '3gp', '3g2',
-    'mp3',
-    'flac',
-    'ape',
-    'wav',
-    'f4f', 'f4m', 'm3u8', 'smil')
+# From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42
+TIMEZONE_NAMES = {
+    'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0,
+    'AST': -4, 'ADT': -3,  # Atlantic (used in Canada)
+    'EST': -5, 'EDT': -4,  # Eastern
+    'CST': -6, 'CDT': -5,  # Central
+    'MST': -7, 'MDT': -6,  # Mountain
+    'PST': -8, 'PDT': -7   # Pacific
+}
 
 # needed for sanitizing filenames in restricted mode
 ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ',
@@ -221,6 +224,7 @@ DATE_FORMATS_DAY_FIRST.extend([
     '%d/%m/%Y',
     '%d/%m/%y',
     '%d/%m/%Y %H:%M:%S',
+    '%d-%m-%Y %H:%M',
 ])
 
 DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS)
@@ -233,7 +237,7 @@ DATE_FORMATS_MONTH_FIRST.extend([
 ])
 
 PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)"
-JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>'
+JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?}|\[.+?\])\s*</script>'
 
 NUMBER_RE = r'\d+(?:\.\d+)?'
 
@@ -409,18 +413,20 @@ def get_elements_html_by_attribute(*args, **kwargs):
     return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)]
 
 
-def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True):
+def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True):
     """
     Return the text (content) and the html (whole) of the tag with the specified
    attribute in the passed HTML document
     """
+    if not value:
+        return
 
     quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?'
 
     value = re.escape(value) if escape_value else value
 
     partial_element_re = rf'''(?x)
-        <(?P<tag>[a-zA-Z0-9:._-]+)
+        <(?P<tag>{tag})
         (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)?
         \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q)
         '''
@@ -476,6 +482,7 @@ class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
             raise self.HTMLBreakOnClosingTagException()
 
 
+# XXX: This should be far less strict
 def get_element_text_and_html_by_tag(tag, html):
     """
     For the first element with the specified tag in the passed HTML document
@@ -520,6 +527,7 @@ class HTMLAttributeParser(html.parser.HTMLParser):
 
     def handle_starttag(self, tag, attrs):
         self.attrs = dict(attrs)
+        raise compat_HTMLParseError('done')
 
 
 class HTMLListAttrsParser(html.parser.HTMLParser):
@@ -593,9 +601,14 @@ class LenientJSONDecoder(json.JSONDecoder):
     def decode(self, s):
         if self.transform_source:
             s = self.transform_source(s)
-        if self.ignore_extra:
-            return self.raw_decode(s.lstrip())[0]
-        return super().decode(s)
+        try:
+            if self.ignore_extra:
+                return self.raw_decode(s.lstrip())[0]
+            return super().decode(s)
+        except json.JSONDecodeError as e:
+            if e.pos is not None:
+                raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos)
+            raise
 
 
 def sanitize_open(filename, open_mode):
@@ -611,7 +624,10 @@ def sanitize_open(filename, open_mode):
     if filename == '-':
         if sys.platform == 'win32':
             import msvcrt
-            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+
+            # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout
+            with contextlib.suppress(io.UnsupportedOperation):
+                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+
         return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
 
     for attempt in range(2):
@@ -657,6 +673,9 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
             return ACCENT_CHARS[char]
         elif not restricted and char == '\n':
             return '\0 '
+        elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\':
+            # Replace with their full-width unicode counterparts
+            return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))
         elif char == '?' or ord(char) < 32 or ord(char) == 127:
             return ''
         elif char == '"':
@@ -669,11 +688,14 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT):
             return '\0_'
         return char
 
+    # Replace look-alike Unicode glyphs
+    if restricted and (is_id is NO_DEFAULT or not is_id):
+        s = unicodedata.normalize('NFKC', s)
     s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)  # Handle timestamps
     result = ''.join(map(replace_insane, s))
     if is_id is NO_DEFAULT:
-        result = re.sub('(\0.)(?:(?=\\1)..)+', r'\1', result)  # Remove repeated substitute chars
-        STRIP_RE = '(?:\0.|[ _-])*'
+        result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result)  # Remove repeated substitute chars
+        STRIP_RE = r'(?:\0.|[ _-])*'
         result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result)  # Remove substitute chars from start/end
     result = result.replace('\0', '') or '_'
 
@@ -715,13 +737,13 @@ def sanitize_path(s, force=False):
     return os.path.join(*sanitized_path)
 
 
-def sanitize_url(url):
+def sanitize_url(url, *, scheme='http'):
     # Prepend protocol-less URLs with `http:` scheme in order to mitigate
     # the number of unwanted failures due to missing protocol
     if url is None:
         return
     elif url.startswith('//'):
-        return 'http:%s' % url
+        return f'{scheme}:{url}'
     # Fix some common typos seen so far
     COMMON_TYPOS = (
         # https://github.com/ytdl-org/youtube-dl/issues/15649
```
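Note: the new full-width branch in `sanitize_filename` is easiest to see with concrete characters. A minimal standalone sketch of just that mapping (the offset trick shifts ASCII into the Unicode full-width block; `/` and `\` get dedicated slash glyphs):

```python
# Sketch of the full-width substitution applied in non-restricted, non-ID mode.
def to_fullwidth(char):
    assert char in '"*:<>?|/\\'
    return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0))

print(to_fullwidth(':'))   # '：' (U+FF1A FULLWIDTH COLON, 0x3A + 0xFEE0)
print(to_fullwidth('?'))   # '？' (U+FF1F FULLWIDTH QUESTION MARK)
print(to_fullwidth('/'))   # '⧸' (U+29F8 BIG SOLIDUS)
```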
For example, - # 'Éric' should be decoded as 'Éric'. + # TODO: HTML5 allows entities without a semicolon. + # E.g. 'Éric' should be decoded as 'Éric'. if entity_with_semicolon in html.entities.html5: return html.entities.html5[entity_with_semicolon] @@ -822,8 +844,8 @@ def escapeHTML(text): def process_communicate_or_kill(p, *args, **kwargs): - write_string('DeprecationWarning: yt_dlp.utils.process_communicate_or_kill is deprecated ' - 'and may be removed in a future version. Use yt_dlp.utils.Popen.communicate_or_kill instead') + deprecation_warning(f'"{__name__}.process_communicate_or_kill" is deprecated and may be removed ' + f'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead') return Popen.communicate_or_kill(p, *args, **kwargs) @@ -834,12 +856,35 @@ class Popen(subprocess.Popen): else: _startupinfo = None - def __init__(self, *args, text=False, **kwargs): + @staticmethod + def _fix_pyinstaller_ld_path(env): + """Restore LD_LIBRARY_PATH when using PyInstaller + Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations + https://github.com/yt-dlp/yt-dlp/issues/4573 + """ + if not hasattr(sys, '_MEIPASS'): + return + + def _fix(key): + orig = env.get(f'{key}_ORIG') + if orig is None: + env.pop(key, None) + else: + env[key] = orig + + _fix('LD_LIBRARY_PATH') # Linux + _fix('DYLD_LIBRARY_PATH') # macOS + + def __init__(self, *args, env=None, text=False, **kwargs): + if env is None: + env = os.environ.copy() + self._fix_pyinstaller_ld_path(env) + if text is True: kwargs['universal_newlines'] = True # For 3.6 compatibility kwargs.setdefault('encoding', 'utf-8') kwargs.setdefault('errors', 'replace') - super().__init__(*args, **kwargs, startupinfo=self._startupinfo) + super().__init__(*args, env=env, **kwargs, startupinfo=self._startupinfo) def communicate_or_kill(self, *args, **kwargs): try: @@ -854,10 +899,11 @@ class Popen(subprocess.Popen): self.wait(timeout=timeout) @classmethod - def run(cls, *args, **kwargs): + def run(cls, *args, timeout=None, **kwargs): with cls(*args, **kwargs) as proc: - stdout, stderr = proc.communicate_or_kill() - return stdout or '', stderr or '', proc.returncode + default = '' if proc.text_mode else b'' + stdout, stderr = proc.communicate_or_kill(timeout=timeout) + return stdout or default, stderr or default, proc.returncode def get_subprocess_encoding(): @@ -944,22 +990,42 @@ def make_HTTPS_handler(params, **kwargs): context.options |= 4 # SSL_OP_LEGACY_SERVER_CONNECT # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998 context.set_ciphers('DEFAULT') + elif ( + sys.version_info < (3, 10) + and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1) + and not ssl.OPENSSL_VERSION.startswith('LibreSSL') + ): + # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1]. + # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting + # in some situations [2][3]. + # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely + # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe. + # LibreSSL is excluded until further investigation due to cipher support issues [5][6]. + # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536 + # 2. https://github.com/yt-dlp/yt-dlp/issues/4627 + # 3. https://github.com/yt-dlp/yt-dlp/pull/5294 + # 4. https://peps.python.org/pep-0644/ + # 5. 
```diff
@@ -944,22 +990,42 @@ def make_HTTPS_handler(params, **kwargs):
         context.options |= 4  # SSL_OP_LEGACY_SERVER_CONNECT
         # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998
         context.set_ciphers('DEFAULT')
+    elif (
+        sys.version_info < (3, 10)
+        and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
+        and not ssl.OPENSSL_VERSION.startswith('LibreSSL')
+    ):
+        # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1].
+        # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting
+        # in some situations [2][3].
+        # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely
+        # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe.
+        # LibreSSL is excluded until further investigation due to cipher support issues [5][6].
+        # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536
+        # 2. https://github.com/yt-dlp/yt-dlp/issues/4627
+        # 3. https://github.com/yt-dlp/yt-dlp/pull/5294
+        # 4. https://peps.python.org/pep-0644/
+        # 5. https://peps.python.org/pep-0644/#libressl-support
+        # 6. https://github.com/yt-dlp/yt-dlp/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368
+        context.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM')
+        context.minimum_version = ssl.TLSVersion.TLSv1_2
 
     context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE
     if opts_check_certificate:
         if has_certifi and 'no-certifi' not in params.get('compat_opts', []):
             context.load_verify_locations(cafile=certifi.where())
-        try:
-            context.load_default_certs()
-            # Work around the issue in load_default_certs when there are bad certificates. See:
-            # https://github.com/yt-dlp/yt-dlp/issues/1060,
-            # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
-        except ssl.SSLError:
-            # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
-            if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
-                for storename in ('CA', 'ROOT'):
-                    _ssl_load_windows_store_certs(context, storename)
-            context.set_default_verify_paths()
+        else:
+            try:
+                context.load_default_certs()
+                # Work around the issue in load_default_certs when there are bad certificates. See:
+                # https://github.com/yt-dlp/yt-dlp/issues/1060,
+                # https://bugs.python.org/issue35665, https://bugs.python.org/issue45312
+            except ssl.SSLError:
+                # enum_certificates is not present in mingw python. See https://github.com/yt-dlp/yt-dlp/issues/1151
+                if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'):
+                    for storename in ('CA', 'ROOT'):
+                        _ssl_load_windows_store_certs(context, storename)
+                context.set_default_verify_paths()
 
     client_certfile = params.get('client_certificate')
     if client_certfile:
@@ -1028,13 +1094,16 @@ class ExtractorError(YoutubeDLError):
             self.exc_info = sys.exc_info()  # preserve original exception
             if isinstance(self.exc_info[1], ExtractorError):
                 self.exc_info = self.exc_info[1].exc_info
+        super().__init__(self.__msg)
 
-        super().__init__(''.join((
-            format_field(ie, None, '[%s] '),
-            format_field(video_id, None, '%s: '),
-            msg,
-            format_field(cause, None, ' (caused by %r)'),
-            '' if expected else bug_reports_message())))
+    @property
+    def __msg(self):
+        return ''.join((
+            format_field(self.ie, None, '[%s] '),
+            format_field(self.video_id, None, '%s: '),
+            self.orig_msg,
+            format_field(self.cause, None, ' (caused by %r)'),
+            '' if self.expected else bug_reports_message()))
 
     def format_traceback(self):
         return join_nonempty(
@@ -1042,6 +1111,12 @@ class ExtractorError(YoutubeDLError):
             self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]),
             delim='\n') or None
 
+    def __setattr__(self, name, value):
+        super().__setattr__(name, value)
+        if getattr(self, 'msg', None) and name not in ('msg', 'args'):
+            self.msg = self.__msg or type(self).__name__
+            self.args = (self.msg, )  # Cannot be property
+
 
 class UnsupportedError(ExtractorError):
     def __init__(self, url):
@@ -1068,6 +1143,14 @@ class GeoRestrictedError(ExtractorError):
         self.countries = countries
 
 
+class UserNotLive(ExtractorError):
+    """Error when a channel/user is not live"""
+
+    def __init__(self, msg=None, **kwargs):
+        kwargs['expected'] = True
+        super().__init__(msg or 'The channel is not currently live', **kwargs)
+
+
```
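Note: the new `__setattr__` hook keeps `msg`/`args` in sync when an `ExtractorError` is mutated after construction. A hypothetical illustration consistent with the hunk above (attribute names as in the diff; output sketched, not from a test run):

```python
# Assigning to attributes such as `video_id` after the fact re-renders
# e.msg and e.args via the private __msg property.
e = ExtractorError('Unable to extract data', video_id='abc123', expected=True)
print(e.msg)          # 'abc123: Unable to extract data'
e.video_id = 'xyz789'
print(e.msg)          # 'xyz789: Unable to extract data' -- regenerated by __setattr__
```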
```diff
 class DownloadError(YoutubeDLError):
     """Download Error exception.
@@ -1458,6 +1541,10 @@ class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
                 raise
 
 
+def is_path_like(f):
+    return isinstance(f, (str, bytes, os.PathLike))
+
+
 class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
     """
     See [1] for cookie file format.
@@ -1476,7 +1563,7 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
 
     def __init__(self, filename=None, *args, **kwargs):
         super().__init__(None, *args, **kwargs)
-        if self.is_path(filename):
+        if is_path_like(filename):
             filename = os.fspath(filename)
         self.filename = filename
 
@@ -1484,13 +1571,9 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
     def _true_or_false(cndn):
         return 'TRUE' if cndn else 'FALSE'
 
-    @staticmethod
-    def is_path(file):
-        return isinstance(file, (str, bytes, os.PathLike))
-
     @contextlib.contextmanager
     def open(self, file, *, write=False):
-        if self.is_path(file):
+        if is_path_like(file):
             with open(file, 'w' if write else 'r', encoding='utf-8') as f:
                 yield f
         else:
@@ -1571,7 +1654,7 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
                     if f'{line.strip()} '[0] in '[{"':
                         raise http.cookiejar.LoadError(
                             'Cookies file must be Netscape formatted, not JSON. See  '
-                            'https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl')
+                            'https://github.com/yt-dlp/yt-dlp/wiki/FAQ#how-do-i-pass-cookies-to-yt-dlp')
                     write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
                     continue
         cf.seek(0)
@@ -1678,7 +1761,11 @@ def extract_timezone(date_str):
             $)
         ''', date_str)
     if not m:
-        timezone = datetime.timedelta()
+        m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str)
+        timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip())
+        if timezone is not None:
+            date_str = date_str[:-len(m.group('tz'))]
+        timezone = datetime.timedelta(hours=timezone or 0)
     else:
         date_str = date_str[:-len(m.group('tz'))]
         if not m.group('sign'):
@@ -1740,7 +1827,8 @@ def unified_timestamp(date_str, day_first=True):
     if date_str is None:
         return None
 
-    date_str = re.sub(r'[,|]', '', date_str)
+    date_str = re.sub(r'\s+', ' ', re.sub(
+        r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str))
 
     pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0
     timezone, date_str = extract_timezone(date_str)
@@ -1762,9 +1850,10 @@ def unified_timestamp(date_str, day_first=True):
         with contextlib.suppress(ValueError):
             dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta)
             return calendar.timegm(dt.timetuple())
+
     timetuple = email.utils.parsedate_tz(date_str)
     if timetuple:
-        return calendar.timegm(timetuple) + pm_delta * 3600
+        return calendar.timegm(timetuple) + pm_delta * 3600 - timezone.total_seconds()
 
 
 def determine_ext(url, default_ext='unknown_video'):
```
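Note: together, the `TIMEZONE_NAMES` fallback in `extract_timezone` and the weekday-stripping regex in `unified_timestamp` are meant to handle inputs like the ones below. The expected epoch values are sketched from the logic above, not verified here:

```python
# Named North-American timezones now resolve via TIMEZONE_NAMES, and weekday
# words ('Wednesday', 'Sat', ...) are stripped before the format table is tried.
unified_timestamp('December 31 1969 20:00:01 EDT')            # EDT = UTC-4 -> 1
unified_timestamp('Wednesday 31 December 1969 18:01:26 MDT')  # MDT = UTC-7 -> 86
```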
```diff
@@ -1905,15 +1994,35 @@ class DateRange:
     def __str__(self):
         return f'{self.start.isoformat()} - {self.end.isoformat()}'
 
+    def __eq__(self, other):
+        return (isinstance(other, DateRange)
+                and self.start == other.start and self.end == other.end)
+
 
 def platform_name():
     """ Returns the platform name as a str """
-    res = platform.platform()
-    if isinstance(res, bytes):
-        res = res.decode(preferredencoding())
+    deprecation_warning(f'"{__name__}.platform_name" is deprecated, use "platform.platform" instead')
+    return platform.platform()
 
-    assert isinstance(res, str)
-    return res
+
+@functools.cache
+def system_identifier():
+    python_implementation = platform.python_implementation()
+    if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'):
+        python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3]
+    libc_ver = []
+    with contextlib.suppress(OSError):  # We may not have access to the executable
+        libc_ver = platform.libc_ver()
+
+    return 'Python %s (%s %s %s) - %s (%s%s)' % (
+        platform.python_version(),
+        python_implementation,
+        platform.machine(),
+        platform.architecture()[0],
+        platform.platform(),
+        ssl.OPENSSL_VERSION,
+        format_field(join_nonempty(*libc_ver, delim=' '), None, ', %s'),
+    )
 
 
 @functools.cache
@@ -1943,6 +2052,23 @@ def write_string(s, out=None, encoding=None):
     out.flush()
 
 
+def deprecation_warning(msg, *, printer=None, stacklevel=0, **kwargs):
+    from . import _IN_CLI
+    if _IN_CLI:
+        if msg in deprecation_warning._cache:
+            return
+        deprecation_warning._cache.add(msg)
+        if printer:
+            return printer(f'{msg}{bug_reports_message()}', **kwargs)
+        return write_string(f'ERROR: {msg}{bug_reports_message()}\n', **kwargs)
+    else:
+        import warnings
+        warnings.warn(DeprecationWarning(msg), stacklevel=stacklevel + 3)
+
+
+deprecation_warning._cache = set()
+
+
 def bytes_to_intlist(bs):
     if not bs:
         return []
@@ -1967,6 +2093,7 @@ class LockingUnsupportedError(OSError):
 
 # Cross-platform file locking
 if sys.platform == 'win32':
+    import ctypes
     import ctypes.wintypes
     import msvcrt
 
@@ -2170,15 +2297,24 @@ def format_bytes(bytes):
     return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A'
 
 
-def lookup_unit_table(unit_table, s):
+def lookup_unit_table(unit_table, s, strict=False):
+    num_re = NUMBER_RE if strict else NUMBER_RE.replace(R'\.', '[,.]')
     units_re = '|'.join(re.escape(u) for u in unit_table)
-    m = re.match(
-        r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s)
+    m = (re.fullmatch if strict else re.match)(
+        rf'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s)
    if not m:
         return None
-    num_str = m.group('num').replace(',', '.')
+
+    num = float(m.group('num').replace(',', '.'))
     mult = unit_table[m.group('unit')]
-    return int(float(num_str) * mult)
+    return round(num * mult)
+
+
+def parse_bytes(s):
+    """Parse a string indicating a byte quantity into an integer"""
+    return lookup_unit_table(
+        {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])},
+        s.upper(), strict=True)
 
 
 def parse_filesize(s):
```
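Note: `parse_bytes` builds its unit table on the fly; the behaviour sketched below follows directly from the hunk (`round()` of float × 1024**i, strict full-match, input upper-cased first):

```python
# Generated table is {'': 1, 'K': 1024, 'M': 1024**2, ...}.
parse_bytes('500k')   # 512000
parse_bytes('1.5M')   # 1572864
parse_bytes('2G')     # 2147483648
parse_bytes('12 GB')  # None -- 'GB' is not a unit key; strict=True requires a full match
```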
```diff
@@ -2346,9 +2482,10 @@ def fix_xml_ampersands(xml_str):
 
 def setproctitle(title):
     assert isinstance(title, str)
 
-    # ctypes in Jython is not complete
-    # http://bugs.jython.org/issue2148
-    if sys.platform.startswith('java'):
+    # Workaround for https://github.com/yt-dlp/yt-dlp/issues/4541
+    try:
+        import ctypes
+    except ImportError:
         return
 
     try:
@@ -2387,8 +2524,11 @@ def remove_quotes(s):
 
 
 def get_domain(url):
-    domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url)
-    return domain.group('domain') if domain else None
+    """
+    This implementation is inconsistent, but is kept for compatibility.
+    Use this only for "webpage_url_domain"
+    """
+    return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None
 
 
 def url_basename(url):
@@ -2397,7 +2537,7 @@ def url_basename(url):
 
 
 def base_url(url):
-    return re.match(r'https?://[^?#&]+/', url).group()
+    return re.match(r'https?://[^?#]+/', url).group()
 
 
 def urljoin(base, path):
@@ -2482,9 +2622,13 @@ def strftime_or_none(timestamp, date_format, default=None):
     datetime_object = None
     try:
         if isinstance(timestamp, (int, float)):  # unix timestamp
-            datetime_object = datetime.datetime.utcfromtimestamp(timestamp)
+            # Using naive datetime here can break timestamp() in Windows
+            # Ref: https://github.com/yt-dlp/yt-dlp/issues/5185, https://github.com/python/cpython/issues/94414
+            datetime_object = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc)
         elif isinstance(timestamp, str):  # assume YYYYMMDD
             datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d')
+        date_format = re.sub(  # Support %s on windows
+            r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format)
         return datetime_object.strftime(date_format)
     except (ValueError, TypeError, AttributeError):
         return default
@@ -2571,9 +2715,7 @@ def check_executable(exe, args=[]):
     return exe
 
 
-def _get_exe_version_output(exe, args, *, to_screen=None):
-    if to_screen:
-        to_screen(f'Checking exe version: {shell_quote([exe] + args)}')
+def _get_exe_version_output(exe, args):
     try:
         # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers
         # SIGTTOU if yt-dlp is run in the background.
@@ -2647,7 +2789,7 @@ class LazyList(collections.abc.Sequence):
 
     @staticmethod
     def _reverse_index(x):
-        return None if x is None else -(x + 1)
+        return None if x is None else ~x
 
     def __getitem__(self, idx):
         if isinstance(idx, slice):
@@ -2825,10 +2967,10 @@ class PlaylistEntries:
             self.is_exhausted = True
 
         requested_entries = info_dict.get('requested_entries')
-        self.is_incomplete = bool(requested_entries)
+        self.is_incomplete = requested_entries is not None
         if self.is_incomplete:
             assert self.is_exhausted
-            self._entries = [self.MissingEntry] * max(requested_entries)
+            self._entries = [self.MissingEntry] * max(requested_entries or [0])
             for i, entry in zip(requested_entries, entries):
                 self._entries[i - 1] = entry
         elif isinstance(entries, (list, PagedList, LazyList)):
@@ -2897,7 +3039,7 @@ class PlaylistEntries:
                 if not self.is_incomplete:
                     raise self.IndexError()
                 if entry is self.MissingEntry:
-                    raise EntryNotInPlaylist(f'Entry {i} cannot be found')
+                    raise EntryNotInPlaylist(f'Entry {i + 1} cannot be found')
                 return entry
         else:
             def get_entry(i):
@@ -2977,8 +3119,8 @@ def escape_url(url):
     ).geturl()
 
 
-def parse_qs(url):
-    return urllib.parse.parse_qs(urllib.parse.urlparse(url).query)
+def parse_qs(url, **kwargs):
+    return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs)
 
 
 def read_batch_urls(batch_fd):
@@ -2993,7 +3135,7 @@ def read_batch_urls(batch_fd):
         if not url or url.startswith(('#', ';', ']')):
             return False
         # "#" cannot be stripped out since it is part of the URI
-        # However, it can be safely stipped out if follwing a whitespace
+        # However, it can be safely stripped out if following a whitespace
         return re.split(r'\s#', url, 1)[0].rstrip()
 
     with contextlib.closing(batch_fd) as fd:
@@ -3086,6 +3228,10 @@ def multipart_encode(data, boundary=None):
     return out, content_type
 
 
+def variadic(x, allowed_types=(str, bytes, dict)):
+    return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
+
+
 def dict_get(d, key_or_keys, default=None, skip_false_values=True):
     for val in map(d.get, variadic(key_or_keys)):
         if val is not None and (val or not skip_false_values):
             return val
@@ -3097,7 +3243,7 @@ def try_call(*funcs, expected_type=None, args=[], kwargs={}):
     for f in funcs:
         try:
             val = f(*args, **kwargs)
-        except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError):
+        except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError):
             pass
         else:
             if expected_type is None or isinstance(val, expected_type):
@@ -3173,8 +3319,10 @@ def strip_jsonp(code):
         r'\g<callback_data>', code)
 
 
-def js_to_json(code, vars={}):
+def js_to_json(code, vars={}, *, strict=False):
     # vars is a dict of var, val pairs to substitute
+    STRING_QUOTES = '\'"'
+    STRING_RE = '|'.join(rf'{q}(?:\\.|[^\\{q}])*{q}' for q in STRING_QUOTES)
     COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n'
     SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*'
     INTEGER_TABLE = (
@@ -3182,6 +3330,15 @@ def js_to_json(code, vars={}):
         (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8),
     )
 
+    def process_escape(match):
+        JSON_PASSTHROUGH_ESCAPES = R'"\bfnrtu'
+        escape = match.group(1) or match.group(2)
+
+        return (Rf'\{escape}' if escape in JSON_PASSTHROUGH_ESCAPES
+                else R'\u00' if escape == 'x'
+                else '' if escape == '\n'
+                else escape)
+
     def fix_kv(m):
         v = m.group(0)
         if v in ('true', 'false', 'null'):
@@ -3189,42 +3346,42 @@ def js_to_json(code, vars={}):
         elif v in ('undefined', 'void 0'):
             return 'null'
         elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',':
-            return ""
-
-        if v[0] in ("'", '"'):
-            v = re.sub(r'(?s)\\.|"', lambda m: {
-                '"': '\\"',
-                "\\'": "'",
-                '\\\n': '',
-                '\\x': '\\u00',
-            }.get(m.group(0), m.group(0)), v[1:-1])
-        else:
-            for regex, base in INTEGER_TABLE:
-                im = re.match(regex, v)
-                if im:
-                    i = int(im.group(1), base)
-                    return '"%d":' % i if v.endswith(':') else '%d' % i
+            return ''
+
+        if v[0] in STRING_QUOTES:
+            escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v[1:-1])
+            return f'"{escaped}"'
+
+        for regex, base in INTEGER_TABLE:
+            im = re.match(regex, v)
+            if im:
+                i = int(im.group(1), base)
+                return f'"{i}":' if v.endswith(':') else str(i)
 
-            if v in vars:
-                return vars[v]
+        if v in vars:
+            return json.dumps(vars[v])
 
-        return '"%s"' % v
+        if not strict:
+            return f'"{v}"'
+
+        raise ValueError(f'Unknown value: {v}')
 
     def create_map(mobj):
         return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars))))
 
-    code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
     code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code)
+    if not strict:
+        code = re.sub(r'new Date\((".+")\)', r'\g<1>', code)
+        code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code)
 
-    return re.sub(r'''(?sx)
-        "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"|
-        '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'|
-        {comment}|,(?={skip}[\]}}])|
+    return re.sub(rf'''(?sx)
+        {STRING_RE}|
+        {COMMENT_RE}|,(?={SKIP_RE}[\]}}])|
         void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*|
-        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?|
-        [0-9]+(?={skip}:)|
+        \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?|
+        [0-9]+(?={SKIP_RE}:)|
         !+
-        '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code)
+        ''', fix_kv, code)
 
 
 def qualities(quality_ids):
```
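Note: the rewritten `js_to_json` tokenizer is easier to follow with a sample input. Assuming it behaves per the regexes above, the default (non-strict) mode still coerces bare identifiers to strings, while `strict=True` now raises:

```python
import json

js = "{name: 'video', id: 0x10, tags: ['a', 'b',], /* comment */ live: undefined}"
print(js_to_json(js))
# {"name": "video", "id": 16, "tags": ["a", "b"], "live": null}
json.loads(js_to_json(js))  # parses cleanly

js_to_json('{"a": undefinedVar}', strict=True)
# raises ValueError('Unknown value: undefinedVar')
```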
```diff
@@ -3402,24 +3559,23 @@ def parse_codecs(codecs_str):
         str.strip, codecs_str.strip().strip(',').split(','))))
     vcodec, acodec, scodec, hdr = None, None, None, None
     for full_codec in split_codecs:
-        parts = full_codec.split('.')
-        codec = parts[0].replace('0', '')
-        if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
-                     'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
-            if not vcodec:
-                vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec
-                if codec in ('dvh1', 'dvhe'):
-                    hdr = 'DV'
-                elif codec == 'av1' and len(parts) > 3 and parts[3] == '10':
-                    hdr = 'HDR10'
-                elif full_codec.replace('0', '').startswith('vp9.2'):
-                    hdr = 'HDR10'
-        elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
-            if not acodec:
-                acodec = full_codec
-        elif codec in ('stpp', 'wvtt',):
-            if not scodec:
-                scodec = full_codec
+        parts = re.sub(r'0+(?=\d)', '', full_codec).split('.')
+        if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2',
+                        'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'):
+            if vcodec:
+                continue
+            vcodec = full_codec
+            if parts[0] in ('dvh1', 'dvhe'):
+                hdr = 'DV'
+            elif parts[0] == 'av1' and traverse_obj(parts, 3) == '10':
+                hdr = 'HDR10'
+            elif parts[:2] == ['vp9', '2']:
+                hdr = 'HDR10'
+        elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac',
+                          'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'):
+            acodec = acodec or full_codec
+        elif parts[0] in ('stpp', 'wvtt'):
+            scodec = scodec or full_codec
         else:
             write_string(f'WARNING: Unknown codec {full_codec}\n')
     if vcodec or acodec or scodec:
@@ -3437,6 +3593,46 @@ def parse_codecs(codecs_str):
     return {}
 
 
+def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None):
+    assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts)
+
+    allow_mkv = not preferences or 'mkv' in preferences
+
+    if allow_mkv and max(len(acodecs), len(vcodecs)) > 1:
+        return 'mkv'  # TODO: any other format allows this?
+
+    # TODO: All codecs supported by parse_codecs isn't handled here
+    COMPATIBLE_CODECS = {
+        'mp4': {
+            'av1', 'hevc', 'avc1', 'mp4a',  # fourcc (m3u8, mpd)
+            'h264', 'aacl', 'ec-3',  # Set in ISM
+        },
+        'webm': {
+            'av1', 'vp9', 'vp8', 'opus', 'vrbs',
+            'vp9x', 'vp8x',  # in the webm spec
+        },
+    }
+
+    sanitize_codec = functools.partial(try_get, getter=lambda x: x[0].split('.')[0].replace('0', ''))
+    vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs)
+
+    for ext in preferences or COMPATIBLE_CODECS.keys():
+        codec_set = COMPATIBLE_CODECS.get(ext, set())
+        if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)):
+            return ext
+
+    COMPATIBLE_EXTS = (
+        {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'},
+        {'webm'},
+    )
+    for ext in preferences or vexts:
+        current_exts = {ext, *vexts, *aexts}
+        if ext == 'mkv' or current_exts == {ext} or any(
+                ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS):
+            return ext
+    return 'mkv' if allow_mkv else preferences[-1]
+
+
 def urlhandle_detect_ext(url_handle):
     getheader = url_handle.headers.get
```
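Note: the reworked normalization (`re.sub(r'0+(?=\d)', '', ...)` then split) should yield results like the following; the exact return shape comes from the unchanged tail of `parse_codecs`, so treat this as a sketch:

```python
parse_codecs('avc1.64001F, mp4a.40.2')
# {'vcodec': 'avc1.64001F', 'acodec': 'mp4a.40.2', 'dynamic_range': None}

parse_codecs('av01.0.12M.10')
# 'av01' normalizes to 'av1' and parts[3] == '10', so HDR10 is detected:
# {'vcodec': 'av01.0.12M.10', 'acodec': 'none', 'dynamic_range': 'HDR10'}
```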
""" - BOMS = [ - (b'\xef\xbb\xbf', 'utf-8'), - (b'\x00\x00\xfe\xff', 'utf-32-be'), - (b'\xff\xfe\x00\x00', 'utf-32-le'), - (b'\xff\xfe', 'utf-16-le'), - (b'\xfe\xff', 'utf-16-be'), - ] - encoding = 'utf-8' for bom, enc in BOMS: while first_bytes.startswith(bom): @@ -3499,7 +3697,7 @@ def determine_protocol(info_dict): ext = determine_ext(url) if ext == 'm3u8': - return 'm3u8' + return 'm3u8' if info_dict.get('is_live') else 'm3u8_native' elif ext == 'f4m': return 'f4m' @@ -3643,27 +3841,35 @@ def match_filter_func(filters): if not filters or any(match_str(f, info_dict, incomplete) for f in filters): return NO_DEFAULT if interactive and not incomplete else None else: - video_title = info_dict.get('title') or info_dict.get('id') or 'video' + video_title = info_dict.get('title') or info_dict.get('id') or 'entry' filter_str = ') | ('.join(map(str.strip, filters)) return f'{video_title} does not pass filter ({filter_str}), skipping ..' return _match_func -def download_range_func(chapters, ranges): - def inner(info_dict, ydl): +class download_range_func: + def __init__(self, chapters, ranges): + self.chapters, self.ranges = chapters, ranges + + def __call__(self, info_dict, ydl): + if not self.ranges and not self.chapters: + yield {} + warning = ('There are no chapters matching the regex' if info_dict.get('chapters') else 'Cannot match chapters since chapter information is unavailable') - for regex in chapters or []: + for regex in self.chapters or []: for i, chapter in enumerate(info_dict.get('chapters') or []): if re.search(regex, chapter['title']): warning = None yield {**chapter, 'index': i} - if chapters and warning: + if self.chapters and warning: ydl.to_screen(f'[info] {info_dict["id"]}: {warning}') - yield from ({'start_time': start, 'end_time': end} for start, end in ranges or []) + yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or []) - return inner + def __eq__(self, other): + return (isinstance(other, download_range_func) + and self.chapters == other.chapters and self.ranges == other.ranges) def parse_dfxp_time_expr(time_expr): @@ -4743,7 +4949,7 @@ def _base_n_table(n, table): raise ValueError('Either table or n must be specified') table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n] - if n != len(table): + if n and n != len(table): raise ValueError(f'base {n} exceeds table length {len(table)}') return table @@ -4771,8 +4977,8 @@ def decode_base_n(string, n=None, table=None): def decode_base(value, digits): - write_string('DeprecationWarning: yt_dlp.utils.decode_base is deprecated ' - 'and may be removed in a future version. Use yt_dlp.decode_base_n instead') + deprecation_warning(f'{__name__}.decode_base is deprecated and may be removed ' + f'in a future version. Use {__name__}.decode_base_n instead') return decode_base_n(value, table=digits) @@ -5143,106 +5349,177 @@ def load_plugins(name, suffix, namespace): def traverse_obj( - obj, *path_list, default=None, expected_type=None, get_all=True, + obj, *paths, default=NO_DEFAULT, expected_type=None, get_all=True, casesense=True, is_user_input=False, traverse_string=False): - ''' Traverse nested list/dict/tuple - @param path_list A list of paths which are checked one by one. 
-                            Each path is a list of keys where each key is a:
-                              - None:     Do nothing
-                              - string:   A dictionary key
-                              - int:      An index into a list
-                              - tuple:    A list of keys all of which will be traversed
-                              - Ellipsis: Fetch all values in the object
-                              - Function: Takes the key and value as arguments
-                                          and returns whether the key matches or not
-    @param default          Default value to return
-    @param expected_type    Only accept final value of this type (Can also be any callable)
-    @param get_all          Return all the values obtained from a path or only the first one
-    @param casesense        Whether to consider dictionary keys as case sensitive
-    @param is_user_input    Whether the keys are generated from user input. If True,
-                            strings are converted to int/slice if necessary
-    @param traverse_string  Whether to traverse inside strings. If True, any
-                            non-compatible object will also be converted into a string
-    # TODO: Write tests
-    '''
-    if not casesense:
-        _lower = lambda k: (k.lower() if isinstance(k, str) else k)
-        path_list = (map(_lower, variadic(path)) for path in path_list)
-
-    def _traverse_obj(obj, path, _current_depth=0):
-        nonlocal depth
-        path = tuple(variadic(path))
-        for i, key in enumerate(path):
-            if None in (key, obj):
-                return obj
-            if isinstance(key, (list, tuple)):
-                obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key]
-                key = ...
-
-            if key is ...:
-                obj = (obj.values() if isinstance(obj, dict)
-                       else obj if isinstance(obj, (list, tuple, LazyList))
-                       else str(obj) if traverse_string else [])
-                _current_depth += 1
-                depth = max(depth, _current_depth)
-                return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
-            elif callable(key):
-                if isinstance(obj, (list, tuple, LazyList)):
-                    obj = enumerate(obj)
-                elif isinstance(obj, dict):
-                    obj = obj.items()
-                else:
-                    if not traverse_string:
-                        return None
-                    obj = str(obj)
-                _current_depth += 1
-                depth = max(depth, _current_depth)
-                return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if try_call(key, args=(k, v))]
-            elif isinstance(obj, dict) and not (is_user_input and key == ':'):
-                obj = (obj.get(key) if casesense or (key in obj)
-                       else next((v for k, v in obj.items() if _lower(k) == key), None))
-            else:
-                if is_user_input:
-                    key = (int_or_none(key) if ':' not in key
-                           else slice(*map(int_or_none, key.split(':'))))
-                    if key == slice(None):
-                        return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
-                if not isinstance(key, (int, slice)):
-                    return None
-                if not isinstance(obj, (list, tuple, LazyList)):
-                    if not traverse_string:
-                        return None
-                    obj = str(obj)
-                try:
-                    obj = obj[key]
-                except IndexError:
-                    return None
-        return obj
+    """
+    Safely traverse nested `dict`s and `Sequence`s
+
+    >>> obj = [{}, {"key": "value"}]
+    >>> traverse_obj(obj, (1, "key"))
+    "value"
+
+    Each of the provided `paths` is tested and the first producing a valid result will be returned.
+    The next path will also be tested if the path branched but no results could be found.
+    Supported values for traversal are `Mapping`, `Sequence` and `re.Match`.
+    A value of None is treated as the absence of a value.
+
+    The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
+
+    The keys in the path can be one of:
+        - `None`:           Return the current object.
+        - `str`/`int`:      Return `obj[key]`. For `re.Match, return `obj.group(key)`.
+        - `slice`:          Branch out and return all values in `obj[key]`.
+        - `Ellipsis`:       Branch out and return a list of all values.
+        - `tuple`/`list`:   Branch out and return a list of all matching values.
+                            Read as: `[traverse_obj(obj, branch) for branch in branches]`.
+        - `function`:       Branch out and return values filtered by the function.
+                            Read as: `[value for key, value in obj if function(key, value)]`.
+                            For `Sequence`s, `key` is the index of the value.
+        - `dict`            Transform the current object and return a matching dict.
+                            Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
+
+    `tuple`, `list`, and `dict` all support nested paths and branches.
+
+    @params paths           Paths which to traverse by.
+    @param default          Value to return if the paths do not match.
+    @param expected_type    If a `type`, only accept final values of this type.
+                            If any other callable, try to call the function on each result.
+    @param get_all          If `False`, return the first matching result, otherwise all matching ones.
+    @param casesense        If `False`, consider string dictionary keys as case insensitive.
+
+    The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API
+
+    @param is_user_input    Whether the keys are generated from user input.
+                            If `True` strings get converted to `int`/`slice` if needed.
+    @param traverse_string  Whether to traverse into objects as strings.
+                            If `True`, any non-compatible object will first be
+                            converted into a string and then traversed into.
+
+
+    @returns                The result of the object traversal.
+                            If successful, `get_all=True`, and the path branches at least once,
+                            then a list of results is returned instead.
+                            A list is always returned if the last path branches and no `default` is given.
+    """
+    is_sequence = lambda x: isinstance(x, collections.abc.Sequence) and not isinstance(x, (str, bytes))
+    casefold = lambda k: k.casefold() if isinstance(k, str) else k
 
     if isinstance(expected_type, type):
         type_test = lambda val: val if isinstance(val, expected_type) else None
     else:
-        type_test = expected_type or IDENTITY
-
-    for path in path_list:
-        depth = 0
-        val = _traverse_obj(obj, path)
-        if val is not None:
-            if depth:
-                for _ in range(depth - 1):
-                    val = itertools.chain.from_iterable(v for v in val if v is not None)
-                val = [v for v in map(type_test, val) if v is not None]
-                if val:
-                    return val if get_all else val[0]
+        type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,))
+
+    def apply_key(key, obj):
+        if obj is None:
+            return
+
+        elif key is None:
+            yield obj
+
+        elif isinstance(key, (list, tuple)):
+            for branch in key:
+                _, result = apply_path(obj, branch)
+                yield from result
+
+        elif key is ...:
+            if isinstance(obj, collections.abc.Mapping):
+                yield from obj.values()
+            elif is_sequence(obj):
+                yield from obj
+            elif isinstance(obj, re.Match):
+                yield from obj.groups()
+            elif traverse_string:
+                yield from str(obj)
+
+        elif callable(key):
+            if is_sequence(obj):
+                iter_obj = enumerate(obj)
+            elif isinstance(obj, collections.abc.Mapping):
+                iter_obj = obj.items()
+            elif isinstance(obj, re.Match):
+                iter_obj = enumerate((obj.group(), *obj.groups()))
+            elif traverse_string:
+                iter_obj = enumerate(str(obj))
             else:
-                val = type_test(val)
-                if val is not None:
-                    return val
-    return default
+                return
+            yield from (v for k, v in iter_obj if try_call(key, args=(k, v)))
+
+        elif isinstance(key, dict):
+            iter_obj = ((k, _traverse_obj(obj, v)) for k, v in key.items())
+            yield {k: v if v is not None else default for k, v in iter_obj
+                   if v is not None or default is not NO_DEFAULT}
+
+        elif isinstance(obj, collections.abc.Mapping):
+            yield (obj.get(key) if casesense or (key in obj)
+                   else next((v for k, v in obj.items() if casefold(k) == key), None))
+
+        elif isinstance(obj, re.Match):
+            if isinstance(key, int) or casesense:
+                with contextlib.suppress(IndexError):
+                    yield obj.group(key)
+                    return
+
+            if not isinstance(key, str):
+                return
+
+            yield next((v for k, v in obj.groupdict().items() if casefold(k) == key), None)
+
+        else:
+            if is_user_input:
+                key = (int_or_none(key) if ':' not in key
+                       else slice(*map(int_or_none, key.split(':'))))
+
+            if not isinstance(key, (int, slice)):
+                return
+
+            if not is_sequence(obj):
+                if not traverse_string:
+                    return
+                obj = str(obj)
+
+            with contextlib.suppress(IndexError):
+                yield obj[key]
+
+    def apply_path(start_obj, path):
+        objs = (start_obj,)
+        has_branched = False
+
+        for key in variadic(path):
+            if is_user_input and key == ':':
+                key = ...
+
+            if not casesense and isinstance(key, str):
+                key = key.casefold()
+
+            if key is ... or isinstance(key, (list, tuple)) or callable(key):
+                has_branched = True
+
+            key_func = functools.partial(apply_key, key)
+            objs = itertools.chain.from_iterable(map(key_func, objs))
+
+        return has_branched, objs
+
+    def _traverse_obj(obj, path, use_list=True):
+        has_branched, results = apply_path(obj, path)
+        results = LazyList(x for x in map(type_test, results) if x is not None)
+
+        if get_all and has_branched:
+            return results.exhaust() if results or use_list else None
+
+        return results[0] if results else None
+
+    for index, path in enumerate(paths, 1):
+        use_list = default is NO_DEFAULT and index == len(paths)
+        result = _traverse_obj(obj, path, use_list)
+        if result is not None:
+            return result
+
+    return None if default is NO_DEFAULT else default
 
 
 def traverse_dict(dictn, keys, casesense=True):
-    write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
-                 'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
+    deprecation_warning(f'"{__name__}.traverse_dict" is deprecated and may be removed '
+                        f'in a future version. Use "{__name__}.traverse_obj" instead')
     return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True)
```
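Note: a few hedged examples of the rewritten traversal, consistent with the new docstring (data and values are illustrative):

```python
data = {'formats': [{'url': 'https://a', 'tbr': 500}, {'url': 'https://b'}]}

traverse_obj(data, ('formats', 0, 'url'))    # 'https://a'
traverse_obj(data, ('formats', ..., 'url'))  # ['https://a', 'https://b'] (Ellipsis branches)
traverse_obj(data, ('formats', 0, {'bitrate': 'tbr', 'link': 'url'}))
# {'bitrate': 500, 'link': 'https://a'} -- dict key: transform and collect
traverse_obj(data, ('formats', 1, 'tbr'), ('formats', 0, 'tbr'))
# 500 -- first path yields nothing, so the second path is tried
```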
```diff
@@ -5250,10 +5527,6 @@ def get_first(obj, keys, **kwargs):
     return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False)
 
 
-def variadic(x, allowed_types=(str, bytes, dict)):
-    return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,)
-
-
 def time_seconds(**kwargs):
     t = datetime.datetime.now(datetime.timezone(datetime.timedelta(**kwargs)))
     return t.timestamp()
@@ -5281,7 +5554,8 @@ def jwt_encode_hs256(payload_data, key, headers={}):
 # can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256
 def jwt_decode_hs256(jwt):
     header_b64, payload_b64, signature_b64 = jwt.split('.')
-    payload_data = json.loads(base64.urlsafe_b64decode(payload_b64))
+    # add trailing ='s that may have been stripped, superfluous ='s are ignored
+    payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}==='))
     return payload_data
 
 
@@ -5369,6 +5643,24 @@ def read_stdin(what):
     return sys.stdin
 
 
+def determine_file_encoding(data):
+    """
+    Detect the text encoding used
+    @returns (encoding, bytes to skip)
+    """
+
+    # BOM marks are given priority over declarations
+    for bom, enc in BOMS:
+        if data.startswith(bom):
+            return enc, len(bom)
+
+    # Strip off all null bytes to match even when UTF-16 or UTF-32 is used.
+    # We ignore the endianness to get a good enough match
+    data = data.replace(b'\0', b'')
+    mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data)
+    return mobj.group(1).decode() if mobj else None, 0
+
+
 class Config:
     own_args = None
     parsed_args = None
@@ -5381,20 +5673,26 @@ class Config:
 
     def init(self, args=None, filename=None):
         assert not self.__initialized
+        self.own_args, self.filename = args, filename
+        return self.load_configs()
+
+    def load_configs(self):
         directory = ''
-        if filename:
-            location = os.path.realpath(filename)
+        if self.filename:
+            location = os.path.realpath(self.filename)
             directory = os.path.dirname(location)
             if location in self._loaded_paths:
                 return False
             self._loaded_paths.add(location)
 
-        self.own_args, self.__initialized = args, True
-        opts, _ = self.parser.parse_known_args(args)
-        self.parsed_args, self.filename = args, filename
-
+        self.__initialized = True
+        opts, _ = self.parser.parse_known_args(self.own_args)
+        self.parsed_args = self.own_args
         for location in opts.config_locations or []:
             if location == '-':
+                if location in self._loaded_paths:
+                    continue
+                self._loaded_paths.add(location)
                 self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin')
                 continue
             location = os.path.join(directory, expand_path(location))
@@ -5417,12 +5715,17 @@ class Config:
     @staticmethod
     def read_file(filename, default=[]):
         try:
-            optionf = open(filename)
+            optionf = open(filename, 'rb')
        except OSError:
             return default  # silently skip if file is not present
         try:
+            enc, skip = determine_file_encoding(optionf.read(512))
+            optionf.seek(skip, io.SEEK_SET)
+        except OSError:
+            enc = None  # silently skip read errors
+        try:
             # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56
-            contents = optionf.read()
+            contents = optionf.read().decode(enc or preferredencoding())
             res = shlex.split(contents, comments=True)
         except Exception as err:
             raise ValueError(f'Unable to parse "{filename}": {err}')
@@ -5467,7 +5770,7 @@ class Config:
         return self.parser.parse_args(self.all_args)
 
 
-class WebSocketsWrapper():
+class WebSocketsWrapper:
     """Wraps websockets module to use in non-async scopes"""
 
     pool = None
@@ -5543,15 +5846,41 @@ def merge_headers(*dicts):
     return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))}
 
 
+def cached_method(f):
+    """Cache a method"""
+    signature = inspect.signature(f)
+
+    @functools.wraps(f)
+    def wrapper(self, *args, **kwargs):
+        bound_args = signature.bind(self, *args, **kwargs)
+        bound_args.apply_defaults()
+        key = tuple(bound_args.arguments.values())[1:]
+
+        cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {})
+        if key not in cache:
+            cache[key] = f(self, *args, **kwargs)
+        return cache[key]
+    return wrapper
+
+
 class classproperty:
-    """classmethod(property(func)) that works in py < 3.9"""
+    """property access for class methods with optional caching"""
+    def __new__(cls, func=None, *args, **kwargs):
+        if not func:
+            return functools.partial(cls, *args, **kwargs)
+        return super().__new__(cls)
 
-    def __init__(self, func):
+    def __init__(self, func, *, cache=False):
         functools.update_wrapper(self, func)
         self.func = func
+        self._cache = {} if cache else None
 
     def __get__(self, _, cls):
-        return self.func(cls)
+        if self._cache is None:
+            return self.func(cls)
+        elif cls not in self._cache:
+            self._cache[cls] = self.func(cls)
+        return self._cache[cls]
 
 
 class Namespace(types.SimpleNamespace):
```
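Note: a small usage sketch of the two new caching helpers (the `Demo` class is hypothetical):

```python
class Demo:
    @cached_method
    def expensive(self, n):
        print('computing...')
        return n * n

    @classproperty(cache=True)
    def version(cls):
        print('resolving once per class...')
        return '1.0'

d = Demo()
d.expensive(4)  # prints 'computing...', returns 16
d.expensive(4)  # cache hit on the (4,) key -- no print
Demo.version    # prints once
Demo.version    # served from the per-class cache
```

The `cached_method` key is built from the bound arguments minus `self`, so the cache lives on the instance; `classproperty(cache=True)` caches per class, which matters for subclasses.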
```diff
@@ -5565,6 +5894,406 @@ class Namespace(types.SimpleNamespace):
         return self.__dict__.items()
 
 
+MEDIA_EXTENSIONS = Namespace(
+    common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'),
+    video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'),
+    common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'),
+    audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma'),
+    thumbnails=('jpg', 'png', 'webp'),
+    storyboards=('mhtml', ),
+    subtitles=('srt', 'vtt', 'ass', 'lrc'),
+    manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
+)
+MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
+MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
+
+KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
+
+
+class RetryManager:
+    """Usage:
+        for retry in RetryManager(...):
+            try:
+                ...
+            except SomeException as err:
+                retry.error = err
+                continue
+    """
+    attempt, _error = 0, None
+
+    def __init__(self, _retries, _error_callback, **kwargs):
+        self.retries = _retries or 0
+        self.error_callback = functools.partial(_error_callback, **kwargs)
+
+    def _should_retry(self):
+        return self._error is not NO_DEFAULT and self.attempt <= self.retries
+
+    @property
+    def error(self):
+        if self._error is NO_DEFAULT:
+            return None
+        return self._error
+
+    @error.setter
+    def error(self, value):
+        self._error = value
+
+    def __iter__(self):
+        while self._should_retry():
+            self.error = NO_DEFAULT
+            self.attempt += 1
+            yield self
+            if self.error:
+                self.error_callback(self.error, self.attempt, self.retries)
+
+    @staticmethod
+    def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
+        """Utility function for reporting retries"""
+        if count > retries:
+            if error:
+                return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
+            raise e
+
+        if not count:
+            return warn(e)
+        elif isinstance(e, ExtractorError):
+            e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
+        warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
+
+        delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
+        if delay:
+            info(f'Sleeping {delay:.2f} seconds ...')
+            time.sleep(delay)
+
+
+def make_archive_id(ie, video_id):
+    ie_key = ie if isinstance(ie, str) else ie.ie_key()
+    return f'{ie_key.lower()} {video_id}'
+
+
+def truncate_string(s, left, right=0):
+    assert left > 3 and right >= 0
+    if s is None or len(s) <= left + right:
+        return s
+    return f'{s[:left-3]}...{s[-right:]}'
+
+
+def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
+    assert 'all' in alias_dict, '"all" alias is required'
+    requested = list(start or [])
+    for val in options:
+        discard = val.startswith('-')
+        if discard:
+            val = val[1:]
+
+        if val in alias_dict:
+            val = alias_dict[val] if not discard else [
+                i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
+            # NB: Do not allow regex in aliases for performance
+            requested = orderedSet_from_options(val, alias_dict, start=requested)
+            continue
+
+        current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
+                   else [val] if val in alias_dict['all'] else None)
+        if current is None:
+            raise ValueError(val)
+
+        if discard:
+            for item in current:
+                while item in requested:
+                    requested.remove(item)
+        else:
+            requested.extend(current)
+
+    return orderedSet(requested)
+
+
+class FormatSorter:
+    regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'
+
+    default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality',
+               'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec',
+               'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id')  # These must not be aliases
+    ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr',
+                    'height', 'width', 'proto', 'vext', 'abr', 'aext',
+                    'fps', 'fs_approx', 'source', 'id')
+
+    settings = {
+        'vcodec': {'type': 'ordered', 'regex': True,
+                   'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']},
+        'acodec': {'type': 'ordered', 'regex': True,
+                   'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']},
+        'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range',
+                'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]},
+        'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol',
+                  'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']},
+        'vext': {'type': 'ordered', 'field': 'video_ext',
+                 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'),
+                 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')},
+        'aext': {'type': 'ordered', 'field': 'audio_ext',
+                 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'webm', '', 'none'),
+                 'order_free': ('ogg', 'opus', 'webm', 'mp3', 'm4a', 'aac', '', 'none')},
+        'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000},
+        'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple',
+                       'field': ('vcodec', 'acodec'),
+                       'function': lambda it: int(any(v != 'none' for v in it))},
+        'ie_pref': {'priority': True, 'type': 'extractor'},
+        'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)},
+        'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)},
+        'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1},
+        'quality': {'convert': 'float', 'default': -1},
+        'filesize': {'convert': 'bytes'},
+        'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'},
+        'id': {'convert': 'string', 'field': 'format_id'},
+        'height': {'convert': 'float_none'},
+        'width': {'convert': 'float_none'},
+        'fps': {'convert': 'float_none'},
+        'channels': {'convert': 'float_none', 'field': 'audio_channels'},
+        'tbr': {'convert': 'float_none'},
+        'vbr': {'convert': 'float_none'},
+        'abr': {'convert': 'float_none'},
+        'asr': {'convert': 'float_none'},
+        'source': {'convert': 'float', 'field': 'source_preference', 'default': -1},
+
+        'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')},
+        'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True},
+        'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')},
+        'ext': {'type': 'combined', 'field': ('vext', 'aext')},
+        'res': {'type': 'multiple', 'field': ('height', 'width'),
+                'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))},
+
+        # Actual field names
+        'format_id': {'type': 'alias', 'field': 'id'},
+        'preference': {'type': 'alias', 'field': 'ie_pref'},
+        'language_preference': {'type': 'alias', 'field': 'lang'},
+        'source_preference': {'type': 'alias', 'field': 'source'},
+        'protocol': {'type': 'alias', 'field': 'proto'},
+        'filesize_approx': {'type': 'alias', 'field': 'fs_approx'},
+        'audio_channels': {'type': 'alias', 'field': 'channels'},
+
+        # Deprecated
+        'dimension': {'type': 'alias', 'field': 'res', 'deprecated': True},
+        'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True},
+        'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True},
+        'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True},
+        'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True},
+        'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True},
+        'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True},
+        'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True},
+        'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True},
+        'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True},
+        'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True},
+        'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True},
+        'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True},
+        'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True},
+        'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
+        'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True},
+        'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
+        'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True},
+        'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
+        'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True},
+    }
+
+    def __init__(self, ydl, field_preference):
+        self.ydl = ydl
+        self._order = []
+        self.evaluate_params(self.ydl.params, field_preference)
+        if ydl.params.get('verbose'):
+            self.print_verbose_info(self.ydl.write_debug)
+
+    def _get_field_setting(self, field, key):
+        if field not in self.settings:
+            if key in ('forced', 'priority'):
+                return False
+            self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is '
+                                        'deprecated and may be removed in a future version')
+            self.settings[field] = {}
+        propObj = self.settings[field]
+        if key not in propObj:
+            type = propObj.get('type')
+            if key == 'field':
+                default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field
+            elif key == 'convert':
+                default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore'
+            else:
+                default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None)
+            propObj[key] = default
+        return propObj[key]
+
+    def _resolve_field_value(self, field, value, convertNone=False):
+        if value is None:
+            if not convertNone:
+                return None
+        else:
+            value = value.lower()
+        conversion = self._get_field_setting(field, 'convert')
+        if conversion == 'ignore':
+            return None
+        if conversion == 'string':
+            return value
+        elif conversion == 'float_none':
+            return float_or_none(value)
+        elif conversion == 'bytes':
+            return parse_bytes(value)
+        elif conversion == 'order':
+            order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order')
+            use_regex = self._get_field_setting(field, 'regex')
+            list_length = len(order_list)
+            empty_pos = order_list.index('') if '' in order_list else list_length + 1
+            if use_regex and value is not None:
+                for i, regex in enumerate(order_list):
+                    if regex and re.match(regex, value):
+                        return list_length - i
+                return list_length - empty_pos  # not in list
+            else:  # not regex or value = None
+                return list_length - (order_list.index(value) if value in order_list else empty_pos)
+        else:
+            if value.isnumeric():
+                return float(value)
+            else:
+
+    def evaluate_params(self, params, sort_extractor):
+        self._use_free_order = params.get('prefer_free_formats', False)
+        self._sort_user = params.get('format_sort', [])
+        self._sort_extractor = sort_extractor
+
+        def add_item(field, reverse, closest, limit_text):
+            field = field.lower()
+            if field in self._order:
+                return
+            self._order.append(field)
+            limit = self._resolve_field_value(field, limit_text)
+            data = {
+                'reverse': reverse,
+                'closest': False if limit is None else closest,
+                'limit_text': limit_text,
+                'limit': limit}
+            if field in self.settings:
+                self.settings[field].update(data)
+            else:
+                self.settings[field] = data
+
+        sort_list = (
+            tuple(field for field in self.default if self._get_field_setting(field, 'forced'))
+            + (tuple() if params.get('format_sort_force', False)
+               else tuple(field for field in self.default if self._get_field_setting(field, 'priority')))
+            + tuple(self._sort_user) + tuple(sort_extractor) + self.default)
+
+        for item in sort_list:
+            match = re.match(self.regex, item)
+            if match is None:
+                raise ExtractorError('Invalid format sort string "%s" given by extractor' % item)
+            field = match.group('field')
+            if field is None:
+                continue
+            if self._get_field_setting(field, 'type') == 'alias':
+                alias, field = field, self._get_field_setting(field, 'field')
+                if self._get_field_setting(alias, 'deprecated'):
+                    self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may '
+                                                f'be removed in a future version. Please use {field} instead')
+            reverse = match.group('reverse') is not None
+            closest = match.group('separator') == '~'
+            limit_text = match.group('limit')
+
+            has_limit = limit_text is not None
+            has_multiple_fields = self._get_field_setting(field, 'type') == 'combined'
+            has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit')
+
+            fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,)
+            limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple()
+            limit_count = len(limits)
+            for (i, f) in enumerate(fields):
+                add_item(f, reverse, closest,
+                         limits[i] if i < limit_count
+                         else limits[0] if has_limit and not has_multiple_limits
+                         else None)
+
+    def print_verbose_info(self, write_debug):
+        if self._sort_user:
+            write_debug('Sort order given by user: %s' % ', '.join(self._sort_user))
+        if self._sort_extractor:
+            write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor))
+        write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % (
+            '+' if self._get_field_setting(field, 'reverse') else '', field,
+            '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':',
+                          self._get_field_setting(field, 'limit_text'),
+                          self._get_field_setting(field, 'limit'))
+            if self._get_field_setting(field, 'limit_text') is not None else '')
+            for field in self._order if self._get_field_setting(field, 'visible')]))
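# A sketch (not part of this patch) of how evaluate_params above splits one
# sort token into its parts. TOKEN_RE paraphrases the class's `regex`
# attribute (defined just before this hunk); treat the exact pattern as an
# assumption. Groups map to reverse ('+'), field, separator and limit.
import re

TOKEN_RE = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? *$'

m = re.match(TOKEN_RE, '+res~1080')
assert m.group('reverse') == '+'    # '+' = prefer smaller values first
assert m.group('field') == 'res'
assert m.group('separator') == '~'  # '~' = prefer values closest to the limit
assert m.group('limit') == '1080'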
+
+    def _calculate_field_preference_from_value(self, format, field, type, value):
+        reverse = self._get_field_setting(field, 'reverse')
+        closest = self._get_field_setting(field, 'closest')
+        limit = self._get_field_setting(field, 'limit')
+
+        if type == 'extractor':
+            maximum = self._get_field_setting(field, 'max')
+            if value is None or (maximum is not None and value >= maximum):
+                value = -1
+        elif type == 'boolean':
+            in_list = self._get_field_setting(field, 'in_list')
+            not_in_list = self._get_field_setting(field, 'not_in_list')
+            value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
+        elif type == 'ordered':
+            value = self._resolve_field_value(field, value, True)
+
+        # try to convert to number
+        val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
+        is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
+        if is_num:
+            value = val_num
+
+        return ((-10, 0) if value is None
+                else (1, value, 0) if not is_num  # if a field has mixed strings and numbers, strings are sorted higher
+                else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
+                else (0, value, 0) if not reverse and (limit is None or value <= limit)
+                else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
+                else (-1, value, 0))
+
+    def _calculate_field_preference(self, format, field):
+        type = self._get_field_setting(field, 'type')  # extractor, boolean, ordered, field, multiple
+        get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
+        if type == 'multiple':
+            type = 'field'  # Only 'field' is allowed in multiple for now
+            actual_fields = self._get_field_setting(field, 'field')
+
+            value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
+        else:
+            value = get_value(field)
+        return self._calculate_field_preference_from_value(format, field, type, value)
+
+    def calculate_preference(self, format):
+        # Determine missing protocol
+        if not format.get('protocol'):
+            format['protocol'] = determine_protocol(format)
+
+        # Determine missing ext
+        if not format.get('ext') and 'url' in format:
+            format['ext'] = determine_ext(format['url'])
+        if format.get('vcodec') == 'none':
+            format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
+            format['video_ext'] = 'none'
+        else:
+            format['video_ext'] = format['ext']
+            format['audio_ext'] = 'none'
+        # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'):  # Not supported?
+        #     format['preference'] = -1000
+
+        # Determine missing bitrates
+        if format.get('tbr') is None:
+            if format.get('vbr') is not None and format.get('abr') is not None:
+                format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
+        else:
+            if format.get('vcodec') != 'none' and format.get('vbr') is None:
+                format['vbr'] = format.get('tbr') - format.get('abr', 0)
+            if format.get('acodec') != 'none' and format.get('abr') is None:
+                format['abr'] = format.get('tbr') - format.get('vbr', 0)
+
+        return tuple(self._calculate_field_preference(format, field) for field in self._order)
+
+# Deprecated
 has_certifi = bool(certifi)
 has_websockets = bool(websockets)
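# A standalone sketch (not part of this patch) of the bitrate inference in
# calculate_preference above: a missing total bitrate is the sum of the video
# and audio bitrates, and a missing component is recovered by subtraction when
# the total is known. `fill_bitrates` is an illustrative name.
def fill_bitrates(f):
    if f.get('tbr') is None:
        if f.get('vbr') is not None and f.get('abr') is not None:
            f['tbr'] = f['vbr'] + f['abr']
    else:
        if f.get('vcodec') != 'none' and f.get('vbr') is None:
            f['vbr'] = f['tbr'] - f.get('abr', 0)
        if f.get('acodec') != 'none' and f.get('abr') is None:
            f['abr'] = f['tbr'] - f.get('vbr', 0)
    return f

assert fill_bitrates({'vbr': 1000, 'abr': 128})['tbr'] == 1128
assert fill_bitrates({'tbr': 1128, 'abr': 128, 'vcodec': 'avc1'})['vbr'] == 1000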