Diffstat (limited to 'hypervideo_dl/utils.py')
-rw-r--r-- | hypervideo_dl/utils.py | 2873
1 file changed, 1818 insertions, 1055 deletions
diff --git a/hypervideo_dl/utils.py b/hypervideo_dl/utils.py index 6379872..45847f9 100644 --- a/hypervideo_dl/utils.py +++ b/hypervideo_dl/utils.py @@ -1,8 +1,3 @@ -#!/usr/bin/env python3 -# coding: utf-8 - -from __future__ import unicode_literals - import asyncio import atexit import base64 @@ -10,86 +5,59 @@ import binascii import calendar import codecs import collections +import collections.abc import contextlib -import ctypes import datetime -import email.utils import email.header +import email.utils import errno -import functools import gzip import hashlib import hmac +import html.entities +import html.parser +import http.client +import http.cookiejar import importlib.util +import inspect import io import itertools import json import locale import math +import mimetypes import operator import os import platform import random import re +import shlex import socket import ssl +import struct import subprocess import sys import tempfile import time import traceback +import types +import unicodedata +import urllib.error +import urllib.parse +import urllib.request import xml.etree.ElementTree import zlib -import mimetypes +from .compat import functools # isort: split from .compat import ( - compat_HTMLParseError, - compat_HTMLParser, - compat_HTTPError, - compat_basestring, - compat_brotli, - compat_chr, - compat_cookiejar, - compat_ctypes_WINFUNCTYPE, compat_etree_fromstring, compat_expanduser, - compat_html_entities, - compat_html_entities_html5, - compat_http_client, - compat_integer_types, - compat_numeric_types, - compat_kwargs, + compat_HTMLParseError, compat_os_name, - compat_parse_qs, - compat_shlex_split, compat_shlex_quote, - compat_str, - compat_struct_pack, - compat_struct_unpack, - compat_urllib_error, - compat_urllib_parse, - compat_urllib_parse_urlencode, - compat_urllib_parse_urlparse, - compat_urllib_parse_urlunparse, - compat_urllib_parse_quote, - compat_urllib_parse_quote_plus, - compat_urllib_parse_unquote_plus, - compat_urllib_request, - compat_urlparse, - compat_websockets, - compat_xpath, -) - -from .socks import ( - ProxyType, - sockssocket, ) - -try: - import certifi - has_certifi = True -except ImportError: - has_certifi = False +from .dependencies import brotli, certifi, websockets, xattr +from .socks import ProxyType, sockssocket def register_socks_protocols(): @@ -97,8 +65,8 @@ def register_socks_protocols(): # In Python < 2.6.5, urlsplit() suffers from bug https://bugs.python.org/issue7904 # URLs with protocols not in urlparse.uses_netloc are not handled correctly for scheme in ('socks', 'socks4', 'socks4a', 'socks5'): - if scheme not in compat_urlparse.uses_netloc: - compat_urlparse.uses_netloc.append(scheme) + if scheme not in urllib.parse.uses_netloc: + urllib.parse.uses_netloc.append(scheme) # This is not clearly defined otherwise @@ -153,7 +121,7 @@ def random_user_agent(): SUPPORTED_ENCODINGS = [ 'gzip', 'deflate' ] -if compat_brotli: +if brotli: SUPPORTED_ENCODINGS.append('br') std_headers = { @@ -170,6 +138,7 @@ USER_AGENTS = { NO_DEFAULT = object() +IDENTITY = lambda x: x ENGLISH_MONTH_NAMES = [ 'January', 'February', 'March', 'April', 'May', 'June', @@ -180,22 +149,22 @@ MONTH_NAMES = { 'fr': [ 'janvier', 'février', 'mars', 'avril', 'mai', 'juin', 'juillet', 'août', 'septembre', 'octobre', 'novembre', 'décembre'], + # these follow the genitive grammatical case (dopełniacz) + # some websites might be using nominative, which will require another month list + # https://en.wikibooks.org/wiki/Polish/Noun_cases + 'pl': ['stycznia', 'lutego', 'marca', 
'kwietnia', 'maja', 'czerwca', + 'lipca', 'sierpnia', 'września', 'października', 'listopada', 'grudnia'], } -KNOWN_EXTENSIONS = ( - 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'aac', - 'flv', 'f4v', 'f4a', 'f4b', - 'webm', 'ogg', 'ogv', 'oga', 'ogx', 'spx', 'opus', - 'mkv', 'mka', 'mk3d', - 'avi', 'divx', - 'mov', - 'asf', 'wmv', 'wma', - '3gp', '3g2', - 'mp3', - 'flac', - 'ape', - 'wav', - 'f4f', 'f4m', 'm3u8', 'smil') +# From https://github.com/python/cpython/blob/3.11/Lib/email/_parseaddr.py#L36-L42 +TIMEZONE_NAMES = { + 'UT': 0, 'UTC': 0, 'GMT': 0, 'Z': 0, + 'AST': -4, 'ADT': -3, # Atlantic (used in Canada) + 'EST': -5, 'EDT': -4, # Eastern + 'CST': -6, 'CDT': -5, # Central + 'MST': -7, 'MDT': -6, # Mountain + 'PST': -8, 'PDT': -7 # Pacific +} # needed for sanitizing filenames in restricted mode ACCENT_CHARS = dict(zip('ÂÃÄÀÁÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖŐØŒÙÚÛÜŰÝÞßàáâãäåæçèéêëìíîïðñòóôõöőøœùúûüűýþÿ', @@ -255,6 +224,7 @@ DATE_FORMATS_DAY_FIRST.extend([ '%d/%m/%Y', '%d/%m/%y', '%d/%m/%Y %H:%M:%S', + '%d-%m-%Y %H:%M', ]) DATE_FORMATS_MONTH_FIRST = list(DATE_FORMATS) @@ -267,9 +237,12 @@ DATE_FORMATS_MONTH_FIRST.extend([ ]) PACKED_CODES_RE = r"}\('(.+)',(\d+),(\d+),'([^']+)'\.split\('\|'\)" -JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>(?P<json_ld>.+?)</script>' +JSON_LD_RE = r'(?is)<script[^>]+type=(["\']?)application/ld\+json\1[^>]*>\s*(?P<json_ld>{.+?}|\[.+?\])\s*</script>' + +NUMBER_RE = r'\d+(?:\.\d+)?' +@functools.cache def preferredencoding(): """Get preferred encoding. @@ -288,37 +261,9 @@ def preferredencoding(): def write_json_file(obj, fn): """ Encode obj as JSON and write it to fn, atomically if possible """ - fn = encodeFilename(fn) - if sys.version_info < (3, 0) and sys.platform != 'win32': - encoding = get_filesystem_encoding() - # os.path.basename returns a bytes object, but NamedTemporaryFile - # will fail if the filename contains non ascii characters unless we - # use a unicode object - path_basename = lambda f: os.path.basename(fn).decode(encoding) - # the same for os.path.dirname - path_dirname = lambda f: os.path.dirname(fn).decode(encoding) - else: - path_basename = os.path.basename - path_dirname = os.path.dirname - - args = { - 'suffix': '.tmp', - 'prefix': path_basename(fn) + '.', - 'dir': path_dirname(fn), - 'delete': False, - } - - # In Python 2.x, json.dump expects a bytestream. - # In Python 3.x, it writes to a character stream - if sys.version_info < (3, 0): - args['mode'] = 'wb' - else: - args.update({ - 'mode': 'w', - 'encoding': 'utf-8', - }) - - tf = tempfile.NamedTemporaryFile(**compat_kwargs(args)) + tf = tempfile.NamedTemporaryFile( + prefix=f'{os.path.basename(fn)}.', dir=os.path.dirname(fn), + suffix='.tmp', delete=False, mode='w', encoding='utf-8') try: with tf: @@ -326,39 +271,24 @@ def write_json_file(obj, fn): if sys.platform == 'win32': # Need to remove existing file on Windows, else os.rename raises # WindowsError or FileExistsError. 
- try: + with contextlib.suppress(OSError): os.unlink(fn) - except OSError: - pass - try: + with contextlib.suppress(OSError): mask = os.umask(0) os.umask(mask) os.chmod(tf.name, 0o666 & ~mask) - except OSError: - pass os.rename(tf.name, fn) except Exception: - try: + with contextlib.suppress(OSError): os.remove(tf.name) - except OSError: - pass raise -if sys.version_info >= (2, 7): - def find_xpath_attr(node, xpath, key, val=None): - """ Find the xpath xpath[@key=val] """ - assert re.match(r'^[a-zA-Z_-]+$', key) - expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val)) - return node.find(expr) -else: - def find_xpath_attr(node, xpath, key, val=None): - for f in node.findall(compat_xpath(xpath)): - if key not in f.attrib: - continue - if val is None or f.attrib.get(key) == val: - return f - return None +def find_xpath_attr(node, xpath, key, val=None): + """ Find the xpath xpath[@key=val] """ + assert re.match(r'^[a-zA-Z_-]+$', key) + expr = xpath + ('[@%s]' % key if val is None else f"[@{key}='{val}']") + return node.find(expr) # On python2.6 the xml.etree.ElementTree.Element methods don't support # the namespace parameter @@ -378,9 +308,9 @@ def xpath_with_ns(path, ns_map): def xpath_element(node, xpath, name=None, fatal=False, default=NO_DEFAULT): def _find_xpath(xpath): - return node.find(compat_xpath(xpath)) + return node.find(xpath) - if isinstance(xpath, (str, compat_str)): + if isinstance(xpath, str): n = _find_xpath(xpath) else: for xp in xpath: @@ -420,21 +350,21 @@ def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT): if default is not NO_DEFAULT: return default elif fatal: - name = '%s[@%s]' % (xpath, key) if name is None else name + name = f'{xpath}[@{key}]' if name is None else name raise ExtractorError('Could not find XML attribute %s' % name) else: return None return n.attrib[key] -def get_element_by_id(id, html): +def get_element_by_id(id, html, **kwargs): """Return the content of the tag with the specified ID in the passed HTML document""" - return get_element_by_attribute('id', id, html) + return get_element_by_attribute('id', id, html, **kwargs) -def get_element_html_by_id(id, html): +def get_element_html_by_id(id, html, **kwargs): """Return the html of the tag with the specified ID in the passed HTML document""" - return get_element_html_by_attribute('id', id, html) + return get_element_html_by_attribute('id', id, html, **kwargs) def get_element_by_class(class_name, html): @@ -449,27 +379,27 @@ def get_element_html_by_class(class_name, html): return retval[0] if retval else None -def get_element_by_attribute(attribute, value, html, escape_value=True): - retval = get_elements_by_attribute(attribute, value, html, escape_value) +def get_element_by_attribute(attribute, value, html, **kwargs): + retval = get_elements_by_attribute(attribute, value, html, **kwargs) return retval[0] if retval else None -def get_element_html_by_attribute(attribute, value, html, escape_value=True): - retval = get_elements_html_by_attribute(attribute, value, html, escape_value) +def get_element_html_by_attribute(attribute, value, html, **kargs): + retval = get_elements_html_by_attribute(attribute, value, html, **kargs) return retval[0] if retval else None -def get_elements_by_class(class_name, html): +def get_elements_by_class(class_name, html, **kargs): """Return the content of all tags with the specified class in the passed HTML document as a list""" return get_elements_by_attribute( - 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), + 
'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name), html, escape_value=False) def get_elements_html_by_class(class_name, html): """Return the html of all tags with the specified class in the passed HTML document as a list""" return get_elements_html_by_attribute( - 'class', r'[^\'"]*\b%s\b[^\'"]*' % re.escape(class_name), + 'class', r'[^\'"]*(?<=[\'"\s])%s(?=[\'"\s])[^\'"]*' % re.escape(class_name), html, escape_value=False) @@ -483,21 +413,23 @@ def get_elements_html_by_attribute(*args, **kwargs): return [whole for _, whole in get_elements_text_and_html_by_attribute(*args, **kwargs)] -def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value=True): +def get_elements_text_and_html_by_attribute(attribute, value, html, *, tag=r'[\w:.-]+', escape_value=True): """ Return the text (content) and the html (whole) of the tag with the specified attribute in the passed HTML document """ + if not value: + return - value_quote_optional = '' if re.match(r'''[\s"'`=<>]''', value) else '?' + quote = '' if re.match(r'''[\s"'`=<>]''', value) else '?' value = re.escape(value) if escape_value else value - partial_element_re = r'''(?x) - <(?P<tag>[a-zA-Z0-9:._-]+) + partial_element_re = rf'''(?x) + <(?P<tag>{tag}) (?:\s(?:[^>"']|"[^"]*"|'[^']*')*)? - \s%(attribute)s\s*=\s*(?P<_q>['"]%(vqo)s)(?-x:%(value)s)(?P=_q) - ''' % {'attribute': re.escape(attribute), 'value': value, 'vqo': value_quote_optional} + \s{re.escape(attribute)}\s*=\s*(?P<_q>['"]{quote})(?-x:{value})(?P=_q) + ''' for m in re.finditer(partial_element_re, html): content, whole = get_element_text_and_html_by_tag(m.group('tag'), html[m.start():]) @@ -508,7 +440,7 @@ def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value ) -class HTMLBreakOnClosingTagParser(compat_HTMLParser): +class HTMLBreakOnClosingTagParser(html.parser.HTMLParser): """ HTML parser which raises HTMLBreakOnClosingTagException upon reaching the closing tag for the first opening tag it has encountered, and can be used @@ -520,7 +452,7 @@ class HTMLBreakOnClosingTagParser(compat_HTMLParser): def __init__(self): self.tagstack = collections.deque() - compat_HTMLParser.__init__(self) + html.parser.HTMLParser.__init__(self) def __enter__(self): return self @@ -550,6 +482,7 @@ class HTMLBreakOnClosingTagParser(compat_HTMLParser): raise self.HTMLBreakOnClosingTagException() +# XXX: This should be far less strict def get_element_text_and_html_by_tag(tag, html): """ For the first element with the specified tag in the passed HTML document @@ -585,22 +518,23 @@ def get_element_text_and_html_by_tag(tag, html): raise compat_HTMLParseError('unexpected end of html') -class HTMLAttributeParser(compat_HTMLParser): +class HTMLAttributeParser(html.parser.HTMLParser): """Trivial HTML parser to gather the attributes for a single element""" def __init__(self): self.attrs = {} - compat_HTMLParser.__init__(self) + html.parser.HTMLParser.__init__(self) def handle_starttag(self, tag, attrs): self.attrs = dict(attrs) + raise compat_HTMLParseError('done') -class HTMLListAttrsParser(compat_HTMLParser): +class HTMLListAttrsParser(html.parser.HTMLParser): """HTML parser to gather the attributes for the elements of a list""" def __init__(self): - compat_HTMLParser.__init__(self) + html.parser.HTMLParser.__init__(self) self.items = [] self._level = 0 @@ -626,16 +560,11 @@ def extract_attributes(html_element): 'empty': '', 'noval': None, 'entity': '&', 'sq': '"', 'dq': '\'' }. 
- NB HTMLParser is stricter in Python 2.6 & 3.2 than in later versions, - but the cases in the unit test will work for all of 2.6, 2.7, 3.2-3.5. """ parser = HTMLAttributeParser() - try: + with contextlib.suppress(compat_HTMLParseError): parser.feed(html_element) parser.close() - # Older Python may throw HTMLParseError in case of malformed HTML - except compat_HTMLParseError: - pass return parser.attrs @@ -664,6 +593,24 @@ def clean_html(html): return html.strip() +class LenientJSONDecoder(json.JSONDecoder): + def __init__(self, *args, transform_source=None, ignore_extra=False, **kwargs): + self.transform_source, self.ignore_extra = transform_source, ignore_extra + super().__init__(*args, **kwargs) + + def decode(self, s): + if self.transform_source: + s = self.transform_source(s) + try: + if self.ignore_extra: + return self.raw_decode(s.lstrip())[0] + return super().decode(s) + except json.JSONDecodeError as e: + if e.pos is not None: + raise type(e)(f'{e.msg} in {s[e.pos-10:e.pos+10]!r}', s, e.pos) + raise + + def sanitize_open(filename, open_mode): """Try to open the given filename, and slightly tweak it if this fails. @@ -674,26 +621,33 @@ def sanitize_open(filename, open_mode): It returns the tuple (stream, definitive_file_name). """ - try: - if filename == '-': - if sys.platform == 'win32': - import msvcrt + if filename == '-': + if sys.platform == 'win32': + import msvcrt + + # stdout may be any IO stream, e.g. when using contextlib.redirect_stdout + with contextlib.suppress(io.UnsupportedOperation): msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) - return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) - stream = locked_file(filename, open_mode, block=False).open() - return (stream, filename) - except (IOError, OSError) as err: - if err.errno in (errno.EACCES,): - raise + return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename) - # In case of error, try to remove win32 forbidden chars - alt_filename = sanitize_path(filename) - if alt_filename == filename: - raise - else: - # An exception here should be caught in the caller - stream = locked_file(filename, open_mode, block=False).open() - return (stream, alt_filename) + for attempt in range(2): + try: + try: + if sys.platform == 'win32': + # FIXME: An exclusive lock also locks the file from being read. + # Since windows locks are mandatory, don't lock the file on windows (for now). + # Ref: https://github.com/hypervideo/hypervideo/issues/3124 + raise LockingUnsupportedError() + stream = locked_file(filename, open_mode, block=False).__enter__() + except OSError: + stream = open(filename, open_mode) + return stream, filename + except OSError as err: + if attempt or err.errno in (errno.EACCES,): + raise + old_filename, filename = filename, sanitize_path(filename) + if old_filename == filename: + raise def timeconvert(timestr): @@ -719,6 +673,9 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT): return ACCENT_CHARS[char] elif not restricted and char == '\n': return '\0 ' + elif is_id is NO_DEFAULT and not restricted and char in '"*:<>?|/\\': + # Replace with their full-width unicode counterparts + return {'/': '\u29F8', '\\': '\u29f9'}.get(char, chr(ord(char) + 0xfee0)) elif char == '?' 
or ord(char) < 32 or ord(char) == 127: return '' elif char == '"': @@ -731,11 +688,14 @@ def sanitize_filename(s, restricted=False, is_id=NO_DEFAULT): return '\0_' return char + # Replace look-alike Unicode glyphs + if restricted and (is_id is NO_DEFAULT or not is_id): + s = unicodedata.normalize('NFKC', s) s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s) # Handle timestamps result = ''.join(map(replace_insane, s)) if is_id is NO_DEFAULT: - result = re.sub('(\0.)(?:(?=\\1)..)+', r'\1', result) # Remove repeated substitute chars - STRIP_RE = '(?:\0.|[ _-])*' + result = re.sub(r'(\0.)(?:(?=\1)..)+', r'\1', result) # Remove repeated substitute chars + STRIP_RE = r'(?:\0.|[ _-])*' result = re.sub(f'^\0.{STRIP_RE}|{STRIP_RE}\0.$', '', result) # Remove substitute chars from start/end result = result.replace('\0', '') or '_' @@ -759,8 +719,6 @@ def sanitize_path(s, force=False): if sys.platform == 'win32': force = False drive_or_unc, _ = os.path.splitdrive(s) - if sys.version_info < (2, 7) and not drive_or_unc: - drive_or_unc, _ = os.path.splitunc(s) elif force: drive_or_unc = '' else: @@ -774,16 +732,18 @@ def sanitize_path(s, force=False): for path_part in norm_path] if drive_or_unc: sanitized_path.insert(0, drive_or_unc + os.path.sep) - elif force and s[0] == os.path.sep: + elif force and s and s[0] == os.path.sep: sanitized_path.insert(0, os.path.sep) return os.path.join(*sanitized_path) -def sanitize_url(url): +def sanitize_url(url, *, scheme='http'): # Prepend protocol-less URLs with `http:` scheme in order to mitigate # the number of unwanted failures due to missing protocol - if url.startswith('//'): - return 'http:%s' % url + if url is None: + return + elif url.startswith('//'): + return f'{scheme}:{url}' # Fix some common typos seen so far COMMON_TYPOS = ( # https://github.com/ytdl-org/youtube-dl/issues/15649 @@ -798,15 +758,15 @@ def sanitize_url(url): def extract_basic_auth(url): - parts = compat_urlparse.urlsplit(url) + parts = urllib.parse.urlsplit(url) if parts.username is None: return url, None - url = compat_urlparse.urlunsplit(parts._replace(netloc=( + url = urllib.parse.urlunsplit(parts._replace(netloc=( parts.hostname if parts.port is None else '%s:%d' % (parts.hostname, parts.port)))) auth_payload = base64.b64encode( - ('%s:%s' % (parts.username, parts.password or '')).encode('utf-8')) - return url, 'Basic ' + auth_payload.decode('utf-8') + ('%s:%s' % (parts.username, parts.password or '')).encode()) + return url, f'Basic {auth_payload.decode()}' def sanitized_Request(url, *args, **kwargs): @@ -814,7 +774,7 @@ def sanitized_Request(url, *args, **kwargs): if auth_header is not None: headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {}) headers['Authorization'] = auth_header - return compat_urllib_request.Request(url, *args, **kwargs) + return urllib.request.Request(url, *args, **kwargs) def expand_path(s): @@ -822,13 +782,16 @@ def expand_path(s): return os.path.expandvars(compat_expanduser(s)) -def orderedSet(iterable): - """ Remove all duplicates from the input iterable """ - res = [] - for el in iterable: - if el not in res: - res.append(el) - return res +def orderedSet(iterable, *, lazy=False): + """Remove all duplicates from the input iterable""" + def _iter(): + seen = [] # Do not use set since the items can be unhashable + for x in iterable: + if x not in seen: + seen.append(x) + yield x + + return _iter() if lazy else list(_iter()) def _htmlentity_transform(entity_with_semicolon): @@ -836,13 +799,13 @@ def 
_htmlentity_transform(entity_with_semicolon): entity = entity_with_semicolon[:-1] # Known non-numeric HTML entity - if entity in compat_html_entities.name2codepoint: - return compat_chr(compat_html_entities.name2codepoint[entity]) + if entity in html.entities.name2codepoint: + return chr(html.entities.name2codepoint[entity]) - # TODO: HTML5 allows entities without a semicolon. For example, - # 'Éric' should be decoded as 'Éric'. - if entity_with_semicolon in compat_html_entities_html5: - return compat_html_entities_html5[entity_with_semicolon] + # TODO: HTML5 allows entities without a semicolon. + # E.g. 'Éric' should be decoded as 'Éric'. + if entity_with_semicolon in html.entities.html5: + return html.entities.html5[entity_with_semicolon] mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity) if mobj is not None: @@ -853,10 +816,8 @@ def _htmlentity_transform(entity_with_semicolon): else: base = 10 # See https://github.com/ytdl-org/youtube-dl/issues/7518 - try: - return compat_chr(int(numstr, base)) - except ValueError: - pass + with contextlib.suppress(ValueError): + return chr(int(numstr, base)) # Unknown entity in name, return its literal representation return '&%s;' % entity @@ -865,7 +826,7 @@ def _htmlentity_transform(entity_with_semicolon): def unescapeHTML(s): if s is None: return None - assert type(s) == compat_str + assert isinstance(s, str) return re.sub( r'&([^&;]+;)', lambda m: _htmlentity_transform(m.group(1)), s) @@ -883,12 +844,9 @@ def escapeHTML(text): def process_communicate_or_kill(p, *args, **kwargs): - try: - return p.communicate(*args, **kwargs) - except BaseException: # Including KeyboardInterrupt - p.kill() - p.wait() - raise + deprecation_warning(f'"{__name__}.process_communicate_or_kill" is deprecated and may be removed ' + f'in a future version. 
Use "{__name__}.Popen.communicate_or_kill" instead') + return Popen.communicate_or_kill(p, *args, **kwargs) class Popen(subprocess.Popen): @@ -898,11 +856,54 @@ class Popen(subprocess.Popen): else: _startupinfo = None - def __init__(self, *args, **kwargs): - super(Popen, self).__init__(*args, **kwargs, startupinfo=self._startupinfo) + @staticmethod + def _fix_pyinstaller_ld_path(env): + """Restore LD_LIBRARY_PATH when using PyInstaller + Ref: https://github.com/pyinstaller/pyinstaller/blob/develop/doc/runtime-information.rst#ld_library_path--libpath-considerations + https://github.com/hypervideo/hypervideo/issues/4573 + """ + if not hasattr(sys, '_MEIPASS'): + return + + def _fix(key): + orig = env.get(f'{key}_ORIG') + if orig is None: + env.pop(key, None) + else: + env[key] = orig + + _fix('LD_LIBRARY_PATH') # Linux + _fix('DYLD_LIBRARY_PATH') # macOS + + def __init__(self, *args, env=None, text=False, **kwargs): + if env is None: + env = os.environ.copy() + self._fix_pyinstaller_ld_path(env) + + if text is True: + kwargs['universal_newlines'] = True # For 3.6 compatibility + kwargs.setdefault('encoding', 'utf-8') + kwargs.setdefault('errors', 'replace') + super().__init__(*args, env=env, **kwargs, startupinfo=self._startupinfo) def communicate_or_kill(self, *args, **kwargs): - return process_communicate_or_kill(self, *args, **kwargs) + try: + return self.communicate(*args, **kwargs) + except BaseException: # Including KeyboardInterrupt + self.kill(timeout=None) + raise + + def kill(self, *, timeout=0): + super().kill() + if timeout != 0: + self.wait(timeout=timeout) + + @classmethod + def run(cls, *args, timeout=None, **kwargs): + with cls(*args, **kwargs) as proc: + default = '' if proc.text_mode else b'' + stdout, stderr = proc.communicate_or_kill(timeout=timeout) + return stdout or default, stderr or default, proc.returncode def get_subprocess_encoding(): @@ -918,51 +919,23 @@ def get_subprocess_encoding(): def encodeFilename(s, for_subprocess=False): - """ - @param s The name of the file - """ - - assert type(s) == compat_str - - # Python 3 has a Unicode API - if sys.version_info >= (3, 0): - return s - - # Pass '' directly to use Unicode APIs on Windows 2000 and up - # (Detecting Windows NT 4 is tricky because 'major >= 4' would - # match Windows 9x series as well. Besides, NT 4 is obsolete.) 
- if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5: - return s - - # Jython assumes filenames are Unicode strings though reported as Python 2.x compatible - if sys.platform.startswith('java'): - return s - - return s.encode(get_subprocess_encoding(), 'ignore') + assert isinstance(s, str) + return s def decodeFilename(b, for_subprocess=False): - - if sys.version_info >= (3, 0): - return b - - if not isinstance(b, bytes): - return b - - return b.decode(get_subprocess_encoding(), 'ignore') + return b def encodeArgument(s): - if not isinstance(s, compat_str): - # Legacy code that uses byte strings - # Uncomment the following line after fixing all post processors - # assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s)) - s = s.decode('ascii') - return encodeFilename(s, True) + # Legacy code that uses byte strings + # Uncomment the following line after fixing all post processors + # assert isinstance(s, str), 'Internal error: %r should be of type %r, is %r' % (s, str, type(s)) + return s if isinstance(s, str) else s.decode('ascii') def decodeArgument(b): - return decodeFilename(b, True) + return b def decodeOption(optval): @@ -971,7 +944,7 @@ def decodeOption(optval): if isinstance(optval, bytes): optval = optval.decode(preferredencoding()) - assert isinstance(optval, compat_str) + assert isinstance(optval, str) return optval @@ -1005,10 +978,8 @@ def _ssl_load_windows_store_certs(ssl_context, storename): except PermissionError: return for cert in certs: - try: + with contextlib.suppress(ssl.SSLError): ssl_context.load_verify_locations(cadata=cert) - except ssl.SSLError: - pass def make_HTTPS_handler(params, **kwargs): @@ -1017,6 +988,28 @@ def make_HTTPS_handler(params, **kwargs): context.check_hostname = opts_check_certificate if params.get('legacyserverconnect'): context.options |= 4 # SSL_OP_LEGACY_SERVER_CONNECT + # Allow use of weaker ciphers in Python 3.10+. See https://bugs.python.org/issue43998 + context.set_ciphers('DEFAULT') + elif ( + sys.version_info < (3, 10) + and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1) + and not ssl.OPENSSL_VERSION.startswith('LibreSSL') + ): + # Backport the default SSL ciphers and minimum TLS version settings from Python 3.10 [1]. + # This is to ensure consistent behavior across Python versions, and help avoid fingerprinting + # in some situations [2][3]. + # Python 3.10 only supports OpenSSL 1.1.1+ [4]. Because this change is likely + # untested on older versions, we only apply this to OpenSSL 1.1.1+ to be safe. + # LibreSSL is excluded until further investigation due to cipher support issues [5][6]. + # 1. https://github.com/python/cpython/commit/e983252b516edb15d4338b0a47631b59ef1e2536 + # 2. https://github.com/hypervideo/hypervideo/issues/4627 + # 3. https://github.com/hypervideo/hypervideo/pull/5294 + # 4. https://peps.python.org/pep-0644/ + # 5. https://peps.python.org/pep-0644/#libressl-support + # 6. 
https://github.com/hypervideo/hypervideo/commit/5b9f253fa0aee996cf1ed30185d4b502e00609c4#commitcomment-89054368 + context.set_ciphers('@SECLEVEL=2:ECDH+AESGCM:ECDH+CHACHA20:ECDH+AES:DHE+AES:!aNULL:!eNULL:!aDSS:!SHA1:!AESCCM') + context.minimum_version = ssl.TLSVersion.TLSv1_2 + context.verify_mode = ssl.CERT_REQUIRED if opts_check_certificate else ssl.CERT_NONE if opts_check_certificate: if has_certifi and 'no-certifi' not in params.get('compat_opts', []): @@ -1030,12 +1023,25 @@ def make_HTTPS_handler(params, **kwargs): except ssl.SSLError: # enum_certificates is not present in mingw python. See https://github.com/hypervideo/hypervideo/issues/1151 if sys.platform == 'win32' and hasattr(ssl, 'enum_certificates'): - # Create a new context to discard any certificates that were already loaded - context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) - context.check_hostname, context.verify_mode = True, ssl.CERT_REQUIRED for storename in ('CA', 'ROOT'): _ssl_load_windows_store_certs(context, storename) context.set_default_verify_paths() + + client_certfile = params.get('client_certificate') + if client_certfile: + try: + context.load_cert_chain( + client_certfile, keyfile=params.get('client_certificate_key'), + password=params.get('client_certificate_password')) + except ssl.SSLError: + raise YoutubeDLError('Unable to load client certificate') + + # Some servers may reject requests if ALPN extension is not sent. See: + # https://github.com/python/cpython/issues/85140 + # https://github.com/hypervideo/hypervideo/issues/3878 + with contextlib.suppress(NotImplementedError): + context.set_alpn_protocols(['http/1.1']) + return YoutubeDLHTTPSHandler(params, context=context, **kwargs) @@ -1063,7 +1069,7 @@ class YoutubeDLError(Exception): super().__init__(self.msg) -network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error] +network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error] if hasattr(ssl, 'CertificateError'): network_exceptions.append(ssl.CertificateError) network_exceptions = tuple(network_exceptions) @@ -1086,13 +1092,18 @@ class ExtractorError(YoutubeDLError): self.video_id = video_id self.ie = ie self.exc_info = sys.exc_info() # preserve original exception + if isinstance(self.exc_info[1], ExtractorError): + self.exc_info = self.exc_info[1].exc_info + super().__init__(self.__msg) - super(ExtractorError, self).__init__(''.join(( - format_field(ie, template='[%s] '), - format_field(video_id, template='%s: '), - msg, - format_field(cause, template=' (caused by %r)'), - '' if expected else bug_reports_message()))) + @property + def __msg(self): + return ''.join(( + format_field(self.ie, None, '[%s] '), + format_field(self.video_id, None, '%s: '), + self.orig_msg, + format_field(self.cause, None, ' (caused by %r)'), + '' if self.expected else bug_reports_message())) def format_traceback(self): return join_nonempty( @@ -1100,10 +1111,16 @@ class ExtractorError(YoutubeDLError): self.cause and ''.join(traceback.format_exception(None, self.cause, self.cause.__traceback__)[1:]), delim='\n') or None + def __setattr__(self, name, value): + super().__setattr__(name, value) + if getattr(self, 'msg', None) and name not in ('msg', 'args'): + self.msg = self.__msg or type(self).__name__ + self.args = (self.msg, ) # Cannot be property + class UnsupportedError(ExtractorError): def __init__(self, url): - super(UnsupportedError, self).__init__( + super().__init__( 'Unsupported URL: %s' % url, expected=True) self.url = url @@ -1122,10 +1139,18 @@ 
class GeoRestrictedError(ExtractorError): def __init__(self, msg, countries=None, **kwargs): kwargs['expected'] = True - super(GeoRestrictedError, self).__init__(msg, **kwargs) + super().__init__(msg, **kwargs) self.countries = countries +class UserNotLive(ExtractorError): + """Error when a channel/user is not live""" + + def __init__(self, msg=None, **kwargs): + kwargs['expected'] = True + super().__init__(msg or 'The channel is not currently live', **kwargs) + + class DownloadError(YoutubeDLError): """Download Error exception. @@ -1136,7 +1161,7 @@ class DownloadError(YoutubeDLError): def __init__(self, msg, exc_info=None): """ exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """ - super(DownloadError, self).__init__(msg) + super().__init__(msg) self.exc_info = exc_info @@ -1230,9 +1255,7 @@ class ContentTooShortError(YoutubeDLError): """ def __init__(self, downloaded, expected): - super(ContentTooShortError, self).__init__( - 'Downloaded {0} bytes, expected {1} bytes'.format(downloaded, expected) - ) + super().__init__(f'Downloaded {downloaded} bytes, expected {expected} bytes') # Both in bytes self.downloaded = downloaded self.expected = expected @@ -1240,7 +1263,7 @@ class ContentTooShortError(YoutubeDLError): class XAttrMetadataError(YoutubeDLError): def __init__(self, code=None, msg='Unknown error'): - super(XAttrMetadataError, self).__init__(msg) + super().__init__(msg) self.code = code self.msg = msg @@ -1259,12 +1282,7 @@ class XAttrUnavailableError(YoutubeDLError): def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): - # Working around python 2 bug (see http://bugs.python.org/issue17849) by limiting - # expected HTTP responses to meet HTTP/1.0 or later (see also - # https://github.com/ytdl-org/youtube-dl/issues/6727) - if sys.version_info < (3, 0): - kwargs['strict'] = True - hc = http_class(*args, **compat_kwargs(kwargs)) + hc = http_class(*args, **kwargs) source_address = ydl_handler._params.get('source_address') if source_address is not None: @@ -1281,7 +1299,7 @@ def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): ip_addrs = [addr for addr in addrs if addr[0] == af] if addrs and not ip_addrs: ip_version = 'v4' if af == socket.AF_INET else 'v6' - raise socket.error( + raise OSError( "No remote IP%s addresses available for connect, can't use '%s' as source address" % (ip_version, source_address[0])) for res in ip_addrs: @@ -1295,30 +1313,17 @@ def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs): sock.connect(sa) err = None # Explicitly break reference cycle return sock - except socket.error as _: + except OSError as _: err = _ if sock is not None: sock.close() if err is not None: raise err else: - raise socket.error('getaddrinfo returns an empty list') + raise OSError('getaddrinfo returns an empty list') if hasattr(hc, '_create_connection'): hc._create_connection = _create_connection - sa = (source_address, 0) - if hasattr(hc, 'source_address'): # Python 2.7+ - hc.source_address = sa - else: # Python 2.6 - def _hc_connect(self, *args, **kwargs): - sock = _create_connection( - (self.host, self.port), self.timeout, sa) - if is_https: - self.sock = ssl.wrap_socket( - sock, self.key_file, self.cert_file, - ssl_version=ssl.PROTOCOL_TLSv1) - else: - self.sock = sock - hc.connect = functools.partial(_hc_connect, hc) + hc.source_address = (source_address, 0) return hc @@ -1327,13 +1332,13 @@ def handle_youtubedl_headers(headers): filtered_headers = 
headers if 'Youtubedl-no-compression' in filtered_headers: - filtered_headers = dict((k, v) for k, v in filtered_headers.items() if k.lower() != 'accept-encoding') + filtered_headers = {k: v for k, v in filtered_headers.items() if k.lower() != 'accept-encoding'} del filtered_headers['Youtubedl-no-compression'] return filtered_headers -class YoutubeDLHandler(compat_urllib_request.HTTPHandler): +class YoutubeDLHandler(urllib.request.HTTPHandler): """Handler for HTTP requests and responses. This class, when installed with an OpenerDirector, automatically adds @@ -1352,11 +1357,11 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): """ def __init__(self, params, *args, **kwargs): - compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs) + urllib.request.HTTPHandler.__init__(self, *args, **kwargs) self._params = params def http_open(self, req): - conn_class = compat_http_client.HTTPConnection + conn_class = http.client.HTTPConnection socks_proxy = req.headers.get('Ytdl-socks-proxy') if socks_proxy: @@ -1380,7 +1385,7 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): def brotli(data): if not data: return data - return compat_brotli.decompress(data) + return brotli.decompress(data) def http_request(self, req): # According to RFC 3986, URLs can not contain non-ASCII characters, however this is not @@ -1409,12 +1414,7 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): req.headers = handle_youtubedl_headers(req.headers) - if sys.version_info < (2, 7) and '#' in req.get_full_url(): - # Python 2.6 is brain-dead when it comes to fragments - req._Request__original = req._Request__original.partition('#')[0] - req._Request__r_type = req._Request__r_type.partition('#')[0] - - return req + return super().do_request_(req) def http_response(self, req, resp): old_resp = resp @@ -1424,30 +1424,30 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb') try: uncompressed = io.BytesIO(gz.read()) - except IOError as original_ioerror: + except OSError as original_ioerror: # There may be junk add the end of the file # See http://stackoverflow.com/q/4928560/35070 for details for i in range(1, 1024): try: gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb') uncompressed = io.BytesIO(gz.read()) - except IOError: + except OSError: continue break else: raise original_ioerror - resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) + resp = urllib.request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # deflate if resp.headers.get('Content-encoding', '') == 'deflate': gz = io.BytesIO(self.deflate(resp.read())) - resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) + resp = urllib.request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] # brotli if resp.headers.get('Content-encoding', '') == 'br': - resp = compat_urllib_request.addinfourl( + resp = urllib.request.addinfourl( io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code) resp.msg = old_resp.msg del resp.headers['Content-encoding'] @@ -1457,15 +1457,10 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): location = resp.headers.get('Location') if location: # As of RFC 2616 default charset is iso-8859-1 that is respected by python 3 - if sys.version_info >= (3, 0): - 
location = location.encode('iso-8859-1').decode('utf-8') - else: - location = location.decode('utf-8') + location = location.encode('iso-8859-1').decode() location_escaped = escape_url(location) if location != location_escaped: del resp.headers['Location'] - if sys.version_info < (3, 0): - location_escaped = location_escaped.encode('utf-8') resp.headers['Location'] = location_escaped return resp @@ -1475,9 +1470,9 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler): def make_socks_conn_class(base_class, socks_proxy): assert issubclass(base_class, ( - compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection)) + http.client.HTTPConnection, http.client.HTTPSConnection)) - url_components = compat_urlparse.urlparse(socks_proxy) + url_components = urllib.parse.urlparse(socks_proxy) if url_components.scheme.lower() == 'socks5': socks_type = ProxyType.SOCKS5 elif url_components.scheme.lower() in ('socks', 'socks4'): @@ -1488,7 +1483,7 @@ def make_socks_conn_class(base_class, socks_proxy): def unquote_if_non_empty(s): if not s: return s - return compat_urllib_parse_unquote_plus(s) + return urllib.parse.unquote_plus(s) proxy_args = ( socks_type, @@ -1502,11 +1497,11 @@ def make_socks_conn_class(base_class, socks_proxy): def connect(self): self.sock = sockssocket() self.sock.setproxy(*proxy_args) - if type(self.timeout) in (int, float): + if isinstance(self.timeout, (int, float)): self.sock.settimeout(self.timeout) self.sock.connect((self.host, self.port)) - if isinstance(self, compat_http_client.HTTPSConnection): + if isinstance(self, http.client.HTTPSConnection): if hasattr(self, '_context'): # Python > 2.6 self.sock = self._context.wrap_socket( self.sock, server_hostname=self.host) @@ -1516,10 +1511,10 @@ def make_socks_conn_class(base_class, socks_proxy): return SocksConnection -class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): +class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler): def __init__(self, params, https_conn_class=None, *args, **kwargs): - compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs) - self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection + urllib.request.HTTPSHandler.__init__(self, *args, **kwargs) + self._https_conn_class = https_conn_class or http.client.HTTPSConnection self._params = params def https_open(self, req): @@ -1536,12 +1531,21 @@ class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler): conn_class = make_socks_conn_class(conn_class, socks_proxy) del req.headers['Ytdl-socks-proxy'] - return self.do_open(functools.partial( - _create_http_connection, self, conn_class, True), - req, **kwargs) + try: + return self.do_open( + functools.partial(_create_http_connection, self, conn_class, True), req, **kwargs) + except urllib.error.URLError as e: + if (isinstance(e.reason, ssl.SSLError) + and getattr(e.reason, 'reason', None) == 'SSLV3_ALERT_HANDSHAKE_FAILURE'): + raise YoutubeDLError('SSLV3_ALERT_HANDSHAKE_FAILURE: Try using --legacy-server-connect') + raise -class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar): +def is_path_like(f): + return isinstance(f, (str, bytes, os.PathLike)) + + +class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar): """ See [1] for cookie file format. 
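A minimal standalone sketch of the new is_path_like() helper introduced just above (the function body is verbatim from the diff; the surrounding assertions are illustrative only, not part of the patch). It lets YoutubeDLCookieJar accept either a filesystem path or an already-open file object, mirroring what open() itself accepts:

import io
import os
from pathlib import Path

def is_path_like(f):
    # True for anything open()/os.fspath() would treat as a path
    return isinstance(f, (str, bytes, os.PathLike))

assert is_path_like('cookies.txt')
assert is_path_like(b'cookies.txt')
assert is_path_like(Path('cookies.txt'))
assert not is_path_like(io.StringIO())  # file objects take the other branch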
@@ -1557,57 +1561,67 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar): 'CookieFileEntry', ('domain_name', 'include_subdomains', 'path', 'https_only', 'expires_at', 'name', 'value')) - def save(self, filename=None, ignore_discard=False, ignore_expires=False): + def __init__(self, filename=None, *args, **kwargs): + super().__init__(None, *args, **kwargs) + if is_path_like(filename): + filename = os.fspath(filename) + self.filename = filename + + @staticmethod + def _true_or_false(cndn): + return 'TRUE' if cndn else 'FALSE' + + @contextlib.contextmanager + def open(self, file, *, write=False): + if is_path_like(file): + with open(file, 'w' if write else 'r', encoding='utf-8') as f: + yield f + else: + if write: + file.truncate(0) + yield file + + def _really_save(self, f, ignore_discard=False, ignore_expires=False): + now = time.time() + for cookie in self: + if (not ignore_discard and cookie.discard + or not ignore_expires and cookie.is_expired(now)): + continue + name, value = cookie.name, cookie.value + if value is None: + # cookies.txt regards 'Set-Cookie: foo' as a cookie + # with no name, whereas http.cookiejar regards it as a + # cookie with no value. + name, value = '', name + f.write('%s\n' % '\t'.join(( + cookie.domain, + self._true_or_false(cookie.domain.startswith('.')), + cookie.path, + self._true_or_false(cookie.secure), + str_or_none(cookie.expires, default=''), + name, value + ))) + + def save(self, filename=None, *args, **kwargs): """ Save cookies to a file. + Code is taken from CPython 3.6 + https://github.com/python/cpython/blob/8d999cbf4adea053be6dbb612b9844635c4dfb8e/Lib/http/cookiejar.py#L2091-L2117 """ - Most of the code is taken from CPython 3.8 and slightly adapted - to support cookie files with UTF-8 in both python 2 and 3. - """ if filename is None: if self.filename is not None: filename = self.filename else: - raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT) + raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT) - # Store session cookies with `expires` set to 0 instead of an empty - # string + # Store session cookies with `expires` set to 0 instead of an empty string for cookie in self: if cookie.expires is None: cookie.expires = 0 - with io.open(filename, 'w', encoding='utf-8') as f: + with self.open(filename, write=True) as f: f.write(self._HEADER) - now = time.time() - for cookie in self: - if not ignore_discard and cookie.discard: - continue - if not ignore_expires and cookie.is_expired(now): - continue - if cookie.secure: - secure = 'TRUE' - else: - secure = 'FALSE' - if cookie.domain.startswith('.'): - initial_dot = 'TRUE' - else: - initial_dot = 'FALSE' - if cookie.expires is not None: - expires = compat_str(cookie.expires) - else: - expires = '' - if cookie.value is None: - # cookies.txt regards 'Set-Cookie: foo' as a cookie - # with no name, whereas http.cookiejar regards it as a - # cookie with no value. 
- name = '' - value = cookie.name - else: - name = cookie.name - value = cookie.value - f.write( - '\t'.join([cookie.domain, initial_dot, cookie.path, - secure, expires, name, value]) + '\n') + self._really_save(f, *args, **kwargs) def load(self, filename=None, ignore_discard=False, ignore_expires=False): """Load cookies from a file.""" @@ -1615,7 +1629,7 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar): if self.filename is not None: filename = self.filename else: - raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT) + raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT) def prepare_line(line): if line.startswith(self._HTTPONLY_PREFIX): @@ -1625,21 +1639,23 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar): return line cookie_list = line.split('\t') if len(cookie_list) != self._ENTRY_LEN: - raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list)) + raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list)) cookie = self._CookieFileEntry(*cookie_list) if cookie.expires_at and not cookie.expires_at.isdigit(): - raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at) + raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at) return line cf = io.StringIO() - with io.open(filename, encoding='utf-8') as f: + with self.open(filename) as f: for line in f: try: cf.write(prepare_line(line)) - except compat_cookiejar.LoadError as e: - write_string( - 'WARNING: skipping cookie file entry due to %s: %r\n' - % (e, line), sys.stderr) + except http.cookiejar.LoadError as e: + if f'{line.strip()} '[0] in '[{"': + raise http.cookiejar.LoadError( + 'Cookies file must be Netscape formatted, not JSON. See ' + 'https://github.com/hypervideo/hypervideo/wiki/FAQ#how-do-i-pass-cookies-to-hypervideo') + write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n') continue cf.seek(0) self._really_load(cf, filename, ignore_discard, ignore_expires) @@ -1659,31 +1675,18 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar): cookie.discard = True -class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor): +class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor): def __init__(self, cookiejar=None): - compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar) + urllib.request.HTTPCookieProcessor.__init__(self, cookiejar) def http_response(self, request, response): - # Python 2 will choke on next HTTP request in row if there are non-ASCII - # characters in Set-Cookie HTTP header of last response (see - # https://github.com/ytdl-org/youtube-dl/issues/6769). - # In order to at least prevent crashing we will percent encode Set-Cookie - # header before HTTPCookieProcessor starts processing it. 
- # if sys.version_info < (3, 0) and response.headers: - # for set_cookie_header in ('Set-Cookie', 'Set-Cookie2'): - # set_cookie = response.headers.get(set_cookie_header) - # if set_cookie: - # set_cookie_escaped = compat_urllib_parse.quote(set_cookie, b"%/;:@&=+$,!~*'()?#[] ") - # if set_cookie != set_cookie_escaped: - # del response.headers[set_cookie_header] - # response.headers[set_cookie_header] = set_cookie_escaped - return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response) - - https_request = compat_urllib_request.HTTPCookieProcessor.http_request + return urllib.request.HTTPCookieProcessor.http_response(self, request, response) + + https_request = urllib.request.HTTPCookieProcessor.http_request https_response = http_response -class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler): +class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler): """YoutubeDL redirect handler The code is based on HTTPRedirectHandler implementation from CPython [1]. @@ -1698,7 +1701,7 @@ class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler): 3. https://github.com/ytdl-org/youtube-dl/issues/28768 """ - http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302 + http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302 def redirect_request(self, req, fp, code, msg, headers, newurl): """Return a Request or None in response to a redirect. @@ -1713,19 +1716,13 @@ class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler): m = req.get_method() if (not (code in (301, 302, 303, 307, 308) and m in ("GET", "HEAD") or code in (301, 302, 303) and m == "POST")): - raise compat_HTTPError(req.full_url, code, msg, headers, fp) + raise urllib.error.HTTPError(req.full_url, code, msg, headers, fp) # Strictly (according to RFC 2616), 301 or 302 in response to # a POST MUST NOT cause a redirection without confirmation # from the user (of urllib.request, in this case). In practice, # essentially all clients do redirect in this case, so we do # the same. - # On python 2 urlh.geturl() may sometimes return redirect URL - # as byte string instead of unicode. This workaround allows - # to force it always return unicode. - if sys.version_info[0] < 3: - newurl = compat_str(newurl) - # Be conciliant with URIs containing a space. This is mainly # redundant with the more complete encoding done in http_error_302(), # but it is kept for compatibility with other callers. @@ -1733,11 +1730,22 @@ class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler): CONTENT_HEADERS = ("content-length", "content-type") # NB: don't use dict comprehension for python 2.6 compatibility - newheaders = dict((k, v) for k, v in req.headers.items() - if k.lower() not in CONTENT_HEADERS) - return compat_urllib_request.Request( + newheaders = {k: v for k, v in req.headers.items() if k.lower() not in CONTENT_HEADERS} + + # A 303 must either use GET or HEAD for subsequent request + # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.4 + if code == 303 and m != 'HEAD': + m = 'GET' + # 301 and 302 redirects are commonly turned into a GET from a POST + # for subsequent requests by browsers, so we'll do the same. 
+ # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.2 + # https://datatracker.ietf.org/doc/html/rfc7231#section-6.4.3 + if code in (301, 302) and m == 'POST': + m = 'GET' + + return urllib.request.Request( newurl, headers=newheaders, origin_req_host=req.origin_req_host, - unverifiable=True) + unverifiable=True, method=m) def extract_timezone(date_str): @@ -1753,7 +1761,11 @@ def extract_timezone(date_str): $) ''', date_str) if not m: - timezone = datetime.timedelta() + m = re.search(r'\d{1,2}:\d{1,2}(?:\.\d+)?(?P<tz>\s*[A-Z]+)$', date_str) + timezone = TIMEZONE_NAMES.get(m and m.group('tz').strip()) + if timezone is not None: + date_str = date_str[:-len(m.group('tz'))] + timezone = datetime.timedelta(hours=timezone or 0) else: date_str = date_str[:-len(m.group('tz'))] if not m.group('sign'): @@ -1777,12 +1789,10 @@ def parse_iso8601(date_str, delimiter='T', timezone=None): if timezone is None: timezone, date_str = extract_timezone(date_str) - try: - date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter) + with contextlib.suppress(ValueError): + date_format = f'%Y-%m-%d{delimiter}%H:%M:%S' dt = datetime.datetime.strptime(date_str, date_format) - timezone return calendar.timegm(dt.timetuple()) - except ValueError: - pass def date_formats(day_first=True): @@ -1802,26 +1812,23 @@ def unified_strdate(date_str, day_first=True): _, date_str = extract_timezone(date_str) for expression in date_formats(day_first): - try: + with contextlib.suppress(ValueError): upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d') - except ValueError: - pass if upload_date is None: timetuple = email.utils.parsedate_tz(date_str) if timetuple: - try: + with contextlib.suppress(ValueError): upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d') - except ValueError: - pass if upload_date is not None: - return compat_str(upload_date) + return str(upload_date) def unified_timestamp(date_str, day_first=True): if date_str is None: return None - date_str = re.sub(r'[,|]', '', date_str) + date_str = re.sub(r'\s+', ' ', re.sub( + r'(?i)[,|]|(mon|tues?|wed(nes)?|thu(rs)?|fri|sat(ur)?)(day)?', '', date_str)) pm_delta = 12 if re.search(r'(?i)PM', date_str) else 0 timezone, date_str = extract_timezone(date_str) @@ -1840,14 +1847,13 @@ def unified_timestamp(date_str, day_first=True): date_str = m.group(1) for expression in date_formats(day_first): - try: + with contextlib.suppress(ValueError): dt = datetime.datetime.strptime(date_str, expression) - timezone + datetime.timedelta(hours=pm_delta) return calendar.timegm(dt.timetuple()) - except ValueError: - pass + timetuple = email.utils.parsedate_tz(date_str) if timetuple: - return calendar.timegm(timetuple) + pm_delta * 3600 + return calendar.timegm(timetuple) + pm_delta * 3600 - timezone.total_seconds() def determine_ext(url, default_ext='unknown_video'): @@ -1868,14 +1874,14 @@ def subtitles_filename(filename, sub_lang, sub_format, expected_real_ext=None): def datetime_from_str(date_str, precision='auto', format='%Y%m%d'): - """ - Return a datetime object from a string in the format YYYYMMDD or - (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)? - - format: string date format used to return datetime object from - precision: round the time portion of a datetime object. - auto|microsecond|second|minute|hour|day. - auto: round to the unit provided in date_str (if applicable). + R""" + Return a datetime object from a string. 
+ Supported format: + (now|today|yesterday|DATE)([+-]\d+(microsecond|second|minute|hour|day|week|month|year)s?)? + + @param format strftime format of DATE + @param precision Round the datetime object: auto|microsecond|second|minute|hour|day + auto: round to the unit provided in date_str (if applicable). """ auto_precision = False if precision == 'auto': @@ -1887,7 +1893,7 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'): if date_str == 'yesterday': return today - datetime.timedelta(days=1) match = re.match( - r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)(s)?', + r'(?P<start>.+)(?P<sign>[+-])(?P<time>\d+)(?P<unit>microsecond|second|minute|hour|day|week|month|year)s?', date_str) if match is not None: start_time = datetime_from_str(match.group('start'), precision, format) @@ -1910,16 +1916,14 @@ def datetime_from_str(date_str, precision='auto', format='%Y%m%d'): def date_from_str(date_str, format='%Y%m%d', strict=False): - """ - Return a datetime object from a string in the format YYYYMMDD or - (now|today|yesterday|date)[+-][0-9](microsecond|second|minute|hour|day|week|month|year)(s)? - - If "strict", only (now|today)[+-][0-9](day|week|month|year)(s)? is allowed + R""" + Return a date object from a string using datetime_from_str - format: string date format used to return datetime object from + @param strict Restrict allowed patterns to "YYYYMMDD" and + (now|today|yesterday)(-\d+(day|week|month|year)s?)? """ - if strict and not re.fullmatch(r'\d{8}|(now|today)[+-]\d+(day|week|month|year)(s)?', date_str): - raise ValueError(f'Invalid date format {date_str}') + if strict and not re.fullmatch(r'\d{8}|(now|today|yesterday)(-\d+(day|week|month|year)s?)?', date_str): + raise ValueError(f'Invalid date format "{date_str}"') return datetime_from_str(date_str, precision='microsecond', format=format).date() @@ -1960,7 +1964,7 @@ def hyphenate_date(date_str): return date_str -class DateRange(object): +class DateRange: """Represents a time interval between two dates""" def __init__(self, start=None, end=None): @@ -1988,121 +1992,81 @@ class DateRange(object): return self.start <= date <= self.end def __str__(self): - return '%s - %s' % (self.start.isoformat(), self.end.isoformat()) + return f'{self.start.isoformat()} - {self.end.isoformat()}' + def __eq__(self, other): + return (isinstance(other, DateRange) + and self.start == other.start and self.end == other.end) -def platform_name(): - """ Returns the platform name as a compat_str """ - res = platform.platform() - if isinstance(res, bytes): - res = res.decode(preferredencoding()) - assert isinstance(res, compat_str) - return res +def platform_name(): + """ Returns the platform name as a str """ + deprecation_warning(f'"{__name__}.platform_name" is deprecated, use "platform.platform" instead') + return platform.platform() + + +@functools.cache +def system_identifier(): + python_implementation = platform.python_implementation() + if python_implementation == 'PyPy' and hasattr(sys, 'pypy_version_info'): + python_implementation += ' version %d.%d.%d' % sys.pypy_version_info[:3] + libc_ver = [] + with contextlib.suppress(OSError): # We may not have access to the executable + libc_ver = platform.libc_ver() + + return 'Python %s (%s %s %s) - %s (%s%s)' % ( + platform.python_version(), + python_implementation, + platform.machine(), + platform.architecture()[0], + platform.platform(), + ssl.OPENSSL_VERSION, + format_field(join_nonempty(*libc_ver, delim=' '), None, ', %s'), + ) 
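# --- editor's note: illustrative sketch, not part of the patch ---
# system_identifier() above, like preferredencoding() and get_windows_version(),
# is now memoized with @functools.cache: the probe body runs once and every
# later call returns the cached result. The diff imports functools from the
# project's compat layer; plain stdlib functools (Python 3.9+) behaves the same.
# The helper name _system_summary is hypothetical:
import functools
import platform
import ssl

@functools.cache
def _system_summary():
    # Environment probes are reused many times (e.g. in debug output),
    # so they are computed once and cached for the life of the process.
    return f'Python {platform.python_version()} - {platform.platform()} ({ssl.OPENSSL_VERSION})'

assert _system_summary() is _system_summary()  # same cached str object
# --- end editor's note ---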
+@functools.cache def get_windows_version(): - ''' Get Windows version. None if it's not running on Windows ''' + ''' Get Windows version. returns () if it's not running on Windows ''' if compat_os_name == 'nt': return version_tuple(platform.win32_ver()[1]) else: - return None - + return () -def _windows_write_string(s, out): - """ Returns True if the string was written using special methods, - False if it has yet to be written out.""" - # Adapted from http://stackoverflow.com/a/3259271/35070 - import ctypes.wintypes - - WIN_OUTPUT_IDS = { - 1: -11, - 2: -12, - } - - try: - fileno = out.fileno() - except AttributeError: - # If the output stream doesn't have a fileno, it's virtual - return False - except io.UnsupportedOperation: - # Some strange Windows pseudo files? - return False - if fileno not in WIN_OUTPUT_IDS: - return False - - GetStdHandle = compat_ctypes_WINFUNCTYPE( - ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)( - ('GetStdHandle', ctypes.windll.kernel32)) - h = GetStdHandle(WIN_OUTPUT_IDS[fileno]) - - WriteConsoleW = compat_ctypes_WINFUNCTYPE( - ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR, - ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD), - ctypes.wintypes.LPVOID)(('WriteConsoleW', ctypes.windll.kernel32)) - written = ctypes.wintypes.DWORD(0) - - GetFileType = compat_ctypes_WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)(('GetFileType', ctypes.windll.kernel32)) - FILE_TYPE_CHAR = 0x0002 - FILE_TYPE_REMOTE = 0x8000 - GetConsoleMode = compat_ctypes_WINFUNCTYPE( - ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, - ctypes.POINTER(ctypes.wintypes.DWORD))( - ('GetConsoleMode', ctypes.windll.kernel32)) - INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value - - def not_a_console(handle): - if handle == INVALID_HANDLE_VALUE or handle is None: - return True - return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR - or GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0) +def write_string(s, out=None, encoding=None): + assert isinstance(s, str) + out = out or sys.stderr - if not_a_console(h): - return False + if compat_os_name == 'nt' and supports_terminal_sequences(out): + s = re.sub(r'([\r\n]+)', r' \1', s) - def next_nonbmp_pos(s): - try: - return next(i for i, c in enumerate(s) if ord(c) > 0xffff) - except StopIteration: - return len(s) - - while s: - count = min(next_nonbmp_pos(s), 1024) - - ret = WriteConsoleW( - h, s, count if count else 2, ctypes.byref(written), None) - if ret == 0: - raise OSError('Failed to write string') - if not count: # We just wrote a non-BMP character - assert written.value == 2 - s = s[1:] - else: - assert written.value > 0 - s = s[written.value:] - return True + enc, buffer = None, out + if 'b' in getattr(out, 'mode', ''): + enc = encoding or preferredencoding() + elif hasattr(out, 'buffer'): + buffer = out.buffer + enc = encoding or getattr(out, 'encoding', None) or preferredencoding() + buffer.write(s.encode(enc, 'ignore') if enc else s) + out.flush() -def write_string(s, out=None, encoding=None): - if out is None: - out = sys.stderr - assert type(s) == compat_str - if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'): - if _windows_write_string(s, out): +def deprecation_warning(msg, *, printer=None, stacklevel=0, **kwargs): + from . 
import _IN_CLI + if _IN_CLI: + if msg in deprecation_warning._cache: return - - if ('b' in getattr(out, 'mode', '') - or sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr - byt = s.encode(encoding or preferredencoding(), 'ignore') - out.write(byt) - elif hasattr(out, 'buffer'): - enc = encoding or getattr(out, 'encoding', None) or preferredencoding() - byt = s.encode(enc, 'ignore') - out.buffer.write(byt) + deprecation_warning._cache.add(msg) + if printer: + return printer(f'{msg}{bug_reports_message()}', **kwargs) + return write_string(f'ERROR: {msg}{bug_reports_message()}\n', **kwargs) else: - out.write(s) - out.flush() + import warnings + warnings.warn(DeprecationWarning(msg), stacklevel=stacklevel + 3) + + +deprecation_warning._cache = set() def bytes_to_intlist(bs): @@ -2117,11 +2081,19 @@ def bytes_to_intlist(bs): def intlist_to_bytes(xs): if not xs: return b'' - return compat_struct_pack('%dB' % len(xs), *xs) + return struct.pack('%dB' % len(xs), *xs) + + +class LockingUnsupportedError(OSError): + msg = 'File locking is not supported' + + def __init__(self): + super().__init__(self.msg) # Cross-platform file locking if sys.platform == 'win32': + import ctypes import ctypes.wintypes import msvcrt @@ -2167,7 +2139,8 @@ if sys.platform == 'win32': if not LockFileEx(msvcrt.get_osfhandle(f.fileno()), (0x2 if exclusive else 0x0) | (0x0 if block else 0x1), 0, whole_low, whole_high, f._lock_file_overlapped_p): - raise BlockingIOError('Locking file failed: %r' % ctypes.FormatError()) + # NB: No argument form of "ctypes.FormatError" does not work on PyPy + raise BlockingIOError(f'Locking file failed: {ctypes.FormatError(ctypes.GetLastError())!r}') def _unlock_file(f): assert f._lock_file_overlapped_p @@ -2180,18 +2153,15 @@ else: import fcntl def _lock_file(f, exclusive, block): + flags = fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH + if not block: + flags |= fcntl.LOCK_NB try: - fcntl.flock(f, - fcntl.LOCK_SH if not exclusive - else fcntl.LOCK_EX if block - else fcntl.LOCK_EX | fcntl.LOCK_NB) + fcntl.flock(f, flags) except BlockingIOError: raise except OSError: # AOSP does not have flock() - fcntl.lockf(f, - fcntl.LOCK_SH if not exclusive - else fcntl.LOCK_EX if block - else fcntl.LOCK_EX | fcntl.LOCK_NB) + fcntl.lockf(f, flags) def _unlock_file(f): try: @@ -2200,60 +2170,80 @@ else: fcntl.lockf(f, fcntl.LOCK_UN) except ImportError: - UNSUPPORTED_MSG = 'file locking is not supported on this platform' def _lock_file(f, exclusive, block): - raise IOError(UNSUPPORTED_MSG) + raise LockingUnsupportedError() def _unlock_file(f): - raise IOError(UNSUPPORTED_MSG) + raise LockingUnsupportedError() -class locked_file(object): - _closed = False +class locked_file: + locked = False def __init__(self, filename, mode, block=True, encoding=None): - assert mode in ['r', 'rb', 'a', 'ab', 'w', 'wb'] - self.f = io.open(filename, mode, encoding=encoding) - self.mode = mode - self.block = block + if mode not in {'r', 'rb', 'a', 'ab', 'w', 'wb'}: + raise NotImplementedError(mode) + self.mode, self.block = mode, block + + writable = any(f in mode for f in 'wax+') + readable = any(f in mode for f in 'r+') + flags = functools.reduce(operator.ior, ( + getattr(os, 'O_CLOEXEC', 0), # UNIX only + getattr(os, 'O_BINARY', 0), # Windows only + getattr(os, 'O_NOINHERIT', 0), # Windows only + os.O_CREAT if writable else 0, # O_TRUNC only after locking + os.O_APPEND if 'a' in mode else 0, + os.O_EXCL if 'x' in mode else 0, + os.O_RDONLY if not writable else os.O_RDWR if readable else os.O_WRONLY, + )) + + 
self.f = os.fdopen(os.open(filename, flags, 0o666), mode, encoding=encoding) def __enter__(self): exclusive = 'r' not in self.mode try: _lock_file(self.f, exclusive, self.block) - except IOError: + self.locked = True + except OSError: self.f.close() raise + if 'w' in self.mode: + try: + self.f.truncate() + except OSError as e: + if e.errno not in ( + errno.ESPIPE, # Illegal seek - expected for FIFO + errno.EINVAL, # Invalid argument - expected for /dev/null + ): + raise return self - def __exit__(self, etype, value, traceback): + def unlock(self): + if not self.locked: + return try: - if not self._closed: - _unlock_file(self.f) + _unlock_file(self.f) finally: - self.f.close() - self._closed = True + self.locked = False - def __iter__(self): - return iter(self.f) - - def write(self, *args): - return self.f.write(*args) - - def read(self, *args): - return self.f.read(*args) + def __exit__(self, *_): + try: + self.unlock() + finally: + self.f.close() - def flush(self): - self.f.flush() + open = __enter__ + close = __exit__ - def open(self): - return self.__enter__() + def __getattr__(self, attr): + return getattr(self.f, attr) - def close(self, *args): - self.__exit__(self, *args, value=False, traceback=False) + def __iter__(self): + return iter(self.f) +@functools.cache def get_filesystem_encoding(): encoding = sys.getfilesystemencoding() return encoding if encoding is not None else 'utf-8' @@ -2275,7 +2265,7 @@ def smuggle_url(url, data): url, idata = unsmuggle_url(url, {}) data.update(idata) - sdata = compat_urllib_parse_urlencode( + sdata = urllib.parse.urlencode( {'__youtubedl_smuggle': json.dumps(data)}) return url + '#' + sdata @@ -2284,7 +2274,7 @@ def unsmuggle_url(smug_url, default=None): if '#__youtubedl_smuggle' not in smug_url: return smug_url, default url, _, sdata = smug_url.rpartition('#') - jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0] + jsond = urllib.parse.parse_qs(sdata)['__youtubedl_smuggle'][0] data = json.loads(jsond) return url, data @@ -2307,15 +2297,24 @@ def format_bytes(bytes): return format_decimal_suffix(bytes, '%.2f%sB', factor=1024) or 'N/A' -def lookup_unit_table(unit_table, s): +def lookup_unit_table(unit_table, s, strict=False): + num_re = NUMBER_RE if strict else NUMBER_RE.replace(R'\.', '[,.]') units_re = '|'.join(re.escape(u) for u in unit_table) - m = re.match( - r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)\b' % units_re, s) + m = (re.fullmatch if strict else re.match)( + rf'(?P<num>{num_re})\s*(?P<unit>{units_re})\b', s) if not m: return None - num_str = m.group('num').replace(',', '.') + + num = float(m.group('num').replace(',', '.')) mult = unit_table[m.group('unit')] - return int(float(num_str) * mult) + return round(num * mult) + + +def parse_bytes(s): + """Parse a string indicating a byte quantity into an integer""" + return lookup_unit_table( + {u: 1024**i for i, u in enumerate(['', *'KMGTPEZY'])}, + s.upper(), strict=True) def parse_filesize(s): @@ -2444,7 +2443,7 @@ def parse_resolution(s, *, lenient=False): def parse_bitrate(s): - if not isinstance(s, compat_str): + if not isinstance(s, str): return mobj = re.search(r'\b(\d+)\s*kbps', s) if mobj: @@ -2481,11 +2480,12 @@ def fix_xml_ampersands(xml_str): def setproctitle(title): - assert isinstance(title, compat_str) + assert isinstance(title, str) - # ctypes in Jython is not complete - # http://bugs.jython.org/issue2148 - if sys.platform.startswith('java'): + # Workaround for https://github.com/hypervideo/hypervideo/issues/4541 + try: + import ctypes + except ImportError: return 
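For reference, the new parse_bytes helper introduced above resolves suffixes against the binary (1024-based) table it builds. A few illustrative values, inferred from the code and not part of the patch:

    parse_bytes('500K')   # 512000  (500 * 1024)
    parse_bytes('0.5M')   # 524288  (0.5 * 1024**2)
    parse_bytes('1024')   # 1024    (bare numbers use the empty suffix)
    parse_bytes('10 GB')  # None    ('GB' is not in the single-letter table)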
try: @@ -2497,7 +2497,7 @@ def setproctitle(title): # a bytestring, but since unicode_literals turns # every string into a unicode string, it fails. return - title_bytes = title.encode('utf-8') + title_bytes = title.encode() buf = ctypes.create_string_buffer(len(title_bytes)) buf.value = title_bytes try: @@ -2524,40 +2524,43 @@ def remove_quotes(s): def get_domain(url): - domain = re.match(r'(?:https?:\/\/)?(?:www\.)?(?P<domain>[^\n\/]+\.[^\n\/]+)(?:\/(.*))?', url) - return domain.group('domain') if domain else None + """ + This implementation is inconsistent, but is kept for compatibility. + Use this only for "webpage_url_domain" + """ + return remove_start(urllib.parse.urlparse(url).netloc, 'www.') or None def url_basename(url): - path = compat_urlparse.urlparse(url).path + path = urllib.parse.urlparse(url).path return path.strip('/').split('/')[-1] def base_url(url): - return re.match(r'https?://[^?#&]+/', url).group() + return re.match(r'https?://[^?#]+/', url).group() def urljoin(base, path): if isinstance(path, bytes): - path = path.decode('utf-8') - if not isinstance(path, compat_str) or not path: + path = path.decode() + if not isinstance(path, str) or not path: return None if re.match(r'^(?:[a-zA-Z][a-zA-Z0-9+-.]*:)?//', path): return path if isinstance(base, bytes): - base = base.decode('utf-8') - if not isinstance(base, compat_str) or not re.match( + base = base.decode() + if not isinstance(base, str) or not re.match( r'^(?:https?:)?//', base): return None - return compat_urlparse.urljoin(base, path) + return urllib.parse.urljoin(base, path) -class HEADRequest(compat_urllib_request.Request): +class HEADRequest(urllib.request.Request): def get_method(self): return 'HEAD' -class PUTRequest(compat_urllib_request.Request): +class PUTRequest(urllib.request.Request): def get_method(self): return 'PUT' @@ -2572,14 +2575,14 @@ def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1): def str_or_none(v, default=None): - return default if v is None else compat_str(v) + return default if v is None else str(v) def str_to_int(int_str): """ A more relaxed version of int_or_none """ - if isinstance(int_str, compat_integer_types): + if isinstance(int_str, int): return int_str - elif isinstance(int_str, compat_str): + elif isinstance(int_str, str): int_str = re.sub(r'[,\.\+]', '', int_str) return int_or_none(int_str) @@ -2598,18 +2601,18 @@ def bool_or_none(v, default=None): def strip_or_none(v, default=None): - return v.strip() if isinstance(v, compat_str) else default + return v.strip() if isinstance(v, str) else default def url_or_none(url): - if not url or not isinstance(url, compat_str): + if not url or not isinstance(url, str): return None url = url.strip() return url if re.match(r'^(?:(?:https?|rt(?:m(?:pt?[es]?|fp)|sp[su]?)|mms|ftps?):)?//', url) else None def request_to_url(req): - if isinstance(req, compat_urllib_request.Request): + if isinstance(req, urllib.request.Request): return req.get_full_url() else: return req @@ -2618,17 +2621,21 @@ def request_to_url(req): def strftime_or_none(timestamp, date_format, default=None): datetime_object = None try: - if isinstance(timestamp, compat_numeric_types): # unix timestamp - datetime_object = datetime.datetime.utcfromtimestamp(timestamp) - elif isinstance(timestamp, compat_str): # assume YYYYMMDD + if isinstance(timestamp, (int, float)): # unix timestamp + # Using naive datetime here can break timestamp() in Windows + # Ref: https://github.com/hypervideo/hypervideo/issues/5185, https://github.com/python/cpython/issues/94414 + 
datetime_object = datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc) + elif isinstance(timestamp, str): # assume YYYYMMDD datetime_object = datetime.datetime.strptime(timestamp, '%Y%m%d') + date_format = re.sub( # Support %s on windows + r'(?<!%)(%%)*%s', rf'\g<1>{int(datetime_object.timestamp())}', date_format) return datetime_object.strftime(date_format) except (ValueError, TypeError, AttributeError): return default def parse_duration(s): - if not isinstance(s, compat_basestring): + if not isinstance(s, str): return None s = s.strip() if not s: @@ -2677,31 +2684,23 @@ def parse_duration(s): else: return None - duration = 0 - if secs: - duration += float(secs) - if mins: - duration += float(mins) * 60 - if hours: - duration += float(hours) * 60 * 60 - if days: - duration += float(days) * 24 * 60 * 60 if ms: - duration += float(ms.replace(':', '.')) - return duration + ms = ms.replace(':', '.') + return sum(float(part or 0) * mult for part, mult in ( + (days, 86400), (hours, 3600), (mins, 60), (secs, 1), (ms, 1))) def prepend_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) return ( - '{0}.{1}{2}'.format(name, ext, real_ext) + f'{name}.{ext}{real_ext}' if not expected_real_ext or real_ext[1:] == expected_real_ext - else '{0}.{1}'.format(filename, ext)) + else f'{filename}.{ext}') def replace_extension(filename, ext, expected_real_ext=None): name, real_ext = os.path.splitext(filename) - return '{0}.{1}'.format( + return '{}.{}'.format( name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename, ext) @@ -2710,31 +2709,26 @@ def check_executable(exe, args=[]): """ Checks if the given binary is installed somewhere in PATH, and returns its name. args can be a list of arguments for a short output (like -version) """ try: - Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate_or_kill() + Popen.run([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError: return False return exe -def _get_exe_version_output(exe, args, *, to_screen=None): - if to_screen: - to_screen(f'Checking exe version: {shell_quote([exe] + args)}') +def _get_exe_version_output(exe, args): try: # STDIN should be redirected too. On UNIX-like systems, ffmpeg triggers # SIGTTOU if hypervideo is run in the background. 
# See https://github.com/ytdl-org/youtube-dl/issues/955#issuecomment-209789656 - out, _ = Popen( - [encodeArgument(exe)] + args, stdin=subprocess.PIPE, - stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate_or_kill() + stdout, _, _ = Popen.run([encodeArgument(exe)] + args, text=True, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) except OSError: return False - if isinstance(out, bytes): # Python 2.x - out = out.decode('ascii', 'ignore') - return out + return stdout def detect_exe_version(output, version_re=None, unrecognized='present'): - assert isinstance(output, compat_str) + assert isinstance(output, str) if version_re is None: version_re = r'version\s+([-0-9._a-zA-Z]+)' m = re.search(version_re, output) @@ -2752,50 +2746,59 @@ def get_exe_version(exe, args=['--version'], return detect_exe_version(out, version_re, unrecognized) if out else False +def frange(start=0, stop=None, step=1): + """Float range""" + if stop is None: + start, stop = 0, start + sign = [-1, 1][step > 0] if step else 0 + while sign * start < sign * stop: + yield start + start += step + + class LazyList(collections.abc.Sequence): - ''' Lazy immutable list from an iterable - Note that slices of a LazyList are lists and not LazyList''' + """Lazy immutable list from an iterable + Note that slices of a LazyList are lists and not LazyList""" class IndexError(IndexError): pass def __init__(self, iterable, *, reverse=False, _cache=None): - self.__iterable = iter(iterable) - self.__cache = [] if _cache is None else _cache - self.__reversed = reverse + self._iterable = iter(iterable) + self._cache = [] if _cache is None else _cache + self._reversed = reverse def __iter__(self): - if self.__reversed: + if self._reversed: # We need to consume the entire iterable to iterate in reverse yield from self.exhaust() return - yield from self.__cache - for item in self.__iterable: - self.__cache.append(item) + yield from self._cache + for item in self._iterable: + self._cache.append(item) yield item - def __exhaust(self): - self.__cache.extend(self.__iterable) - # Discard the emptied iterable to make it pickle-able - self.__iterable = [] - return self.__cache + def _exhaust(self): + self._cache.extend(self._iterable) + self._iterable = [] # Discard the emptied iterable to make it pickle-able + return self._cache def exhaust(self): - ''' Evaluate the entire iterable ''' - return self.__exhaust()[::-1 if self.__reversed else 1] + """Evaluate the entire iterable""" + return self._exhaust()[::-1 if self._reversed else 1] @staticmethod - def __reverse_index(x): - return None if x is None else -(x + 1) + def _reverse_index(x): + return None if x is None else ~x def __getitem__(self, idx): if isinstance(idx, slice): - if self.__reversed: - idx = slice(self.__reverse_index(idx.start), self.__reverse_index(idx.stop), -(idx.step or 1)) + if self._reversed: + idx = slice(self._reverse_index(idx.start), self._reverse_index(idx.stop), -(idx.step or 1)) start, stop, step = idx.start, idx.stop, idx.step or 1 elif isinstance(idx, int): - if self.__reversed: - idx = self.__reverse_index(idx) + if self._reversed: + idx = self._reverse_index(idx) start, stop, step = idx, idx, 0 else: raise TypeError('indices must be integers or slices') @@ -2804,35 +2807,35 @@ class LazyList(collections.abc.Sequence): or (stop is None and step > 0)): # We need to consume the entire iterable to be able to slice from the end # Obviously, never use this with infinite iterables - self.__exhaust() + self._exhaust() try: - return 
self.__cache[idx] + return self._cache[idx] except IndexError as e: raise self.IndexError(e) from e - n = max(start or 0, stop or 0) - len(self.__cache) + 1 + n = max(start or 0, stop or 0) - len(self._cache) + 1 if n > 0: - self.__cache.extend(itertools.islice(self.__iterable, n)) + self._cache.extend(itertools.islice(self._iterable, n)) try: - return self.__cache[idx] + return self._cache[idx] except IndexError as e: raise self.IndexError(e) from e def __bool__(self): try: - self[-1] if self.__reversed else self[0] + self[-1] if self._reversed else self[0] except self.IndexError: return False return True def __len__(self): - self.__exhaust() - return len(self.__cache) + self._exhaust() + return len(self._cache) def __reversed__(self): - return type(self)(self.__iterable, reverse=not self.__reversed, _cache=self.__cache) + return type(self)(self._iterable, reverse=not self._reversed, _cache=self._cache) def __copy__(self): - return type(self)(self.__iterable, reverse=self.__reversed, _cache=self.__cache) + return type(self)(self._iterable, reverse=self._reversed, _cache=self._cache) def __repr__(self): # repr and str should mimic a list. So we exhaust the iterable @@ -2884,6 +2887,7 @@ class PagedList: class OnDemandPagedList(PagedList): """Download pages until a page with less than maximum results""" + def _getslice(self, start, end): for pagenum in itertools.count(start // self._pagesize): firstid = pagenum * self._pagesize @@ -2924,6 +2928,7 @@ class OnDemandPagedList(PagedList): class InAdvancePagedList(PagedList): """PagedList with total number of pages known in advance""" + def __init__(self, pagefunc, pagecount, pagesize): PagedList.__init__(self, pagefunc, pagesize, True) self._pagecount = pagecount @@ -2947,6 +2952,140 @@ class InAdvancePagedList(PagedList): yield from page_results +class PlaylistEntries: + MissingEntry = object() + is_exhausted = False + + def __init__(self, ydl, info_dict): + self.ydl = ydl + + # _entries must be assigned now since infodict can change during iteration + entries = info_dict.get('entries') + if entries is None: + raise EntryNotInPlaylist('There are no entries') + elif isinstance(entries, list): + self.is_exhausted = True + + requested_entries = info_dict.get('requested_entries') + self.is_incomplete = requested_entries is not None + if self.is_incomplete: + assert self.is_exhausted + self._entries = [self.MissingEntry] * max(requested_entries or [0]) + for i, entry in zip(requested_entries, entries): + self._entries[i - 1] = entry + elif isinstance(entries, (list, PagedList, LazyList)): + self._entries = entries + else: + self._entries = LazyList(entries) + + PLAYLIST_ITEMS_RE = re.compile(r'''(?x) + (?P<start>[+-]?\d+)? + (?P<range>[:-] + (?P<end>[+-]?\d+|inf(?:inite)?)? + (?::(?P<step>[+-]?\d+))? 
+        )?''')
+
+    @classmethod
+    def parse_playlist_items(cls, string):
+        for segment in string.split(','):
+            if not segment:
+                raise ValueError('There are two or more consecutive commas')
+            mobj = cls.PLAYLIST_ITEMS_RE.fullmatch(segment)
+            if not mobj:
+                raise ValueError(f'{segment!r} is not a valid specification')
+            start, end, step, has_range = mobj.group('start', 'end', 'step', 'range')
+            if int_or_none(step) == 0:
+                raise ValueError(f'Step in {segment!r} cannot be zero')
+            yield slice(int_or_none(start), float_or_none(end), int_or_none(step)) if has_range else int(start)
+
+    def get_requested_items(self):
+        playlist_items = self.ydl.params.get('playlist_items')
+        playlist_start = self.ydl.params.get('playliststart', 1)
+        playlist_end = self.ydl.params.get('playlistend')
+        # For backwards compatibility, interpret -1 as whole list
+        if playlist_end in (-1, None):
+            playlist_end = ''
+        if not playlist_items:
+            playlist_items = f'{playlist_start}:{playlist_end}'
+        elif playlist_start != 1 or playlist_end:
+            self.ydl.report_warning('Ignoring playliststart and playlistend because playlistitems was given', only_once=True)
+
+        for index in self.parse_playlist_items(playlist_items):
+            for i, entry in self[index]:
+                yield i, entry
+                if not entry:
+                    continue
+                try:
+                    # TODO: Add auto-generated fields
+                    self.ydl._match_entry(entry, incomplete=True, silent=True)
+                except (ExistingVideoReached, RejectedVideoReached):
+                    return
+
+    def get_full_count(self):
+        if self.is_exhausted and not self.is_incomplete:
+            return len(self)
+        elif isinstance(self._entries, InAdvancePagedList):
+            if self._entries._pagesize == 1:
+                return self._entries._pagecount
+
+    @functools.cached_property
+    def _getter(self):
+        if isinstance(self._entries, list):
+            def get_entry(i):
+                try:
+                    entry = self._entries[i]
+                except IndexError:
+                    entry = self.MissingEntry
+                    if not self.is_incomplete:
+                        raise self.IndexError()
+                if entry is self.MissingEntry:
+                    raise EntryNotInPlaylist(f'Entry {i + 1} cannot be found')
+                return entry
+        else:
+            def get_entry(i):
+                try:
+                    return type(self.ydl)._handle_extraction_exceptions(lambda _, i: self._entries[i])(self.ydl, i)
+                except (LazyList.IndexError, PagedList.IndexError):
+                    raise self.IndexError()
+        return get_entry
+
+    def __getitem__(self, idx):
+        if isinstance(idx, int):
+            idx = slice(idx, idx)
+
+        # NB: PlaylistEntries[1:10] => (0, 1, ...
9) + step = 1 if idx.step is None else idx.step + if idx.start is None: + start = 0 if step > 0 else len(self) - 1 + else: + start = idx.start - 1 if idx.start >= 0 else len(self) + idx.start + + # NB: Do not call len(self) when idx == [:] + if idx.stop is None: + stop = 0 if step < 0 else float('inf') + else: + stop = idx.stop - 1 if idx.stop >= 0 else len(self) + idx.stop + stop += [-1, 1][step > 0] + + for i in frange(start, stop, step): + if i < 0: + continue + try: + entry = self._getter(i) + except self.IndexError: + self.is_exhausted = True + if step > 0: + break + continue + yield i + 1, entry + + def __len__(self): + return len(tuple(self[:])) + + class IndexError(IndexError): + pass + + def uppercase_escape(s): unicode_escape = codecs.getdecoder('unicode_escape') return re.sub( @@ -2965,14 +3104,12 @@ def lowercase_escape(s): def escape_rfc3986(s): """Escape non-ASCII characters as suggested by RFC 3986""" - if sys.version_info < (3, 0) and isinstance(s, compat_str): - s = s.encode('utf-8') - return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") + return urllib.parse.quote(s, b"%/;:@&=+$,!~*'()?#[]") def escape_url(url): """Escape URL as suggested by RFC 3986""" - url_parsed = compat_urllib_parse_urlparse(url) + url_parsed = urllib.parse.urlparse(url) return url_parsed._replace( netloc=url_parsed.netloc.encode('idna').decode('ascii'), path=escape_rfc3986(url_parsed.path), @@ -2982,13 +3119,13 @@ def escape_url(url): ).geturl() -def parse_qs(url): - return compat_parse_qs(compat_urllib_parse_urlparse(url).query) +def parse_qs(url, **kwargs): + return urllib.parse.parse_qs(urllib.parse.urlparse(url).query, **kwargs) def read_batch_urls(batch_fd): def fixup(url): - if not isinstance(url, compat_str): + if not isinstance(url, str): url = url.decode('utf-8', 'replace') BOM_UTF8 = ('\xef\xbb\xbf', '\ufeff') for bom in BOM_UTF8: @@ -2998,7 +3135,7 @@ def read_batch_urls(batch_fd): if not url or url.startswith(('#', ';', ']')): return False # "#" cannot be stripped out since it is part of the URI - # However, it can be safely stipped out if follwing a whitespace + # However, it can be safely stripped out if following a whitespace return re.split(r'\s#', url, 1)[0].rstrip() with contextlib.closing(batch_fd) as fd: @@ -3006,22 +3143,22 @@ def read_batch_urls(batch_fd): def urlencode_postdata(*args, **kargs): - return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii') + return urllib.parse.urlencode(*args, **kargs).encode('ascii') def update_url_query(url, query): if not query: return url - parsed_url = compat_urlparse.urlparse(url) - qs = compat_parse_qs(parsed_url.query) + parsed_url = urllib.parse.urlparse(url) + qs = urllib.parse.parse_qs(parsed_url.query) qs.update(query) - return compat_urlparse.urlunparse(parsed_url._replace( - query=compat_urllib_parse_urlencode(qs, True))) + return urllib.parse.urlunparse(parsed_url._replace( + query=urllib.parse.urlencode(qs, True))) -def update_Request(req, url=None, data=None, headers={}, query={}): +def update_Request(req, url=None, data=None, headers=None, query=None): req_headers = req.headers.copy() - req_headers.update(headers) + req_headers.update(headers or {}) req_data = data or req.data req_url = update_url_query(url or req.get_full_url(), query) req_get_method = req.get_method() @@ -3030,7 +3167,7 @@ def update_Request(req, url=None, data=None, headers={}, query={}): elif req_get_method == 'PUT': req_type = PUTRequest else: - req_type = compat_urllib_request.Request + req_type = urllib.request.Request new_req = 
req_type( req_url, data=req_data, headers=req_headers, origin_req_host=req.origin_req_host, unverifiable=req.unverifiable) @@ -3045,10 +3182,10 @@ def _multipart_encode_impl(data, boundary): out = b'' for k, v in data.items(): out += b'--' + boundary.encode('ascii') + b'\r\n' - if isinstance(k, compat_str): - k = k.encode('utf-8') - if isinstance(v, compat_str): - v = v.encode('utf-8') + if isinstance(k, str): + k = k.encode() + if isinstance(v, str): + v = v.encode() # RFC 2047 requires non-ASCII field names to be encoded, while RFC 7578 # suggests sending UTF-8 directly. Firefox sends UTF-8, too content = b'Content-Disposition: form-data; name="' + k + b'"\r\n\r\n' + v + b'\r\n' @@ -3091,6 +3228,10 @@ def multipart_encode(data, boundary=None): return out, content_type +def variadic(x, allowed_types=(str, bytes, dict)): + return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,) + + def dict_get(d, key_or_keys, default=None, skip_false_values=True): for val in map(d.get, variadic(key_or_keys)): if val is not None and (val or not skip_false_values): @@ -3102,7 +3243,7 @@ def try_call(*funcs, expected_type=None, args=[], kwargs={}): for f in funcs: try: val = f(*args, **kwargs) - except (AttributeError, KeyError, TypeError, IndexError, ZeroDivisionError): + except (AttributeError, KeyError, TypeError, IndexError, ValueError, ZeroDivisionError): pass else: if expected_type is None or isinstance(val, expected_type): @@ -3128,7 +3269,7 @@ def merge_dicts(*dicts): def encode_compat_str(string, encoding=preferredencoding(), errors='strict'): - return string if isinstance(string, compat_str) else compat_str(string, encoding, errors) + return string if isinstance(string, str) else str(string, encoding, errors) US_RATINGS = { @@ -3151,9 +3292,10 @@ TV_PARENTAL_GUIDELINES = { def parse_age_limit(s): - if type(s) == int: + # isinstance(False, int) is True. 
So type() must be used instead + if type(s) is int: # noqa: E721 return s if 0 <= s <= 21 else None - if not isinstance(s, compat_basestring): + elif not isinstance(s, str): return None m = re.match(r'^(?P<age>\d{1,2})\+?$', s) if m: @@ -3177,15 +3319,26 @@ def strip_jsonp(code): r'\g<callback_data>', code) -def js_to_json(code, vars={}): +def js_to_json(code, vars={}, *, strict=False): # vars is a dict of var, val pairs to substitute + STRING_QUOTES = '\'"' + STRING_RE = '|'.join(rf'{q}(?:\\.|[^\\{q}])*{q}' for q in STRING_QUOTES) COMMENT_RE = r'/\*(?:(?!\*/).)*?\*/|//[^\n]*\n' - SKIP_RE = r'\s*(?:{comment})?\s*'.format(comment=COMMENT_RE) + SKIP_RE = fr'\s*(?:{COMMENT_RE})?\s*' INTEGER_TABLE = ( - (r'(?s)^(0[xX][0-9a-fA-F]+){skip}:?$'.format(skip=SKIP_RE), 16), - (r'(?s)^(0+[0-7]+){skip}:?$'.format(skip=SKIP_RE), 8), + (fr'(?s)^(0[xX][0-9a-fA-F]+){SKIP_RE}:?$', 16), + (fr'(?s)^(0+[0-7]+){SKIP_RE}:?$', 8), ) + def process_escape(match): + JSON_PASSTHROUGH_ESCAPES = R'"\bfnrtu' + escape = match.group(1) or match.group(2) + + return (Rf'\{escape}' if escape in JSON_PASSTHROUGH_ESCAPES + else R'\u00' if escape == 'x' + else '' if escape == '\n' + else escape) + def fix_kv(m): v = m.group(0) if v in ('true', 'false', 'null'): @@ -3193,38 +3346,42 @@ def js_to_json(code, vars={}): elif v in ('undefined', 'void 0'): return 'null' elif v.startswith('/*') or v.startswith('//') or v.startswith('!') or v == ',': - return "" - - if v[0] in ("'", '"'): - v = re.sub(r'(?s)\\.|"', lambda m: { - '"': '\\"', - "\\'": "'", - '\\\n': '', - '\\x': '\\u00', - }.get(m.group(0), m.group(0)), v[1:-1]) - else: - for regex, base in INTEGER_TABLE: - im = re.match(regex, v) - if im: - i = int(im.group(1), base) - return '"%d":' % i if v.endswith(':') else '%d' % i + return '' + + if v[0] in STRING_QUOTES: + escaped = re.sub(r'(?s)(")|\\(.)', process_escape, v[1:-1]) + return f'"{escaped}"' + + for regex, base in INTEGER_TABLE: + im = re.match(regex, v) + if im: + i = int(im.group(1), base) + return f'"{i}":' if v.endswith(':') else str(i) - if v in vars: - return vars[v] + if v in vars: + return json.dumps(vars[v]) - return '"%s"' % v + if not strict: + return f'"{v}"' - code = re.sub(r'new Date\((".+")\)', r'\g<1>', code) + raise ValueError(f'Unknown value: {v}') - return re.sub(r'''(?sx) - "(?:[^"\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^"\\]*"| - '(?:[^'\\]*(?:\\\\|\\['"nurtbfx/\n]))*[^'\\]*'| - {comment}|,(?={skip}[\]}}])| + def create_map(mobj): + return json.dumps(dict(json.loads(js_to_json(mobj.group(1) or '[]', vars=vars)))) + + code = re.sub(r'new Map\((\[.*?\])?\)', create_map, code) + if not strict: + code = re.sub(r'new Date\((".+")\)', r'\g<1>', code) + code = re.sub(r'new \w+\((.*?)\)', lambda m: json.dumps(m.group(0)), code) + + return re.sub(rf'''(?sx) + {STRING_RE}| + {COMMENT_RE}|,(?={SKIP_RE}[\]}}])| void\s0|(?:(?<![0-9])[eE]|[a-df-zA-DF-Z_$])[.a-zA-Z_$0-9]*| - \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{skip}:)?| - [0-9]+(?={skip}:)| + \b(?:0[xX][0-9a-fA-F]+|0+[0-7]+)(?:{SKIP_RE}:)?| + [0-9]+(?={SKIP_RE}:)| !+ - '''.format(comment=COMMENT_RE, skip=SKIP_RE), fix_kv, code) + ''', fix_kv, code) def qualities(quality_ids): @@ -3237,7 +3394,7 @@ def qualities(quality_ids): return q -POSTPROCESS_WHEN = {'pre_process', 'after_filter', 'before_dl', 'after_move', 'post_process', 'after_video', 'playlist'} +POSTPROCESS_WHEN = ('pre_process', 'after_filter', 'before_dl', 'post_process', 'after_move', 'after_video', 'playlist') DEFAULT_OUTTMPL = { @@ -3315,12 +3472,7 @@ def args_to_str(args): def error_to_compat_str(err): - 
err_str = str(err) - # On python 2 error byte string must be decoded with proper - # encoding rather than ascii - if sys.version_info[0] < 3: - err_str = err_str.decode(preferredencoding()) - return err_str + return str(err) def error_to_str(err): @@ -3405,34 +3557,33 @@ def parse_codecs(codecs_str): return {} split_codecs = list(filter(None, map( str.strip, codecs_str.strip().strip(',').split(',')))) - vcodec, acodec, tcodec, hdr = None, None, None, None + vcodec, acodec, scodec, hdr = None, None, None, None for full_codec in split_codecs: - parts = full_codec.split('.') - codec = parts[0].replace('0', '') - if codec in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', - 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'): - if not vcodec: - vcodec = '.'.join(parts[:4]) if codec in ('vp9', 'av1', 'hvc1') else full_codec - if codec in ('dvh1', 'dvhe'): - hdr = 'DV' - elif codec == 'av1' and len(parts) > 3 and parts[3] == '10': - hdr = 'HDR10' - elif full_codec.replace('0', '').startswith('vp9.2'): - hdr = 'HDR10' - elif codec in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'): - if not acodec: - acodec = full_codec - elif codec in ('stpp', 'wvtt',): - if not tcodec: - tcodec = full_codec + parts = re.sub(r'0+(?=\d)', '', full_codec).split('.') + if parts[0] in ('avc1', 'avc2', 'avc3', 'avc4', 'vp9', 'vp8', 'hev1', 'hev2', + 'h263', 'h264', 'mp4v', 'hvc1', 'av1', 'theora', 'dvh1', 'dvhe'): + if vcodec: + continue + vcodec = full_codec + if parts[0] in ('dvh1', 'dvhe'): + hdr = 'DV' + elif parts[0] == 'av1' and traverse_obj(parts, 3) == '10': + hdr = 'HDR10' + elif parts[:2] == ['vp9', '2']: + hdr = 'HDR10' + elif parts[0] in ('flac', 'mp4a', 'opus', 'vorbis', 'mp3', 'aac', + 'ac-3', 'ec-3', 'eac3', 'dtsc', 'dtse', 'dtsh', 'dtsl'): + acodec = acodec or full_codec + elif parts[0] in ('stpp', 'wvtt'): + scodec = scodec or full_codec else: - write_string('WARNING: Unknown codec %s\n' % full_codec, sys.stderr) - if vcodec or acodec or tcodec: + write_string(f'WARNING: Unknown codec {full_codec}\n') + if vcodec or acodec or scodec: return { 'vcodec': vcodec or 'none', 'acodec': acodec or 'none', 'dynamic_range': hdr, - **({'tcodec': tcodec} if tcodec is not None else {}), + **({'scodec': scodec} if scodec is not None else {}), } elif len(split_codecs) == 2: return { @@ -3442,6 +3593,46 @@ def parse_codecs(codecs_str): return {} +def get_compatible_ext(*, vcodecs, acodecs, vexts, aexts, preferences=None): + assert len(vcodecs) == len(vexts) and len(acodecs) == len(aexts) + + allow_mkv = not preferences or 'mkv' in preferences + + if allow_mkv and max(len(acodecs), len(vcodecs)) > 1: + return 'mkv' # TODO: any other format allows this? 
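A few concrete inputs for the rewritten parse_codecs above (expected outputs are inferred from the code and shown for illustration only):

    parse_codecs('avc1.64001F, mp4a.40.2')
    # {'vcodec': 'avc1.64001F', 'acodec': 'mp4a.40.2', 'dynamic_range': None}
    parse_codecs('dvh1.05.01')
    # {'vcodec': 'dvh1.05.01', 'acodec': 'none', 'dynamic_range': 'DV'}
    parse_codecs('vp09.02.10.10.01.09.16.09.99, opus')
    # vp9 profile 2 is detected as HDR10; opus becomes the acodec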
+ + # TODO: All codecs supported by parse_codecs isn't handled here + COMPATIBLE_CODECS = { + 'mp4': { + 'av1', 'hevc', 'avc1', 'mp4a', # fourcc (m3u8, mpd) + 'h264', 'aacl', 'ec-3', # Set in ISM + }, + 'webm': { + 'av1', 'vp9', 'vp8', 'opus', 'vrbs', + 'vp9x', 'vp8x', # in the webm spec + }, + } + + sanitize_codec = functools.partial(try_get, getter=lambda x: x[0].split('.')[0].replace('0', '')) + vcodec, acodec = sanitize_codec(vcodecs), sanitize_codec(acodecs) + + for ext in preferences or COMPATIBLE_CODECS.keys(): + codec_set = COMPATIBLE_CODECS.get(ext, set()) + if ext == 'mkv' or codec_set.issuperset((vcodec, acodec)): + return ext + + COMPATIBLE_EXTS = ( + {'mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma', 'mov'}, + {'webm'}, + ) + for ext in preferences or vexts: + current_exts = {ext, *vexts, *aexts} + if ext == 'mkv' or current_exts == {ext} or any( + ext_sets.issuperset(current_exts) for ext_sets in COMPATIBLE_EXTS): + return ext + return 'mkv' if allow_mkv else preferences[-1] + + def urlhandle_detect_ext(url_handle): getheader = url_handle.headers.get @@ -3470,24 +3661,25 @@ def age_restricted(content_limit, age_limit): return age_limit < content_limit +# List of known byte-order-marks (BOM) +BOMS = [ + (b'\xef\xbb\xbf', 'utf-8'), + (b'\x00\x00\xfe\xff', 'utf-32-be'), + (b'\xff\xfe\x00\x00', 'utf-32-le'), + (b'\xff\xfe', 'utf-16-le'), + (b'\xfe\xff', 'utf-16-be'), +] + + def is_html(first_bytes): """ Detect whether a file contains HTML by examining its first bytes. """ - BOMS = [ - (b'\xef\xbb\xbf', 'utf-8'), - (b'\x00\x00\xfe\xff', 'utf-32-be'), - (b'\xff\xfe\x00\x00', 'utf-32-le'), - (b'\xff\xfe', 'utf-16-le'), - (b'\xfe\xff', 'utf-16-be'), - ] + encoding = 'utf-8' for bom, enc in BOMS: - if first_bytes.startswith(bom): - s = first_bytes[len(bom):].decode(enc, 'replace') - break - else: - s = first_bytes.decode('utf-8', 'replace') + while first_bytes.startswith(bom): + encoding, first_bytes = enc, first_bytes[len(bom):] - return re.match(r'^\s*<', s) + return re.match(r'^\s*<', first_bytes.decode(encoding, 'replace')) def determine_protocol(info_dict): @@ -3505,11 +3697,11 @@ def determine_protocol(info_dict): ext = determine_ext(url) if ext == 'm3u8': - return 'm3u8' + return 'm3u8' if info_dict.get('is_live') else 'm3u8_native' elif ext == 'f4m': return 'f4m' - return compat_urllib_parse_urlparse(url).scheme + return urllib.parse.urlparse(url).scheme def render_table(header_row, data, delim=False, extra_gap=0, hide_empty=False): @@ -3566,16 +3758,15 @@ def _match_one(filter_part, dct, incomplete): else: is_incomplete = lambda k: k in incomplete - operator_rex = re.compile(r'''(?x)\s* + operator_rex = re.compile(r'''(?x) (?P<key>[a-z_]+) \s*(?P<negation>!\s*)?(?P<op>%s)(?P<none_inclusive>\s*\?)?\s* (?: (?P<quote>["\'])(?P<quotedstrval>.+?)(?P=quote)| (?P<strval>.+?) 
) - \s*$ ''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys()))) - m = operator_rex.search(filter_part) + m = operator_rex.fullmatch(filter_part.strip()) if m: m = m.groupdict() unnegated_op = COMPARISON_OPERATORS[m['op']] @@ -3588,7 +3779,7 @@ def _match_one(filter_part, dct, incomplete): comparison_value = comparison_value.replace(r'\%s' % m['quote'], m['quote']) actual_value = dct.get(m['key']) numeric_comparison = None - if isinstance(actual_value, compat_numeric_types): + if isinstance(actual_value, (int, float)): # If the original field is a string and matching comparisonvalue is # a number we should respect the origin of the original field # and process comparison value as a string (see @@ -3611,11 +3802,10 @@ def _match_one(filter_part, dct, incomplete): '': lambda v: (v is True) if isinstance(v, bool) else (v is not None), '!': lambda v: (v is False) if isinstance(v, bool) else (v is None), } - operator_rex = re.compile(r'''(?x)\s* + operator_rex = re.compile(r'''(?x) (?P<op>%s)\s*(?P<key>[a-z_]+) - \s*$ ''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys()))) - m = operator_rex.search(filter_part) + m = operator_rex.fullmatch(filter_part.strip()) if m: op = UNARY_OPERATORS[m.group('op')] actual_value = dct.get(m.group('key')) @@ -3641,23 +3831,52 @@ def match_str(filter_str, dct, incomplete=False): def match_filter_func(filters): if not filters: return None - filters = variadic(filters) + filters = set(variadic(filters)) - def _match_func(info_dict, *args, **kwargs): - if any(match_str(f, info_dict, *args, **kwargs) for f in filters): - return None + interactive = '-' in filters + if interactive: + filters.remove('-') + + def _match_func(info_dict, incomplete=False): + if not filters or any(match_str(f, info_dict, incomplete) for f in filters): + return NO_DEFAULT if interactive and not incomplete else None else: - video_title = info_dict.get('title') or info_dict.get('id') or 'video' + video_title = info_dict.get('title') or info_dict.get('id') or 'entry' filter_str = ') | ('.join(map(str.strip, filters)) return f'{video_title} does not pass filter ({filter_str}), skipping ..' 
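The interactive '-' value aside, filtering still delegates to match_str, so for example (a sketch; values follow from the operators defined above):

    match_str('duration < 60 & like_count', {'duration': 30, 'like_count': 5})  # True
    match_str('!is_live', {'is_live': True})                                    # False
    # The function returned by match_filter_func yields None when an entry passes
    # (NO_DEFAULT in interactive mode) and a skip-reason string when it does not.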
return _match_func +class download_range_func: + def __init__(self, chapters, ranges): + self.chapters, self.ranges = chapters, ranges + + def __call__(self, info_dict, ydl): + if not self.ranges and not self.chapters: + yield {} + + warning = ('There are no chapters matching the regex' if info_dict.get('chapters') + else 'Cannot match chapters since chapter information is unavailable') + for regex in self.chapters or []: + for i, chapter in enumerate(info_dict.get('chapters') or []): + if re.search(regex, chapter['title']): + warning = None + yield {**chapter, 'index': i} + if self.chapters and warning: + ydl.to_screen(f'[info] {info_dict["id"]}: {warning}') + + yield from ({'start_time': start, 'end_time': end} for start, end in self.ranges or []) + + def __eq__(self, other): + return (isinstance(other, download_range_func) + and self.chapters == other.chapters and self.ranges == other.ranges) + + def parse_dfxp_time_expr(time_expr): if not time_expr: return - mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr) + mobj = re.match(rf'^(?P<time_offset>{NUMBER_RE})s?$', time_expr) if mobj: return float(mobj.group('time_offset')) @@ -3709,7 +3928,7 @@ def dfxp2srt(dfxp_data): styles = {} default_style = {} - class TTMLPElementParser(object): + class TTMLPElementParser: _out = '' _unclosed_elements = [] _applied_styles = [] @@ -3839,26 +4058,21 @@ def dfxp2srt(dfxp_data): return ''.join(out) -def cli_option(params, command_option, param): +def cli_option(params, command_option, param, separator=None): param = params.get(param) - if param: - param = compat_str(param) - return [command_option, param] if param is not None else [] + return ([] if param is None + else [command_option, str(param)] if separator is None + else [f'{command_option}{separator}{param}']) def cli_bool_option(params, command_option, param, true_value='true', false_value='false', separator=None): param = params.get(param) - if param is None: - return [] - assert isinstance(param, bool) - if separator: - return [command_option + separator + (true_value if param else false_value)] - return [command_option, true_value if param else false_value] + assert param in (True, False, None) + return cli_option({True: true_value, False: false_value}, command_option, param, separator) def cli_valueless_option(params, command_option, param, expected_value=True): - param = params.get(param) - return [command_option] if param == expected_value else [] + return [command_option] if params.get(param) == expected_value else [] def cli_configuration_args(argdict, keys, default=[], use_compat=True): @@ -3894,7 +4108,7 @@ def _configuration_args(main_key, argdict, exe, keys=None, default=[], use_compa return cli_configuration_args(argdict, keys, default, use_compat) -class ISO639Utils(object): +class ISO639Utils: # See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt _lang_map = { 'aa': 'aar', @@ -4099,7 +4313,7 @@ class ISO639Utils(object): return short_name -class ISO3166Utils(object): +class ISO3166Utils: # From http://data.okfn.org/data/core/country-list _country_map = { 'AF': 'Afghanistan', @@ -4351,6 +4565,9 @@ class ISO3166Utils(object): 'YE': 'Yemen', 'ZM': 'Zambia', 'ZW': 'Zimbabwe', + # Not ISO 3166 codes, but used for IP blocks + 'AP': 'Asia/Pacific Region', + 'EU': 'Europe', } @classmethod @@ -4359,7 +4576,7 @@ class ISO3166Utils(object): return cls._country_map.get(code.upper()) -class GeoUtils(object): +class GeoUtils: # Major IPv4 address blocks per country _country_ip_map = { 'AD': '46.172.224.0/19', @@ 
-4613,20 +4830,20 @@ class GeoUtils(object): else: block = code_or_block addr, preflen = block.split('/') - addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0] + addr_min = struct.unpack('!L', socket.inet_aton(addr))[0] addr_max = addr_min | (0xffffffff >> int(preflen)) - return compat_str(socket.inet_ntoa( - compat_struct_pack('!L', random.randint(addr_min, addr_max)))) + return str(socket.inet_ntoa( + struct.pack('!L', random.randint(addr_min, addr_max)))) -class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): +class PerRequestProxyHandler(urllib.request.ProxyHandler): def __init__(self, proxies=None): # Set default handlers for type in ('http', 'https'): setattr(self, '%s_open' % type, lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open: meth(r, proxy, type)) - compat_urllib_request.ProxyHandler.__init__(self, proxies) + urllib.request.ProxyHandler.__init__(self, proxies) def proxy_open(self, req, proxy, type): req_proxy = req.headers.get('Ytdl-request-proxy') @@ -4636,11 +4853,11 @@ class PerRequestProxyHandler(compat_urllib_request.ProxyHandler): if proxy == '__noproxy__': return None # No Proxy - if compat_urlparse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): + if urllib.parse.urlparse(proxy).scheme.lower() in ('socks', 'socks4', 'socks4a', 'socks5'): req.add_header('Ytdl-socks-proxy', proxy) # hypervideo's http/https handlers do wrapping the socket with socks return None - return compat_urllib_request.ProxyHandler.proxy_open( + return urllib.request.ProxyHandler.proxy_open( self, req, proxy, type) @@ -4660,7 +4877,7 @@ def long_to_bytes(n, blocksize=0): s = b'' n = int(n) while n > 0: - s = compat_struct_pack('>I', n & 0xffffffff) + s + s = struct.pack('>I', n & 0xffffffff) + s n = n >> 32 # strip off leading zeros for i in range(len(s)): @@ -4691,7 +4908,7 @@ def bytes_to_long(s): s = b'\000' * extra + s length = length + extra for i in range(0, length, 4): - acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0] + acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0] return acc @@ -4727,22 +4944,42 @@ def pkcs1pad(data, length): return [0, 2] + pseudo_random + [0] + data -def encode_base_n(num, n, table=None): - FULL_TABLE = '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' - if not table: - table = FULL_TABLE[:n] +def _base_n_table(n, table): + if not table and not n: + raise ValueError('Either table or n must be specified') + table = (table or '0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')[:n] + + if n and n != len(table): + raise ValueError(f'base {n} exceeds table length {len(table)}') + return table - if n > len(table): - raise ValueError('base %d exceeds table length %d' % (n, len(table))) - if num == 0: +def encode_base_n(num, n=None, table=None): + """Convert given int to a base-n string""" + table = _base_n_table(n, table) + if not num: return table[0] - ret = '' + result, base = '', len(table) while num: - ret = table[num % n] + ret - num = num // n - return ret + result = table[num % base] + result + num = num // base + return result + + +def decode_base_n(string, n=None, table=None): + """Convert given base-n string to int""" + table = {char: index for index, char in enumerate(_base_n_table(n, table))} + result, base = 0, len(table) + for char in string: + result = result * base + table[char] + return result + + +def decode_base(value, digits): + deprecation_warning(f'{__name__}.decode_base is deprecated and may be removed ' + f'in a future version. 
Use {__name__}.decode_base_n instead') + return decode_base_n(value, table=digits) def decode_packed_codes(code): @@ -4796,10 +5033,10 @@ def decode_png(png_data): header = png_data[8:] if png_data[:8] != b'\x89PNG\x0d\x0a\x1a\x0a' or header[4:8] != b'IHDR': - raise IOError('Not a valid PNG file.') + raise OSError('Not a valid PNG file.') int_map = {1: '>B', 2: '>H', 4: '>I'} - unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0] + unpack_integer = lambda x: struct.unpack(int_map[len(x)], x)[0] chunks = [] @@ -4833,7 +5070,7 @@ def decode_png(png_data): idat += chunk['data'] if not idat: - raise IOError('Unable to read PNG data.') + raise OSError('Unable to read PNG data.') decompressed_data = bytearray(zlib.decompress(idat)) @@ -4897,87 +5134,54 @@ def decode_png(png_data): def write_xattr(path, key, value): - # This mess below finds the best xattr tool for the job - try: - # try the pyxattr module... - import xattr - - if hasattr(xattr, 'set'): # pyxattr - # Unicode arguments are not supported in python-pyxattr until - # version 0.5.0 - # See https://github.com/ytdl-org/youtube-dl/issues/5498 - pyxattr_required_version = '0.5.0' - if version_tuple(xattr.__version__) < version_tuple(pyxattr_required_version): - # TODO: fallback to CLI tools - raise XAttrUnavailableError( - 'python-pyxattr is detected but is too old. ' - 'hypervideo requires %s or above while your version is %s. ' - 'Falling back to other xattr implementations' % ( - pyxattr_required_version, xattr.__version__)) - - setxattr = xattr.set - else: # xattr - setxattr = xattr.setxattr + # Windows: Write xattrs to NTFS Alternate Data Streams: + # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 + if compat_os_name == 'nt': + assert ':' not in key + assert os.path.exists(path) try: - setxattr(path, key, value) - except EnvironmentError as e: + with open(f'{path}:{key}', 'wb') as f: + f.write(value) + except OSError as e: raise XAttrMetadataError(e.errno, e.strerror) + return - except ImportError: - if compat_os_name == 'nt': - # Write xattrs to NTFS Alternate Data Streams: - # http://en.wikipedia.org/wiki/NTFS#Alternate_data_streams_.28ADS.29 - assert ':' not in key - assert os.path.exists(path) - - ads_fn = path + ':' + key - try: - with open(ads_fn, 'wb') as f: - f.write(value) - except EnvironmentError as e: - raise XAttrMetadataError(e.errno, e.strerror) - else: - user_has_setfattr = check_executable('setfattr', ['--version']) - user_has_xattr = check_executable('xattr', ['-h']) - - if user_has_setfattr or user_has_xattr: + # UNIX Method 1. 
Use xattrs/pyxattrs modules - value = value.decode('utf-8') - if user_has_setfattr: - executable = 'setfattr' - opts = ['-n', key, '-v', value] - elif user_has_xattr: - executable = 'xattr' - opts = ['-w', key, value] + setxattr = None + if getattr(xattr, '_hypervideo_dl__identifier', None) == 'pyxattr': + # Unicode arguments are not supported in pyxattr until version 0.5.0 + # See https://github.com/ytdl-org/youtube-dl/issues/5498 + if version_tuple(xattr.__version__) >= (0, 5, 0): + setxattr = xattr.set + elif xattr: + setxattr = xattr.setxattr - cmd = ([encodeFilename(executable, True)] - + [encodeArgument(o) for o in opts] - + [encodeFilename(path, True)]) + if setxattr: + try: + setxattr(path, key, value) + except OSError as e: + raise XAttrMetadataError(e.errno, e.strerror) + return - try: - p = Popen( - cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) - except EnvironmentError as e: - raise XAttrMetadataError(e.errno, e.strerror) - stdout, stderr = p.communicate_or_kill() - stderr = stderr.decode('utf-8', 'replace') - if p.returncode != 0: - raise XAttrMetadataError(p.returncode, stderr) + # UNIX Method 2. Use setfattr/xattr executables + exe = ('setfattr' if check_executable('setfattr', ['--version']) + else 'xattr' if check_executable('xattr', ['-h']) else None) + if not exe: + raise XAttrUnavailableError( + 'Couldn\'t find a tool to set the xattrs. Install either the python "xattr" or "pyxattr" modules or the ' + + ('"xattr" binary' if sys.platform != 'linux' else 'GNU "attr" package (which contains the "setfattr" tool)')) - else: - # On Unix, and can't find pyxattr, setfattr, or xattr. - if sys.platform.startswith('linux'): - raise XAttrUnavailableError( - "Couldn't find a tool to set the xattrs. " - "Install either the python 'pyxattr' or 'xattr' " - "modules, or the GNU 'attr' package " - "(which contains the 'setfattr' tool).") - else: - raise XAttrUnavailableError( - "Couldn't find a tool to set the xattrs. " - "Install either the python 'xattr' module, " - "or the 'xattr' binary.") + value = value.decode() + try: + _, stderr, returncode = Popen.run( + [exe, '-w', key, value, path] if exe == 'xattr' else [exe, '-n', key, '-v', value, path], + text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) + except OSError as e: + raise XAttrMetadataError(e.errno, e.strerror) + if returncode: + raise XAttrMetadataError(returncode, stderr) def random_birthday(year_field, month_field, day_field): @@ -4993,12 +5197,12 @@ def random_birthday(year_field, month_field, day_field): # Templates for internet shortcut files, which are plain text files. -DOT_URL_LINK_TEMPLATE = ''' +DOT_URL_LINK_TEMPLATE = '''\ [InternetShortcut] URL=%(url)s -'''.lstrip() +''' -DOT_WEBLOC_LINK_TEMPLATE = ''' +DOT_WEBLOC_LINK_TEMPLATE = '''\ <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> <plist version="1.0"> @@ -5007,16 +5211,16 @@ DOT_WEBLOC_LINK_TEMPLATE = ''' \t<string>%(url)s</string> </dict> </plist> -'''.lstrip() +''' -DOT_DESKTOP_LINK_TEMPLATE = ''' +DOT_DESKTOP_LINK_TEMPLATE = '''\ [Desktop Entry] Encoding=UTF-8 Name=%(filename)s Type=Link URL=%(url)s Icon=text-html -'''.lstrip() +''' LINK_TEMPLATES = { 'url': DOT_URL_LINK_TEMPLATE, @@ -5032,7 +5236,7 @@ def iri_to_uri(iri): The function doesn't add an additional layer of escaping; e.g., it doesn't escape `%3C` as `%253C`. 
Instead, it percent-escapes characters with an underlying UTF-8 encoding *besides* those already escaped, leaving the URI intact. """ - iri_parts = compat_urllib_parse_urlparse(iri) + iri_parts = urllib.parse.urlparse(iri) if '[' in iri_parts.netloc: raise ValueError('IPv6 URIs are not, yet, supported.') @@ -5042,29 +5246,29 @@ def iri_to_uri(iri): net_location = '' if iri_parts.username: - net_location += compat_urllib_parse_quote(iri_parts.username, safe=r"!$%&'()*+,~") + net_location += urllib.parse.quote(iri_parts.username, safe=r"!$%&'()*+,~") if iri_parts.password is not None: - net_location += ':' + compat_urllib_parse_quote(iri_parts.password, safe=r"!$%&'()*+,~") + net_location += ':' + urllib.parse.quote(iri_parts.password, safe=r"!$%&'()*+,~") net_location += '@' - net_location += iri_parts.hostname.encode('idna').decode('utf-8') # Punycode for Unicode hostnames. + net_location += iri_parts.hostname.encode('idna').decode() # Punycode for Unicode hostnames. # The 'idna' encoding produces ASCII text. if iri_parts.port is not None and iri_parts.port != 80: net_location += ':' + str(iri_parts.port) - return compat_urllib_parse_urlunparse( + return urllib.parse.urlunparse( (iri_parts.scheme, net_location, - compat_urllib_parse_quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"), + urllib.parse.quote_plus(iri_parts.path, safe=r"!$%&'()*+,/:;=@|~"), # Unsure about the `safe` argument, since this is a legacy way of handling parameters. - compat_urllib_parse_quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"), + urllib.parse.quote_plus(iri_parts.params, safe=r"!$%&'()*+,/:;=@|~"), # Not totally sure about the `safe` argument, since the source does not explicitly mention the query URI component. - compat_urllib_parse_quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"), + urllib.parse.quote_plus(iri_parts.query, safe=r"!$%&'()*+,/:;=?@{|}~"), - compat_urllib_parse_quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~"))) + urllib.parse.quote_plus(iri_parts.fragment, safe=r"!#$%&'()*+,/:;=?@{|}~"))) # Source for `safe` arguments: https://url.spec.whatwg.org/#percent-encoded-bytes. @@ -5072,16 +5276,16 @@ def iri_to_uri(iri): def to_high_limit_path(path): if sys.platform in ['win32', 'cygwin']: # Work around MAX_PATH limitation on Windows. The maximum allowed length for the individual path segments may still be quite limited. 
- return r'\\?\ '.rstrip() + os.path.abspath(path) + return '\\\\?\\' + os.path.abspath(path) return path -def format_field(obj, field=None, template='%s', ignore=(None, ''), default='', func=None): +def format_field(obj, field=None, template='%s', ignore=NO_DEFAULT, default='', func=IDENTITY): val = traverse_obj(obj, *variadic(field)) - if val in ignore: + if (not val and val != 0) if ignore is NO_DEFAULT else val in variadic(ignore): return default - return template % (func(val) if func else val) + return template % func(val) def clean_podcast_url(url): @@ -5114,7 +5318,7 @@ def make_dir(path, to_screen=None): if dn and not os.path.exists(dn): os.makedirs(dn) return True - except (OSError, IOError) as err: + except OSError as err: if callable(to_screen) is not None: to_screen('unable to create directory ' + error_to_compat_str(err)) return False @@ -5133,7 +5337,7 @@ def get_executable_path(): def load_plugins(name, suffix, namespace): classes = {} - try: + with contextlib.suppress(FileNotFoundError): plugins_spec = importlib.util.spec_from_file_location( name, os.path.join(get_executable_path(), 'ytdlp_plugins', name, '__init__.py')) plugins = importlib.util.module_from_spec(plugins_spec) @@ -5146,133 +5350,186 @@ def load_plugins(name, suffix, namespace): continue klass = getattr(plugins, name) classes[name] = namespace[name] = klass - except FileNotFoundError: - pass return classes def traverse_obj( - obj, *path_list, default=None, expected_type=None, get_all=True, + obj, *paths, default=NO_DEFAULT, expected_type=None, get_all=True, casesense=True, is_user_input=False, traverse_string=False): - ''' Traverse nested list/dict/tuple - @param path_list A list of paths which are checked one by one. - Each path is a list of keys where each key is a string, - a function, a tuple of strings/None or "...". - When a fuction is given, it takes the key and value as arguments - and returns whether the key matches or not. When a tuple is given, - all the keys given in the tuple are traversed, and - "..." traverses all the keys in the object - "None" returns the object without traversal - @param default Default value to return - @param expected_type Only accept final value of this type (Can also be any callable) - @param get_all Return all the values obtained from a path or only the first one - @param casesense Whether to consider dictionary keys as case sensitive - @param is_user_input Whether the keys are generated from user input. If True, - strings are converted to int/slice if necessary - @param traverse_string Whether to traverse inside strings. If True, any - non-compatible object will also be converted into a string - # TODO: Write tests - ''' - if not casesense: - _lower = lambda k: (k.lower() if isinstance(k, str) else k) - path_list = (map(_lower, variadic(path)) for path in path_list) - - def _traverse_obj(obj, path, _current_depth=0): - nonlocal depth - path = tuple(variadic(path)) - for i, key in enumerate(path): - if None in (key, obj): - return obj - if isinstance(key, (list, tuple)): - obj = [_traverse_obj(obj, sub_key, _current_depth) for sub_key in key] - key = ... 
- if key is ...:
- obj = (obj.values() if isinstance(obj, dict)
- else obj if isinstance(obj, (list, tuple, LazyList))
- else str(obj) if traverse_string else [])
- _current_depth += 1
- depth = max(depth, _current_depth)
- return [_traverse_obj(inner_obj, path[i + 1:], _current_depth) for inner_obj in obj]
- elif callable(key):
- if isinstance(obj, (list, tuple, LazyList)):
- obj = enumerate(obj)
- elif isinstance(obj, dict):
- obj = obj.items()
- else:
- if not traverse_string:
- return None
- obj = str(obj)
- _current_depth += 1
- depth = max(depth, _current_depth)
- return [_traverse_obj(v, path[i + 1:], _current_depth) for k, v in obj if try_call(key, args=(k, v))]
- elif isinstance(obj, dict) and not (is_user_input and key == ':'):
- obj = (obj.get(key) if casesense or (key in obj)
- else next((v for k, v in obj.items() if _lower(k) == key), None))
- else:
- if is_user_input:
- key = (int_or_none(key) if ':' not in key
- else slice(*map(int_or_none, key.split(':'))))
- if key == slice(None):
- return _traverse_obj(obj, (..., *path[i + 1:]), _current_depth)
- if not isinstance(key, (int, slice)):
- return None
- if not isinstance(obj, (list, tuple, LazyList)):
- if not traverse_string:
- return None
- obj = str(obj)
- try:
- obj = obj[key]
- except IndexError:
- return None
- return obj
+ """
+ Safely traverse nested `dict`s and `Sequence`s
+
+ >>> obj = [{}, {"key": "value"}]
+ >>> traverse_obj(obj, (1, "key"))
+ "value"
+
+ Each of the provided `paths` is tested and the first producing a valid result will be returned.
+ The next path will also be tested if the path branched but no results could be found.
+ Supported values for traversal are `Mapping`, `Sequence` and `re.Match`.
+ A value of None is treated as the absence of a value.
+
+ The paths will be wrapped in `variadic`, so that `'key'` is conveniently the same as `('key', )`.
+
+ The keys in the path can be one of:
+ - `None`: Return the current object.
+ - `str`/`int`: Return `obj[key]`. For `re.Match`, return `obj.group(key)`.
+ - `slice`: Branch out and return all values in `obj[key]`.
+ - `Ellipsis`: Branch out and return a list of all values.
+ - `tuple`/`list`: Branch out and return a list of all matching values.
+ Read as: `[traverse_obj(obj, branch) for branch in branches]`.
+ - `function`: Branch out and return values filtered by the function.
+ Read as: `[value for key, value in obj if function(key, value)]`.
+ For `Sequence`s, `key` is the index of the value.
+ - `dict`: Transform the current object and return a matching dict.
+ Read as: `{key: traverse_obj(obj, path) for key, path in dct.items()}`.
+
+ `tuple`, `list`, and `dict` all support nested paths and branches.
+
+ @param paths Paths to traverse by.
+ @param default Value to return if the paths do not match.
+ @param expected_type If a `type`, only accept final values of this type.
+ If any other callable, try to call the function on each result.
+ @param get_all If `False`, return the first matching result, otherwise all matching ones.
+ @param casesense If `False`, consider string dictionary keys as case insensitive.
+
+ The following are only meant to be used by YoutubeDL.prepare_outtmpl and are not part of the API.
+
+ @param is_user_input Whether the keys are generated from user input.
+ If `True`, strings get converted to `int`/`slice` if needed.
+ @param traverse_string Whether to traverse into objects as strings.
+ If `True`, any non-compatible object will first be
+ converted into a string and then traversed into. 
+ + + @returns The result of the object traversal. + If successful, `get_all=True`, and the path branches at least once, + then a list of results is returned instead. + A list is always returned if the last path branches and no `default` is given. + """ + is_sequence = lambda x: isinstance(x, collections.abc.Sequence) and not isinstance(x, (str, bytes)) + casefold = lambda k: k.casefold() if isinstance(k, str) else k if isinstance(expected_type, type): type_test = lambda val: val if isinstance(val, expected_type) else None - elif expected_type is not None: - type_test = expected_type else: - type_test = lambda val: val - - for path in path_list: - depth = 0 - val = _traverse_obj(obj, path) - if val is not None: - if depth: - for _ in range(depth - 1): - val = itertools.chain.from_iterable(v for v in val if v is not None) - val = [v for v in map(type_test, val) if v is not None] - if val: - return val if get_all else val[0] + type_test = lambda val: try_call(expected_type or IDENTITY, args=(val,)) + + def apply_key(key, obj): + if obj is None: + return + + elif key is None: + yield obj + + elif isinstance(key, (list, tuple)): + for branch in key: + _, result = apply_path(obj, branch) + yield from result + + elif key is ...: + if isinstance(obj, collections.abc.Mapping): + yield from obj.values() + elif is_sequence(obj): + yield from obj + elif isinstance(obj, re.Match): + yield from obj.groups() + elif traverse_string: + yield from str(obj) + + elif callable(key): + if is_sequence(obj): + iter_obj = enumerate(obj) + elif isinstance(obj, collections.abc.Mapping): + iter_obj = obj.items() + elif isinstance(obj, re.Match): + iter_obj = enumerate((obj.group(), *obj.groups())) + elif traverse_string: + iter_obj = enumerate(str(obj)) else: - val = type_test(val) - if val is not None: - return val - return default + return + yield from (v for k, v in iter_obj if try_call(key, args=(k, v))) + elif isinstance(key, dict): + iter_obj = ((k, _traverse_obj(obj, v)) for k, v in key.items()) + yield {k: v if v is not None else default for k, v in iter_obj + if v is not None or default is not NO_DEFAULT} -def traverse_dict(dictn, keys, casesense=True): - write_string('DeprecationWarning: hypervideo_dl.utils.traverse_dict is deprecated ' - 'and may be removed in a future version. 
Use hypervideo_dl.utils.traverse_obj instead') - return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True) + elif isinstance(obj, collections.abc.Mapping): + yield (obj.get(key) if casesense or (key in obj) + else next((v for k, v in obj.items() if casefold(k) == key), None)) + elif isinstance(obj, re.Match): + if isinstance(key, int) or casesense: + with contextlib.suppress(IndexError): + yield obj.group(key) + return -def get_first(obj, keys, **kwargs): - return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False) + if not isinstance(key, str): + return + yield next((v for k, v in obj.groupdict().items() if casefold(k) == key), None) -def variadic(x, allowed_types=(str, bytes, dict)): - return x if isinstance(x, collections.abc.Iterable) and not isinstance(x, allowed_types) else (x,) + else: + if is_user_input: + key = (int_or_none(key) if ':' not in key + else slice(*map(int_or_none, key.split(':')))) + if not isinstance(key, (int, slice)): + return -def decode_base(value, digits): - # This will convert given base-x string to scalar (long or int) - table = {char: index for index, char in enumerate(digits)} - result = 0 - base = len(digits) - for chr in value: - result *= base - result += table[chr] - return result + if not is_sequence(obj): + if not traverse_string: + return + obj = str(obj) + + with contextlib.suppress(IndexError): + yield obj[key] + + def apply_path(start_obj, path): + objs = (start_obj,) + has_branched = False + + for key in variadic(path): + if is_user_input and key == ':': + key = ... + + if not casesense and isinstance(key, str): + key = key.casefold() + + if key is ... or isinstance(key, (list, tuple)) or callable(key): + has_branched = True + + key_func = functools.partial(apply_key, key) + objs = itertools.chain.from_iterable(map(key_func, objs)) + + return has_branched, objs + + def _traverse_obj(obj, path, use_list=True): + has_branched, results = apply_path(obj, path) + results = LazyList(x for x in map(type_test, results) if x is not None) + + if get_all and has_branched: + return results.exhaust() if results or use_list else None + + return results[0] if results else None + + for index, path in enumerate(paths, 1): + use_list = default is NO_DEFAULT and index == len(paths) + result = _traverse_obj(obj, path, use_list) + if result is not None: + return result + + return None if default is NO_DEFAULT else default + + +def traverse_dict(dictn, keys, casesense=True): + deprecation_warning(f'"{__name__}.traverse_dict" is deprecated and may be removed ' + f'in a future version. Use "{__name__}.traverse_obj" instead') + return traverse_obj(dictn, keys, casesense=casesense, is_user_input=True, traverse_string=True) + + +def get_first(obj, keys, **kwargs): + return traverse_obj(obj, (..., *variadic(keys)), **kwargs, get_all=False) def time_seconds(**kwargs): @@ -5291,9 +5548,9 @@ def jwt_encode_hs256(payload_data, key, headers={}): } if headers: header_data.update(headers) - header_b64 = base64.b64encode(json.dumps(header_data).encode('utf-8')) - payload_b64 = base64.b64encode(json.dumps(payload_data).encode('utf-8')) - h = hmac.new(key.encode('utf-8'), header_b64 + b'.' + payload_b64, hashlib.sha256) + header_b64 = base64.b64encode(json.dumps(header_data).encode()) + payload_b64 = base64.b64encode(json.dumps(payload_data).encode()) + h = hmac.new(key.encode(), header_b64 + b'.' + payload_b64, hashlib.sha256) signature_b64 = base64.b64encode(h.digest()) token = header_b64 + b'.' + payload_b64 + b'.' 
+ signature_b64 return token @@ -5302,14 +5559,18 @@ def jwt_encode_hs256(payload_data, key, headers={}): # can be extended in future to verify the signature and parse header and return the algorithm used if it's not HS256 def jwt_decode_hs256(jwt): header_b64, payload_b64, signature_b64 = jwt.split('.') - payload_data = json.loads(base64.urlsafe_b64decode(payload_b64)) + # add trailing ='s that may have been stripped, superfluous ='s are ignored + payload_data = json.loads(base64.urlsafe_b64decode(f'{payload_b64}===')) return payload_data +WINDOWS_VT_MODE = False if compat_os_name == 'nt' else None + + +@functools.cache def supports_terminal_sequences(stream): if compat_os_name == 'nt': - from .compat import WINDOWS_VT_MODE # Must be imported locally - if not WINDOWS_VT_MODE or get_windows_version() < (10, 0, 10586): + if not WINDOWS_VT_MODE: return False elif not os.getenv('TERM'): return False @@ -5319,6 +5580,19 @@ def supports_terminal_sequences(stream): return False +def windows_enable_vt_mode(): # TODO: Do this the proper way https://bugs.python.org/issue30075 + if get_windows_version() < (10, 0, 10586): + return + global WINDOWS_VT_MODE + try: + Popen.run('', shell=True) + except Exception: + return + + WINDOWS_VT_MODE = True + supports_terminal_sequences.cache_clear() + + _terminal_sequences_re = re.compile('\033\\[[^m]+m') @@ -5332,7 +5606,7 @@ def number_of_digits(number): def join_nonempty(*values, delim='-', from_dict=None): if from_dict is not None: - values = map(from_dict.get, values) + values = (traverse_obj(from_dict, variadic(v)) for v in values) return delim.join(map(str, filter(None, values))) @@ -5346,7 +5620,7 @@ def scale_thumbnails_to_max_format_width(formats, thumbnails, url_width_re): """ _keys = ('width', 'height') max_dimensions = max( - [tuple(format.get(k) or 0 for k in _keys) for format in formats], + (tuple(format.get(k) or 0 for k in _keys) for format in formats), default=(0, 0)) if not max_dimensions[0]: return thumbnails @@ -5368,33 +5642,69 @@ def parse_http_range(range): return int(crg.group(1)), int_or_none(crg.group(2)), int_or_none(crg.group(3)) +def read_stdin(what): + eof = 'Ctrl+Z' if compat_os_name == 'nt' else 'Ctrl+D' + write_string(f'Reading {what} from STDIN - EOF ({eof}) to end:\n') + return sys.stdin + + +def determine_file_encoding(data): + """ + Detect the text encoding used + @returns (encoding, bytes to skip) + """ + + # BOM marks are given priority over declarations + for bom, enc in BOMS: + if data.startswith(bom): + return enc, len(bom) + + # Strip off all null bytes to match even when UTF-16 or UTF-32 is used. 
+ # We ignore the endianness to get a good enough match + data = data.replace(b'\0', b'') + mobj = re.match(rb'(?m)^#\s*coding\s*:\s*(\S+)\s*$', data) + return mobj.group(1).decode() if mobj else None, 0 + + class Config: own_args = None + parsed_args = None filename = None __initialized = False def __init__(self, parser, label=None): - self._parser, self.label = parser, label + self.parser, self.label = parser, label self._loaded_paths, self.configs = set(), [] def init(self, args=None, filename=None): assert not self.__initialized + self.own_args, self.filename = args, filename + return self.load_configs() + + def load_configs(self): directory = '' - if filename: - location = os.path.realpath(filename) + if self.filename: + location = os.path.realpath(self.filename) directory = os.path.dirname(location) if location in self._loaded_paths: return False self._loaded_paths.add(location) self.__initialized = True - self.own_args, self.filename = args, filename - for location in self._parser.parse_args(args)[0].config_locations or []: + opts, _ = self.parser.parse_known_args(self.own_args) + self.parsed_args = self.own_args + for location in opts.config_locations or []: + if location == '-': + if location in self._loaded_paths: + continue + self._loaded_paths.add(location) + self.append_config(shlex.split(read_stdin('options'), comments=True), label='stdin') + continue location = os.path.join(directory, expand_path(location)) if os.path.isdir(location): location = os.path.join(location, 'hypervideo.conf') if not os.path.exists(location): - self._parser.error(f'config location {location} does not exist') + self.parser.error(f'config location {location} does not exist') self.append_config(self.read_file(location), location) return True @@ -5410,22 +5720,27 @@ class Config: @staticmethod def read_file(filename, default=[]): try: - optionf = open(filename) - except IOError: + optionf = open(filename, 'rb') + except OSError: return default # silently skip if file is not present try: + enc, skip = determine_file_encoding(optionf.read(512)) + optionf.seek(skip, io.SEEK_SET) + except OSError: + enc = None # silently skip read errors + try: # FIXME: https://github.com/ytdl-org/youtube-dl/commit/dfe5fa49aed02cf36ba9f743b11b0903554b5e56 - contents = optionf.read() - if sys.version_info < (3,): - contents = contents.decode(preferredencoding()) - res = compat_shlex_split(contents, comments=True) + contents = optionf.read().decode(enc or preferredencoding()) + res = shlex.split(contents, comments=True) + except Exception as err: + raise ValueError(f'Unable to parse "{filename}": {err}') finally: optionf.close() return res @staticmethod def hide_login_info(opts): - PRIVATE_OPTS = set(['-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username']) + PRIVATE_OPTS = {'-p', '--password', '-u', '--username', '--video-password', '--ap-password', '--ap-username'} eqre = re.compile('^(?P<key>' + ('|'.join(re.escape(po) for po in PRIVATE_OPTS)) + ')=.+$') def _scrub_eq(o): @@ -5442,7 +5757,7 @@ class Config: return opts def append_config(self, *args, label=None): - config = type(self)(self._parser, label) + config = type(self)(self.parser, label) config._loaded_paths = self._loaded_paths if config.init(*args): self.configs.append(config) @@ -5451,18 +5766,23 @@ class Config: def all_args(self): for config in reversed(self.configs): yield from config.all_args - yield from self.own_args or [] + yield from self.parsed_args or [] + + def parse_known_args(self, **kwargs): + return 
self.parser.parse_known_args(self.all_args, **kwargs) def parse_args(self): - return self._parser.parse_args(list(self.all_args)) + return self.parser.parse_args(self.all_args) -class WebSocketsWrapper(): +class WebSocketsWrapper: """Wraps websockets module to use in non-async scopes""" + pool = None def __init__(self, url, headers=None, connect=True): - self.loop = asyncio.events.new_event_loop() - self.conn = compat_websockets.connect( + self.loop = asyncio.new_event_loop() + # XXX: "loop" is deprecated + self.conn = websockets.connect( url, extra_headers=headers, ping_interval=None, close_timeout=float('inf'), loop=self.loop, ping_timeout=float('inf')) if connect: @@ -5491,7 +5811,7 @@ class WebSocketsWrapper(): # for contributors: If there's any new library using asyncio needs to be run in non-async, move these function out of this class @staticmethod def run_with_loop(main, loop): - if not asyncio.coroutines.iscoroutine(main): + if not asyncio.iscoroutine(main): raise ValueError(f'a coroutine was expected, got {main!r}') try: @@ -5503,7 +5823,7 @@ class WebSocketsWrapper(): @staticmethod def _cancel_all_tasks(loop): - to_cancel = asyncio.tasks.all_tasks(loop) + to_cancel = asyncio.all_tasks(loop) if not to_cancel: return @@ -5511,8 +5831,9 @@ class WebSocketsWrapper(): for task in to_cancel: task.cancel() + # XXX: "loop" is removed in python 3.10+ loop.run_until_complete( - asyncio.tasks.gather(*to_cancel, loop=loop, return_exceptions=True)) + asyncio.gather(*to_cancel, loop=loop, return_exceptions=True)) for task in to_cancel: if task.cancelled(): @@ -5525,17 +5846,459 @@ class WebSocketsWrapper(): }) -has_websockets = bool(compat_websockets) - - def merge_headers(*dicts): """Merge dicts of http headers case insensitively, prioritizing the latter ones""" return {k.title(): v for k, v in itertools.chain.from_iterable(map(dict.items, dicts))} +def cached_method(f): + """Cache a method""" + signature = inspect.signature(f) + + @functools.wraps(f) + def wrapper(self, *args, **kwargs): + bound_args = signature.bind(self, *args, **kwargs) + bound_args.apply_defaults() + key = tuple(bound_args.arguments.values())[1:] + + cache = vars(self).setdefault('_cached_method__cache', {}).setdefault(f.__name__, {}) + if key not in cache: + cache[key] = f(self, *args, **kwargs) + return cache[key] + return wrapper + + class classproperty: - def __init__(self, f): - self.f = f + """property access for class methods with optional caching""" + def __new__(cls, func=None, *args, **kwargs): + if not func: + return functools.partial(cls, *args, **kwargs) + return super().__new__(cls) + + def __init__(self, func, *, cache=False): + functools.update_wrapper(self, func) + self.func = func + self._cache = {} if cache else None def __get__(self, _, cls): - return self.f(cls) + if self._cache is None: + return self.func(cls) + elif cls not in self._cache: + self._cache[cls] = self.func(cls) + return self._cache[cls] + + +class Namespace(types.SimpleNamespace): + """Immutable namespace""" + + def __iter__(self): + return iter(self.__dict__.values()) + + @property + def items_(self): + return self.__dict__.items() + + +MEDIA_EXTENSIONS = Namespace( + common_video=('avi', 'flv', 'mkv', 'mov', 'mp4', 'webm'), + video=('3g2', '3gp', 'f4v', 'mk3d', 'divx', 'mpg', 'ogv', 'm4v', 'wmv'), + common_audio=('aiff', 'alac', 'flac', 'm4a', 'mka', 'mp3', 'ogg', 'opus', 'wav'), + audio=('aac', 'ape', 'asf', 'f4a', 'f4b', 'm4b', 'm4p', 'm4r', 'oga', 'ogx', 'spx', 'vorbis', 'wma'), + thumbnails=('jpg', 'png', 'webp'), + 
storyboards=('mhtml', ),
+ subtitles=('srt', 'vtt', 'ass', 'lrc'),
+ manifests=('f4f', 'f4m', 'm3u8', 'smil', 'mpd'),
+)
+MEDIA_EXTENSIONS.video += MEDIA_EXTENSIONS.common_video
+MEDIA_EXTENSIONS.audio += MEDIA_EXTENSIONS.common_audio
+
+KNOWN_EXTENSIONS = (*MEDIA_EXTENSIONS.video, *MEDIA_EXTENSIONS.audio, *MEDIA_EXTENSIONS.manifests)
+
+
+class RetryManager:
+ """Usage:
+ for retry in RetryManager(...):
+ try:
+ ...
+ except SomeException as err:
+ retry.error = err
+ continue
+ """
+ attempt, _error = 0, None
+
+ def __init__(self, _retries, _error_callback, **kwargs):
+ self.retries = _retries or 0
+ self.error_callback = functools.partial(_error_callback, **kwargs)
+
+ def _should_retry(self):
+ return self._error is not NO_DEFAULT and self.attempt <= self.retries
+
+ @property
+ def error(self):
+ if self._error is NO_DEFAULT:
+ return None
+ return self._error
+
+ @error.setter
+ def error(self, value):
+ self._error = value
+
+ def __iter__(self):
+ while self._should_retry():
+ self.error = NO_DEFAULT
+ self.attempt += 1
+ yield self
+ if self.error:
+ self.error_callback(self.error, self.attempt, self.retries)
+
+ @staticmethod
+ def report_retry(e, count, retries, *, sleep_func, info, warn, error=None, suffix=None):
+ """Utility function for reporting retries"""
+ if count > retries:
+ if error:
+ return error(f'{e}. Giving up after {count - 1} retries') if count > 1 else error(str(e))
+ raise e
+
+ if not count:
+ return warn(e)
+ elif isinstance(e, ExtractorError):
+ e = remove_end(str_or_none(e.cause) or e.orig_msg, '.')
+ warn(f'{e}. Retrying{format_field(suffix, None, " %s")} ({count}/{retries})...')
+
+ delay = float_or_none(sleep_func(n=count - 1)) if callable(sleep_func) else sleep_func
+ if delay:
+ info(f'Sleeping {delay:.2f} seconds ...')
+ time.sleep(delay)
+
+
+def make_archive_id(ie, video_id):
+ ie_key = ie if isinstance(ie, str) else ie.ie_key()
+ return f'{ie_key.lower()} {video_id}'
+
+
+def truncate_string(s, left, right=0):
+ assert left > 3 and right >= 0
+ if s is None or len(s) <= left + right:
+ return s
+ # s[-0:] would be the whole string, so guard against right=0
+ return f'{s[:left - 3]}...{s[-right:] if right else ""}'
+
+
+def orderedSet_from_options(options, alias_dict, *, use_regex=False, start=None):
+ assert 'all' in alias_dict, '"all" alias is required'
+ requested = list(start or [])
+ for val in options:
+ discard = val.startswith('-')
+ if discard:
+ val = val[1:]
+
+ if val in alias_dict:
+ val = alias_dict[val] if not discard else [
+ i[1:] if i.startswith('-') else f'-{i}' for i in alias_dict[val]]
+ # NB: Do not allow regex in aliases for performance
+ requested = orderedSet_from_options(val, alias_dict, start=requested)
+ continue
+
+ current = (filter(re.compile(val, re.I).fullmatch, alias_dict['all']) if use_regex
+ else [val] if val in alias_dict['all'] else None)
+ if current is None:
+ raise ValueError(val)
+
+ if discard:
+ for item in current:
+ while item in requested:
+ requested.remove(item)
+ else:
+ requested.extend(current)
+
+ return orderedSet(requested)
+
+
+class FormatSorter:
+ regex = r' *((?P<reverse>\+)?(?P<field>[a-zA-Z0-9_]+)((?P<separator>[~:])(?P<limit>.*?))?)? 
*$' + + default = ('hidden', 'aud_or_vid', 'hasvid', 'ie_pref', 'lang', 'quality', + 'res', 'fps', 'hdr:12', 'vcodec:vp9.2', 'channels', 'acodec', + 'size', 'br', 'asr', 'proto', 'ext', 'hasaud', 'source', 'id') # These must not be aliases + ytdl_default = ('hasaud', 'lang', 'quality', 'tbr', 'filesize', 'vbr', + 'height', 'width', 'proto', 'vext', 'abr', 'aext', + 'fps', 'fs_approx', 'source', 'id') + + settings = { + 'vcodec': {'type': 'ordered', 'regex': True, + 'order': ['av0?1', 'vp0?9.2', 'vp0?9', '[hx]265|he?vc?', '[hx]264|avc', 'vp0?8', 'mp4v|h263', 'theora', '', None, 'none']}, + 'acodec': {'type': 'ordered', 'regex': True, + 'order': ['[af]lac', 'wav|aiff', 'opus', 'vorbis|ogg', 'aac', 'mp?4a?', 'mp3', 'e-?a?c-?3', 'ac-?3', 'dts', '', None, 'none']}, + 'hdr': {'type': 'ordered', 'regex': True, 'field': 'dynamic_range', + 'order': ['dv', '(hdr)?12', r'(hdr)?10\+', '(hdr)?10', 'hlg', '', 'sdr', None]}, + 'proto': {'type': 'ordered', 'regex': True, 'field': 'protocol', + 'order': ['(ht|f)tps', '(ht|f)tp$', 'm3u8.*', '.*dash', 'websocket_frag', 'rtmpe?', '', 'mms|rtsp', 'ws|websocket', 'f4']}, + 'vext': {'type': 'ordered', 'field': 'video_ext', + 'order': ('mp4', 'mov', 'webm', 'flv', '', 'none'), + 'order_free': ('webm', 'mp4', 'mov', 'flv', '', 'none')}, + 'aext': {'type': 'ordered', 'field': 'audio_ext', + 'order': ('m4a', 'aac', 'mp3', 'ogg', 'opus', 'webm', '', 'none'), + 'order_free': ('ogg', 'opus', 'webm', 'mp3', 'm4a', 'aac', '', 'none')}, + 'hidden': {'visible': False, 'forced': True, 'type': 'extractor', 'max': -1000}, + 'aud_or_vid': {'visible': False, 'forced': True, 'type': 'multiple', + 'field': ('vcodec', 'acodec'), + 'function': lambda it: int(any(v != 'none' for v in it))}, + 'ie_pref': {'priority': True, 'type': 'extractor'}, + 'hasvid': {'priority': True, 'field': 'vcodec', 'type': 'boolean', 'not_in_list': ('none',)}, + 'hasaud': {'field': 'acodec', 'type': 'boolean', 'not_in_list': ('none',)}, + 'lang': {'convert': 'float', 'field': 'language_preference', 'default': -1}, + 'quality': {'convert': 'float', 'default': -1}, + 'filesize': {'convert': 'bytes'}, + 'fs_approx': {'convert': 'bytes', 'field': 'filesize_approx'}, + 'id': {'convert': 'string', 'field': 'format_id'}, + 'height': {'convert': 'float_none'}, + 'width': {'convert': 'float_none'}, + 'fps': {'convert': 'float_none'}, + 'channels': {'convert': 'float_none', 'field': 'audio_channels'}, + 'tbr': {'convert': 'float_none'}, + 'vbr': {'convert': 'float_none'}, + 'abr': {'convert': 'float_none'}, + 'asr': {'convert': 'float_none'}, + 'source': {'convert': 'float', 'field': 'source_preference', 'default': -1}, + + 'codec': {'type': 'combined', 'field': ('vcodec', 'acodec')}, + 'br': {'type': 'combined', 'field': ('tbr', 'vbr', 'abr'), 'same_limit': True}, + 'size': {'type': 'combined', 'same_limit': True, 'field': ('filesize', 'fs_approx')}, + 'ext': {'type': 'combined', 'field': ('vext', 'aext')}, + 'res': {'type': 'multiple', 'field': ('height', 'width'), + 'function': lambda it: (lambda l: min(l) if l else 0)(tuple(filter(None, it)))}, + + # Actual field names + 'format_id': {'type': 'alias', 'field': 'id'}, + 'preference': {'type': 'alias', 'field': 'ie_pref'}, + 'language_preference': {'type': 'alias', 'field': 'lang'}, + 'source_preference': {'type': 'alias', 'field': 'source'}, + 'protocol': {'type': 'alias', 'field': 'proto'}, + 'filesize_approx': {'type': 'alias', 'field': 'fs_approx'}, + 'audio_channels': {'type': 'alias', 'field': 'channels'}, + + # Deprecated + 'dimension': {'type': 'alias', 
'field': 'res', 'deprecated': True}, + 'resolution': {'type': 'alias', 'field': 'res', 'deprecated': True}, + 'extension': {'type': 'alias', 'field': 'ext', 'deprecated': True}, + 'bitrate': {'type': 'alias', 'field': 'br', 'deprecated': True}, + 'total_bitrate': {'type': 'alias', 'field': 'tbr', 'deprecated': True}, + 'video_bitrate': {'type': 'alias', 'field': 'vbr', 'deprecated': True}, + 'audio_bitrate': {'type': 'alias', 'field': 'abr', 'deprecated': True}, + 'framerate': {'type': 'alias', 'field': 'fps', 'deprecated': True}, + 'filesize_estimate': {'type': 'alias', 'field': 'size', 'deprecated': True}, + 'samplerate': {'type': 'alias', 'field': 'asr', 'deprecated': True}, + 'video_ext': {'type': 'alias', 'field': 'vext', 'deprecated': True}, + 'audio_ext': {'type': 'alias', 'field': 'aext', 'deprecated': True}, + 'video_codec': {'type': 'alias', 'field': 'vcodec', 'deprecated': True}, + 'audio_codec': {'type': 'alias', 'field': 'acodec', 'deprecated': True}, + 'video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True}, + 'has_video': {'type': 'alias', 'field': 'hasvid', 'deprecated': True}, + 'audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True}, + 'has_audio': {'type': 'alias', 'field': 'hasaud', 'deprecated': True}, + 'extractor': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True}, + 'extractor_preference': {'type': 'alias', 'field': 'ie_pref', 'deprecated': True}, + } + + def __init__(self, ydl, field_preference): + self.ydl = ydl + self._order = [] + self.evaluate_params(self.ydl.params, field_preference) + if ydl.params.get('verbose'): + self.print_verbose_info(self.ydl.write_debug) + + def _get_field_setting(self, field, key): + if field not in self.settings: + if key in ('forced', 'priority'): + return False + self.ydl.deprecated_feature(f'Using arbitrary fields ({field}) for format sorting is ' + 'deprecated and may be removed in a future version') + self.settings[field] = {} + propObj = self.settings[field] + if key not in propObj: + type = propObj.get('type') + if key == 'field': + default = 'preference' if type == 'extractor' else (field,) if type in ('combined', 'multiple') else field + elif key == 'convert': + default = 'order' if type == 'ordered' else 'float_string' if field else 'ignore' + else: + default = {'type': 'field', 'visible': True, 'order': [], 'not_in_list': (None,)}.get(key, None) + propObj[key] = default + return propObj[key] + + def _resolve_field_value(self, field, value, convertNone=False): + if value is None: + if not convertNone: + return None + else: + value = value.lower() + conversion = self._get_field_setting(field, 'convert') + if conversion == 'ignore': + return None + if conversion == 'string': + return value + elif conversion == 'float_none': + return float_or_none(value) + elif conversion == 'bytes': + return parse_bytes(value) + elif conversion == 'order': + order_list = (self._use_free_order and self._get_field_setting(field, 'order_free')) or self._get_field_setting(field, 'order') + use_regex = self._get_field_setting(field, 'regex') + list_length = len(order_list) + empty_pos = order_list.index('') if '' in order_list else list_length + 1 + if use_regex and value is not None: + for i, regex in enumerate(order_list): + if regex and re.match(regex, value): + return list_length - i + return list_length - empty_pos # not in list + else: # not regex or value = None + return list_length - (order_list.index(value) if value in order_list else empty_pos) + else: + if value.isnumeric(): + return float(value) + else: + 
self.settings[field]['convert'] = 'string' + return value + + def evaluate_params(self, params, sort_extractor): + self._use_free_order = params.get('prefer_free_formats', False) + self._sort_user = params.get('format_sort', []) + self._sort_extractor = sort_extractor + + def add_item(field, reverse, closest, limit_text): + field = field.lower() + if field in self._order: + return + self._order.append(field) + limit = self._resolve_field_value(field, limit_text) + data = { + 'reverse': reverse, + 'closest': False if limit is None else closest, + 'limit_text': limit_text, + 'limit': limit} + if field in self.settings: + self.settings[field].update(data) + else: + self.settings[field] = data + + sort_list = ( + tuple(field for field in self.default if self._get_field_setting(field, 'forced')) + + (tuple() if params.get('format_sort_force', False) + else tuple(field for field in self.default if self._get_field_setting(field, 'priority'))) + + tuple(self._sort_user) + tuple(sort_extractor) + self.default) + + for item in sort_list: + match = re.match(self.regex, item) + if match is None: + raise ExtractorError('Invalid format sort string "%s" given by extractor' % item) + field = match.group('field') + if field is None: + continue + if self._get_field_setting(field, 'type') == 'alias': + alias, field = field, self._get_field_setting(field, 'field') + if self._get_field_setting(alias, 'deprecated'): + self.ydl.deprecated_feature(f'Format sorting alias {alias} is deprecated and may ' + f'be removed in a future version. Please use {field} instead') + reverse = match.group('reverse') is not None + closest = match.group('separator') == '~' + limit_text = match.group('limit') + + has_limit = limit_text is not None + has_multiple_fields = self._get_field_setting(field, 'type') == 'combined' + has_multiple_limits = has_limit and has_multiple_fields and not self._get_field_setting(field, 'same_limit') + + fields = self._get_field_setting(field, 'field') if has_multiple_fields else (field,) + limits = limit_text.split(':') if has_multiple_limits else (limit_text,) if has_limit else tuple() + limit_count = len(limits) + for (i, f) in enumerate(fields): + add_item(f, reverse, closest, + limits[i] if i < limit_count + else limits[0] if has_limit and not has_multiple_limits + else None) + + def print_verbose_info(self, write_debug): + if self._sort_user: + write_debug('Sort order given by user: %s' % ', '.join(self._sort_user)) + if self._sort_extractor: + write_debug('Sort order given by extractor: %s' % ', '.join(self._sort_extractor)) + write_debug('Formats sorted by: %s' % ', '.join(['%s%s%s' % ( + '+' if self._get_field_setting(field, 'reverse') else '', field, + '%s%s(%s)' % ('~' if self._get_field_setting(field, 'closest') else ':', + self._get_field_setting(field, 'limit_text'), + self._get_field_setting(field, 'limit')) + if self._get_field_setting(field, 'limit_text') is not None else '') + for field in self._order if self._get_field_setting(field, 'visible')])) + + def _calculate_field_preference_from_value(self, format, field, type, value): + reverse = self._get_field_setting(field, 'reverse') + closest = self._get_field_setting(field, 'closest') + limit = self._get_field_setting(field, 'limit') + + if type == 'extractor': + maximum = self._get_field_setting(field, 'max') + if value is None or (maximum is not None and value >= maximum): + value = -1 + elif type == 'boolean': + in_list = self._get_field_setting(field, 'in_list') + not_in_list = self._get_field_setting(field, 'not_in_list') + 
value = 0 if ((in_list is None or value in in_list) and (not_in_list is None or value not in not_in_list)) else -1
+ elif type == 'ordered':
+ value = self._resolve_field_value(field, value, True)
+
+ # try to convert to number
+ val_num = float_or_none(value, default=self._get_field_setting(field, 'default'))
+ is_num = self._get_field_setting(field, 'convert') != 'string' and val_num is not None
+ if is_num:
+ value = val_num
+
+ return ((-10, 0) if value is None
+ else (1, value, 0) if not is_num # if a field has mixed strings and numbers, strings are sorted higher
+ else (0, -abs(value - limit), value - limit if reverse else limit - value) if closest
+ else (0, value, 0) if not reverse and (limit is None or value <= limit)
+ else (0, -value, 0) if limit is None or (reverse and value == limit) or value > limit
+ else (-1, value, 0))
+
+ def _calculate_field_preference(self, format, field):
+ type = self._get_field_setting(field, 'type') # extractor, boolean, ordered, field, multiple
+ get_value = lambda f: format.get(self._get_field_setting(f, 'field'))
+ if type == 'multiple':
+ type = 'field' # Only 'field' is allowed in multiple for now
+ actual_fields = self._get_field_setting(field, 'field')
+
+ value = self._get_field_setting(field, 'function')(get_value(f) for f in actual_fields)
+ else:
+ value = get_value(field)
+ return self._calculate_field_preference_from_value(format, field, type, value)
+
+ def calculate_preference(self, format):
+ # Determine missing protocol
+ if not format.get('protocol'):
+ format['protocol'] = determine_protocol(format)
+
+ # Determine missing ext
+ if not format.get('ext') and 'url' in format:
+ format['ext'] = determine_ext(format['url'])
+ if format.get('vcodec') == 'none':
+ format['audio_ext'] = format['ext'] if format.get('acodec') != 'none' else 'none'
+ format['video_ext'] = 'none'
+ else:
+ format['video_ext'] = format['ext']
+ format['audio_ext'] = 'none'
+ # if format.get('preference') is None and format.get('ext') in ('f4f', 'f4m'): # Not supported?
+ # format['preference'] = -1000
+
+ # Determine missing bitrates
+ if format.get('tbr') is None:
+ if format.get('vbr') is not None and format.get('abr') is not None:
+ format['tbr'] = format.get('vbr', 0) + format.get('abr', 0)
+ else:
+ if format.get('vcodec') != 'none' and format.get('vbr') is None:
+ format['vbr'] = format.get('tbr') - format.get('abr', 0)
+ if format.get('acodec') != 'none' and format.get('abr') is None:
+ format['abr'] = format.get('tbr') - format.get('vbr', 0)
+
+ return tuple(self._calculate_field_preference(format, field) for field in self._order)
+
+
+# Deprecated
+has_certifi = bool(certifi)
+has_websockets = bool(websockets)
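
What follows are illustrative usage sketches for the helpers touched by this patch. They are not part of the diff: the inputs are invented, and the only assumed names are ones hypervideo_dl.utils itself defines. First, iri_to_uri, which percent-escapes raw non-ASCII as UTF-8 without double-escaping existing sequences ('%' is in every safe set), and turns spaces into '+' via quote_plus:

from hypervideo_dl.utils import iri_to_uri

# Raw non-ASCII is percent-escaped as UTF-8; the space becomes '+' (quote_plus)
assert iri_to_uri('https://example.com/héllo wörld') == 'https://example.com/h%C3%A9llo+w%C3%B6rld'
# Existing escapes are left intact rather than double-escaped
assert iri_to_uri('https://example.com/%3C') == 'https://example.com/%3C'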
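
format_field's new default `ignore` treats every falsy value except a literal 0 as missing. A sketch of the changed behaviour:

from hypervideo_dl.utils import format_field

info = {'height': 1080, 'artist': ''}
assert format_field(info, 'height', '%sp') == '1080p'
# Empty strings now fall back to `default`, but 0 does not
assert format_field(info, 'artist', ' by %s') == ''
assert format_field({'count': 0}, 'count', '%d items') == '0 items'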
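
The rewritten traverse_obj is the heart of this patch. A sketch of the documented key kinds: branching with `...`, `dict` transforms, and `expected_type`/`default` filtering:

from hypervideo_dl.utils import traverse_obj

data = {'formats': [{'url': 'https://a.example', 'height': 720},
                    {'url': 'https://b.example', 'height': None}]}

# `...` branches over all items; None results are treated as absent
assert traverse_obj(data, ('formats', ..., 'height')) == [720]
# A `dict` key transforms the current object into a matching dict
assert traverse_obj(data, ('formats', 0, {'link': 'url'})) == {'link': 'https://a.example'}
# `expected_type` filters final values; `default` applies when nothing matches
assert traverse_obj(data, ('formats', 0, 'height'), expected_type=str, default='n/a') == 'n/a'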
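
jwt_decode_hs256 now re-pads the base64 payload before decoding (the superfluous '=' are ignored). A round trip; note that jwt_encode_hs256 returns bytes, so decode to str first:

from hypervideo_dl.utils import jwt_decode_hs256, jwt_encode_hs256

token = jwt_encode_hs256({'uid': 42}, 'secret')  # bytes
assert jwt_decode_hs256(token.decode())['uid'] == 42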
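
join_nonempty's `from_dict` lookups now go through traverse_obj instead of dict.get, so paths work as well as flat keys:

from hypervideo_dl.utils import join_nonempty

fmt = {'height': 1080, 'fps': 60, 'tbr': None}
# Falsy values are dropped before joining
assert join_nonempty('height', 'fps', 'tbr', from_dict=fmt) == '1080-60'
# A tuple is treated as a nested path
assert join_nonempty(('a', 'b'), from_dict={'a': {'b': 'x'}}) == 'x'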
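
determine_file_encoding prefers a BOM and otherwise scans for a `# coding: ...` declaration, returning the encoding plus the number of bytes to skip:

from hypervideo_dl.utils import determine_file_encoding

enc, skip = determine_file_encoding(b'# coding: utf-8\n-f best')
assert (enc, skip) == ('utf-8', 0)  # no BOM, so nothing to skip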
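
merge_headers title-cases every key, so later dicts win case-insensitively:

from hypervideo_dl.utils import merge_headers

merged = merge_headers({'User-Agent': 'A', 'accept': '*/*'}, {'user-agent': 'B'})
assert merged == {'User-Agent': 'B', 'Accept': '*/*'}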
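
cached_method memoizes per instance, keyed on the bound arguments with defaults applied, so a naively recursive method collapses to linear time:

from hypervideo_dl.utils import cached_method

class Fib:
    @cached_method
    def value(self, n):
        return n if n < 2 else self.value(n - 1) + self.value(n - 2)

assert Fib().value(30) == 832040  # cache lives in vars(instance), not on the class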
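
classproperty with `cache=True` evaluates once per class; its `__new__` lets the decorator be used both bare and with arguments:

from hypervideo_dl.utils import classproperty

class Extractor:
    _computed = 0

    @classproperty(cache=True)
    def description(cls):
        cls._computed += 1  # side effect to prove single evaluation
        return f'{cls.__name__} v{cls._computed}'

assert Extractor.description == Extractor.description == 'Extractor v1'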
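
RetryManager calls its callback after every failed attempt with (error, attempt, retries) and expects it to raise once the attempts exceed the retries, as RetryManager.report_retry does. A minimal stand-in callback:

from hypervideo_dl.utils import RetryManager

attempts = []

def report(err, count, retries):
    if count > retries:  # stand-in for RetryManager.report_retry
        raise err

for retry in RetryManager(3, report):
    attempts.append(retry.attempt)
    try:
        if retry.attempt < 2:
            raise OSError('transient')
    except OSError as err:
        retry.error = err

assert attempts == [1, 2]  # iteration stops once an attempt leaves retry.error unset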
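
truncate_string keeps `left - 3` leading characters plus an ellipsis and, with the right=0 guard added above, nothing from the tail by default:

from hypervideo_dl.utils import truncate_string

assert truncate_string('abcdefghij', 8) == 'abcde...'
assert truncate_string('abcdefghij', 5, 2) == 'ab...ij'
assert truncate_string('short', 8) == 'short'  # short enough, returned unchanged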
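
Finally, orderedSet_from_options expands aliases recursively and lets a '-' prefix retract earlier selections; the mandatory 'all' alias doubles as the whitelist of valid values:

from hypervideo_dl.utils import orderedSet_from_options

aliases = {'all': ['sub', 'thumb', 'info'], 'media': ['sub', 'thumb']}
# 'media' expands to sub+thumb, '-thumb' removes it again, order is preserved
assert orderedSet_from_options(['media', '-thumb', 'info'], aliases) == ['sub', 'info']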