Diffstat (limited to 'yt_dlp')
 yt_dlp/YoutubeDL.py                  | 20
 yt_dlp/cookies.py                    |  4
 yt_dlp/downloader/common.py          |  1
 yt_dlp/downloader/niconico.py        |  4
 yt_dlp/extractor/ciscowebex.py       |  4
 yt_dlp/extractor/common.py           |  3
 yt_dlp/extractor/dumpert.py          |  0  (mode 100755 => 100644)
 yt_dlp/extractor/globalplayer.py     |  0  (mode 100755 => 100644)
 yt_dlp/extractor/odnoklassniki.py    |  6
 yt_dlp/extractor/tvp.py              |  4
 yt_dlp/extractor/vidio.py            |  2
 yt_dlp/extractor/youtube.py          | 10
 yt_dlp/options.py                    |  6
 yt_dlp/utils/_legacy.py              |  6
 yt_dlp/utils/_utils.py               | 10
 15 files changed, 40 insertions(+), 40 deletions(-)
diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py
index e51bceef3..7a5e59323 100644
--- a/yt_dlp/YoutubeDL.py
+++ b/yt_dlp/YoutubeDL.py
@@ -259,7 +259,7 @@ class YoutubeDL:
consoletitle: Display progress in console window's titlebar.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
- clean_infojson: Remove private fields from the infojson
+ clean_infojson: Remove internal metadata from the infojson
getcomments: Extract video comments. This will not be written to disk
unless writeinfojson is also given
writeannotations: Write the video annotations to a .annotations.xml file
@@ -1902,7 +1902,7 @@ class YoutubeDL:
continue
entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
- if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
+ if not lazy and 'playlist-index' in self.params['compat_opts']:
playlist_index = ie_result['requested_entries'][i]
entry_copy = collections.ChainMap(entry, {
@@ -2959,8 +2959,7 @@ class YoutubeDL:
print_field('url', 'urls')
print_field('thumbnail', optional=True)
print_field('description', optional=True)
- if filename:
- print_field('filename')
+ print_field('filename')
if self.params.get('forceduration') and info_copy.get('duration') is not None:
self.to_stdout(formatSeconds(info_copy['duration']))
print_field('format')
@@ -3185,7 +3184,6 @@ class YoutubeDL:
return
if info_dict.get('requested_formats') is not None:
- requested_formats = info_dict['requested_formats']
old_ext = info_dict['ext']
if self.params.get('merge_output_format') is None:
if (info_dict['ext'] == 'webm'
@@ -3212,6 +3210,7 @@ class YoutubeDL:
full_filename = correct_ext(full_filename)
temp_filename = correct_ext(temp_filename)
dl_filename = existing_video_file(full_filename, temp_filename)
+
info_dict['__real_download'] = False
merger = FFmpegMergerPP(self)
@@ -3219,12 +3218,12 @@ class YoutubeDL:
if dl_filename is not None:
self.report_file_already_downloaded(dl_filename)
elif fd:
- for f in requested_formats if fd != FFmpegFD else []:
+ for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
f['filepath'] = fname = prepend_extension(
correct_ext(temp_filename, info_dict['ext']),
'f%s' % f['format_id'], info_dict['ext'])
downloaded.append(fname)
- info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
+ info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
success, real_download = self.dl(temp_filename, info_dict)
info_dict['__real_download'] = real_download
else:
@@ -3248,7 +3247,7 @@ class YoutubeDL:
f'You have requested downloading multiple formats to stdout {reason}. '
'The formats will be streamed one after the other')
fname = temp_filename
- for f in requested_formats:
+ for f in info_dict['requested_formats']:
new_info = dict(info_dict)
del new_info['requested_formats']
new_info.update(f)
@@ -4109,8 +4108,11 @@ class YoutubeDL:
ret.append((thumb_filename, thumb_filename_final))
t['filepath'] = thumb_filename
except network_exceptions as err:
+ if isinstance(err, urllib.error.HTTPError) and err.code == 404:
+ self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
+ else:
+ self.report_warning(f'Unable to download {thumb_display_id}: {err}')
thumbnails.pop(idx)
- self.report_warning(f'Unable to download {thumb_display_id}: {err}')
if ret and not write_all:
break
return ret
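
Note on the thumbnail hunk above: the rewritten except branch treats an HTTP 404 as "thumbnail does not exist" (reported as info) and keeps the warning only for other network errors. A minimal standalone sketch of that split, using plain urllib and a hypothetical fetch_thumbnail() helper rather than yt-dlp's own machinery:

    import urllib.error
    import urllib.request

    def fetch_thumbnail(url):
        # 404 means the thumbnail simply is not there: report it as info.
        # Any other network failure is still surfaced as a warning.
        try:
            return urllib.request.urlopen(url).read()
        except urllib.error.HTTPError as err:
            if err.code == 404:
                print('[info] Thumbnail does not exist')
            else:
                print(f'WARNING: Unable to download thumbnail: {err}')
        except urllib.error.URLError as err:
            print(f'WARNING: Unable to download thumbnail: {err}')
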
diff --git a/yt_dlp/cookies.py b/yt_dlp/cookies.py
index 8693e0b4a..f21e4f7e7 100644
--- a/yt_dlp/cookies.py
+++ b/yt_dlp/cookies.py
@@ -1326,3 +1326,7 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
cookie_req = urllib.request.Request(escape_url(sanitize_url(url)))
self.add_cookie_header(cookie_req)
return cookie_req.get_header('Cookie')
+
+ def clear(self, *args, **kwargs):
+ with contextlib.suppress(KeyError):
+ return super().clear(*args, **kwargs)
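
The clear() override added above guards against http.cookiejar.CookieJar.clear(domain=...), which raises KeyError when the jar holds no cookies for that domain. A minimal sketch of the same idea, with a hypothetical TolerantCookieJar standing in for YoutubeDLCookieJar:

    import contextlib
    import http.cookiejar

    class TolerantCookieJar(http.cookiejar.MozillaCookieJar):
        def clear(self, *args, **kwargs):
            # The stdlib implementation does `del self._cookies[domain]`,
            # which raises KeyError for a domain it has never seen.
            with contextlib.suppress(KeyError):
                return super().clear(*args, **kwargs)

    jar = TolerantCookieJar()
    jar.clear(domain='.mycdn.me')  # now a no-op instead of a KeyError

That behaviour is what allows the odnoklassniki.py hunk further down to drop its _get_cookies() checks before calling cookiejar.clear().
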
diff --git a/yt_dlp/downloader/common.py b/yt_dlp/downloader/common.py
index 477ec3c8a..a0219a350 100644
--- a/yt_dlp/downloader/common.py
+++ b/yt_dlp/downloader/common.py
@@ -49,7 +49,6 @@ class FileDownloader:
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
ratelimit: Download speed limit, in bytes/sec.
- continuedl: Attempt to continue downloads if possible
throttledratelimit: Assume the download is being throttled below this speed (bytes/sec)
retries: Number of times to retry for expected network errors.
Default is 0 for API, but 10 for CLI
diff --git a/yt_dlp/downloader/niconico.py b/yt_dlp/downloader/niconico.py
index cfe739784..7d8575c2a 100644
--- a/yt_dlp/downloader/niconico.py
+++ b/yt_dlp/downloader/niconico.py
@@ -7,9 +7,9 @@ from .common import FileDownloader
from .external import FFmpegFD
from ..utils import (
DownloadError,
- str_or_none,
- sanitized_Request,
WebSocketsWrapper,
+ sanitized_Request,
+ str_or_none,
try_get,
)
diff --git a/yt_dlp/extractor/ciscowebex.py b/yt_dlp/extractor/ciscowebex.py
index 0fcf02282..40430505d 100644
--- a/yt_dlp/extractor/ciscowebex.py
+++ b/yt_dlp/extractor/ciscowebex.py
@@ -49,7 +49,7 @@ class CiscoWebexIE(InfoExtractor):
'https://%s.webex.com/webappng/api/v1/recordings/%s/stream' % (subdomain, video_id),
video_id, headers=headers, query={'siteurl': siteurl}, expected_status=(403, 429))
- if urlh.status == 403:
+ if urlh.getcode() == 403:
if stream['code'] == 53004:
self.raise_login_required()
if stream['code'] == 53005:
@@ -59,7 +59,7 @@ class CiscoWebexIE(InfoExtractor):
'This video is protected by a password, use the --video-password option', expected=True)
raise ExtractorError(f'{self.IE_NAME} said: {stream["code"]} - {stream["message"]}', expected=True)
- if urlh.status == 429:
+ if urlh.getcode() == 429:
self.raise_login_required(
f'{self.IE_NAME} asks you to solve a CAPTCHA. Solve CAPTCHA in browser and',
method='cookies')
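
The .status to .getcode() change above (repeated in tvp.py and vidio.py below) presumably switches to the accessor method that is available across the response objects yt-dlp may hand back, not only on http.client.HTTPResponse. A toy check with plain urllib (requires network access; not part of the patch):

    import urllib.request

    # On a standard HTTPResponse both spellings agree; getcode() is simply the
    # older, method-style API that more response wrappers implement.
    with urllib.request.urlopen('https://example.com') as urlh:
        print(urlh.getcode())  # e.g. 200
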
diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py
index f11a67358..9662a7ee1 100644
--- a/yt_dlp/extractor/common.py
+++ b/yt_dlp/extractor/common.py
@@ -17,6 +17,7 @@ import subprocess
import sys
import time
import types
+import urllib.error
import urllib.parse
import urllib.request
import xml.etree.ElementTree
@@ -58,6 +59,7 @@ from ..utils import (
join_nonempty,
js_to_json,
mimetype2ext,
+ netrc_from_content,
network_exceptions,
orderedSet,
parse_bitrate,
@@ -72,7 +74,6 @@ from ..utils import (
smuggle_url,
str_or_none,
str_to_int,
- netrc_from_content,
strip_or_none,
traverse_obj,
truncate_string,
diff --git a/yt_dlp/extractor/dumpert.py b/yt_dlp/extractor/dumpert.py
index 0cf84263c..0cf84263c 100755..100644
--- a/yt_dlp/extractor/dumpert.py
+++ b/yt_dlp/extractor/dumpert.py
diff --git a/yt_dlp/extractor/globalplayer.py b/yt_dlp/extractor/globalplayer.py
index e0c0d58fd..e0c0d58fd 100755..100644
--- a/yt_dlp/extractor/globalplayer.py
+++ b/yt_dlp/extractor/globalplayer.py
diff --git a/yt_dlp/extractor/odnoklassniki.py b/yt_dlp/extractor/odnoklassniki.py
index 0d0ad0bb8..e63714e84 100644
--- a/yt_dlp/extractor/odnoklassniki.py
+++ b/yt_dlp/extractor/odnoklassniki.py
@@ -238,10 +238,8 @@ class OdnoklassnikiIE(InfoExtractor):
def _clear_cookies(self, cdn_url):
# Direct http downloads will fail if CDN cookies are set
# so we need to reset them after each format extraction
- if self._get_cookies('https://notarealsubdomain.mycdn.me/'):
- self.cookiejar.clear(domain='.mycdn.me')
- if self._get_cookies(cdn_url):
- self.cookiejar.clear(domain=urllib.parse.urlparse(cdn_url).hostname)
+ self.cookiejar.clear(domain='.mycdn.me')
+ self.cookiejar.clear(domain=urllib.parse.urlparse(cdn_url).hostname)
@classmethod
def _extract_embed_urls(cls, url, webpage):
diff --git a/yt_dlp/extractor/tvp.py b/yt_dlp/extractor/tvp.py
index 2aa0dd870..c686044fa 100644
--- a/yt_dlp/extractor/tvp.py
+++ b/yt_dlp/extractor/tvp.py
@@ -488,9 +488,9 @@ class TVPVODBaseIE(InfoExtractor):
f'{self._API_BASE_URL}/{resource}', video_id,
query={'lang': 'pl', 'platform': 'BROWSER', **query},
expected_status=lambda x: is_valid(x) or 400 <= x < 500, **kwargs)
- if is_valid(urlh.status):
+ if is_valid(urlh.getcode()):
return document
- raise ExtractorError(f'Woronicza said: {document.get("code")} (HTTP {urlh.status})')
+ raise ExtractorError(f'Woronicza said: {document.get("code")} (HTTP {urlh.getcode()})')
def _parse_video(self, video, with_url=True):
info_dict = traverse_obj(video, {
diff --git a/yt_dlp/extractor/vidio.py b/yt_dlp/extractor/vidio.py
index 770aa284d..23e1aaf20 100644
--- a/yt_dlp/extractor/vidio.py
+++ b/yt_dlp/extractor/vidio.py
@@ -39,7 +39,7 @@ class VidioBaseIE(InfoExtractor):
login_post, login_post_urlh = self._download_webpage_handle(
self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(login_form), expected_status=[302, 401])
- if login_post_urlh.status == 401:
+ if login_post_urlh.getcode() == 401:
if get_element_by_class('onboarding-content-register-popup__title', login_post):
raise ExtractorError(
'Unable to log in: The provided email has not registered yet.', expected=True)
diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index 4daa4f50e..11e47904a 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -811,7 +811,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'BADGE_STYLE_TYPE_PREMIUM': BadgeType.AVAILABILITY_PREMIUM,
'BADGE_STYLE_TYPE_LIVE_NOW': BadgeType.LIVE_NOW,
'BADGE_STYLE_TYPE_VERIFIED': BadgeType.VERIFIED,
- 'BADGE_STYLE_TYPE_VERIFIED_ARTIST': BadgeType.VERIFIED
+ 'BADGE_STYLE_TYPE_VERIFIED_ARTIST': BadgeType.VERIFIED,
}
label_map = {
@@ -821,7 +821,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'live': BadgeType.LIVE_NOW,
'premium': BadgeType.AVAILABILITY_PREMIUM,
'verified': BadgeType.VERIFIED,
- 'official artist channel': BadgeType.VERIFIED
+ 'official artist channel': BadgeType.VERIFIED,
}
badges = []
@@ -3935,7 +3935,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
f['quality'] = q(itag_qualities.get(try_get(f, lambda f: f['format_id'].split('-')[0]), -1))
if f['quality'] == -1 and f.get('height'):
f['quality'] = q(res_qualities[min(res_qualities, key=lambda x: abs(x - f['height']))])
- if self.get_param('verbose'):
+ if self.get_param('verbose') or all_formats:
f['format_note'] = join_nonempty(f.get('format_note'), client_name, delim=', ')
if f.get('fps') and f['fps'] <= 1:
del f['fps']
@@ -4531,7 +4531,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
and 'no-youtube-prefer-utc-upload-date' not in self.get_param('compat_opts', [])
):
upload_date = strftime_or_none(
- self._parse_time_text(self._get_text(vpir, 'dateText')), '%Y%m%d') or upload_date
+ self._parse_time_text(self._get_text(vpir, 'dateText'))) or upload_date
info['upload_date'] = upload_date
for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
@@ -5071,7 +5071,7 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
last_updated_unix = self._parse_time_text(
self._get_text(playlist_stats, 2) # deprecated, remove when old layout discontinued
or self._get_text(playlist_header_renderer, ('byline', 1, 'playlistBylineRenderer', 'text')))
- info['modified_date'] = strftime_or_none(last_updated_unix, '%Y%m%d')
+ info['modified_date'] = strftime_or_none(last_updated_unix)
info['view_count'] = self._get_count(playlist_stats, 1)
if info['view_count'] is None: # 0 is allowed
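
The two youtube.py hunks above drop the explicit '%Y%m%d' argument because strftime_or_none gains that value as its default date_format in the utils/_utils.py hunk at the end of this patch. A quick equivalence sketch under that assumption:

    from yt_dlp.utils import strftime_or_none

    ts = 1700000000  # any unix timestamp
    # With date_format defaulting to '%Y%m%d', both calls produce the same string
    assert strftime_or_none(ts) == strftime_or_none(ts, '%Y%m%d')
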
diff --git a/yt_dlp/options.py b/yt_dlp/options.py
index b174a24af..9d6dbec9f 100644
--- a/yt_dlp/options.py
+++ b/yt_dlp/options.py
@@ -1414,8 +1414,7 @@ def create_parser():
'--clean-info-json', '--clean-infojson',
action='store_true', dest='clean_infojson', default=None,
help=(
- 'Remove some private fields such as filenames from the infojson. '
- 'Note that it could still contain some personal information (default)'))
+ 'Remove some internal metadata such as filenames from the infojson (default)'))
filesystem.add_option(
'--no-clean-info-json', '--no-clean-infojson',
action='store_false', dest='clean_infojson',
@@ -1678,8 +1677,7 @@ def create_parser():
'Execute a command, optionally prefixed with when to execute it, separated by a ":". '
'Supported values of "WHEN" are the same as that of --use-postprocessor (default: after_move). '
'Same syntax as the output template can be used to pass any field as arguments to the command. '
- 'After download, an additional field "filepath" that contains the final path of the downloaded file '
- 'is also available, and if no fields are passed, %(filepath,_filename|)q is appended to the end of the command. '
+ 'If no fields are passed, %(filepath,_filename|)q is appended to the end of the command. '
'This option can be used multiple times'))
postproc.add_option(
'--no-exec',
diff --git a/yt_dlp/utils/_legacy.py b/yt_dlp/utils/_legacy.py
index 1097778f0..96ac468b1 100644
--- a/yt_dlp/utils/_legacy.py
+++ b/yt_dlp/utils/_legacy.py
@@ -6,7 +6,7 @@ import sys
import urllib.parse
import zlib
-from ._utils import decode_base_n, preferredencoding
+from ._utils import Popen, decode_base_n, preferredencoding
from .traversal import traverse_obj
from ..dependencies import certifi, websockets
@@ -174,3 +174,7 @@ def handle_youtubedl_headers(headers):
del filtered_headers['Youtubedl-no-compression']
return filtered_headers
+
+
+def process_communicate_or_kill(p, *args, **kwargs):
+ return Popen.communicate_or_kill(p, *args, **kwargs)
diff --git a/yt_dlp/utils/_utils.py b/yt_dlp/utils/_utils.py
index 28c2785cb..bc1bc9116 100644
--- a/yt_dlp/utils/_utils.py
+++ b/yt_dlp/utils/_utils.py
@@ -872,12 +872,6 @@ class netrc_from_content(netrc.netrc):
self._parse('-', stream, False)
-def process_communicate_or_kill(p, *args, **kwargs):
- deprecation_warning(f'"{__name__}.process_communicate_or_kill" is deprecated and may be removed '
- f'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead')
- return Popen.communicate_or_kill(p, *args, **kwargs)
-
-
class Popen(subprocess.Popen):
if sys.platform == 'win32':
_startupinfo = subprocess.STARTUPINFO()
@@ -1662,7 +1656,7 @@ def unified_strdate(date_str, day_first=True):
def unified_timestamp(date_str, day_first=True):
- if date_str is None:
+ if not isinstance(date_str, str):
return None
date_str = re.sub(r'\s+', ' ', re.sub(
@@ -2454,7 +2448,7 @@ def request_to_url(req):
return req
-def strftime_or_none(timestamp, date_format, default=None):
+def strftime_or_none(timestamp, date_format='%Y%m%d', default=None):
datetime_object = None
try:
if isinstance(timestamp, (int, float)): # unix timestamp
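
On the unified_timestamp hunk above: widening the guard from checking for None to checking isinstance(date_str, str) means any non-string input now returns None instead of failing inside re.sub. A small illustration of just that guard, using a hypothetical parse_timestamp_guarded() rather than the full function:

    import re

    def parse_timestamp_guarded(date_str):
        # Stand-in for unified_timestamp's new guard: anything that is not a
        # string (not just None) short-circuits to None instead of reaching
        # re.sub() and raising a TypeError.
        if not isinstance(date_str, str):
            return None
        return re.sub(r'\s+', ' ', date_str)  # real parsing continues from here

    assert parse_timestamp_guarded(1234567890) is None
    assert parse_timestamp_guarded('2023-06-01  12:00') == '2023-06-01 12:00'
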