Diffstat (limited to 'hypervideo_dl/extractor/archiveorg.py')
-rw-r--r--  hypervideo_dl/extractor/archiveorg.py  512
1 file changed, 384 insertions(+), 128 deletions(-)
diff --git a/hypervideo_dl/extractor/archiveorg.py b/hypervideo_dl/extractor/archiveorg.py
index d90fcb1..2ab3c1b 100644
--- a/hypervideo_dl/extractor/archiveorg.py
+++ b/hypervideo_dl/extractor/archiveorg.py
@@ -3,33 +3,37 @@ from __future__ import unicode_literals
import re
import json
-
from .common import InfoExtractor
-from .youtube import YoutubeIE
+from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
from ..compat import (
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
compat_HTTPError
)
from ..utils import (
+ bug_reports_message,
clean_html,
- determine_ext,
dict_get,
extract_attributes,
ExtractorError,
+ get_element_by_id,
HEADRequest,
int_or_none,
+ join_nonempty,
KNOWN_EXTENSIONS,
merge_dicts,
mimetype2ext,
+ orderedSet,
parse_duration,
parse_qs,
- RegexNotFoundError,
str_to_int,
str_or_none,
+ traverse_obj,
try_get,
unified_strdate,
unified_timestamp,
+ urlhandle_detect_ext,
+ url_or_none
)
@@ -61,7 +65,7 @@ class ArchiveOrgIE(InfoExtractor):
'description': 'md5:43a603fd6c5b4b90d12a96b921212b9c',
'uploader': 'yorkmba99@hotmail.com',
'timestamp': 1387699629,
- 'upload_date': "20131222",
+ 'upload_date': '20131222',
},
}, {
'url': 'http://archive.org/embed/XD300-23_68HighlightsAResearchCntAugHumanIntellect',
@@ -147,8 +151,7 @@ class ArchiveOrgIE(InfoExtractor):
# Archive.org metadata API doesn't clearly demarcate playlist entries
# or subtitle tracks, so we get them from the embeddable player.
- embed_page = self._download_webpage(
- 'https://archive.org/embed/' + identifier, identifier)
+ embed_page = self._download_webpage(f'https://archive.org/embed/{identifier}', identifier)
playlist = self._playlist_data(embed_page)
entries = {}
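
Note: both endpoints used in this hunk are public and can be exercised standalone. A minimal sketch of the two raw requests the extractor builds on (real endpoints; the identifier is an example item from the tests below):

    import json
    from urllib.request import urlopen

    identifier = 'XD300-23_68HighlightsAResearchCntAugHumanIntellect'

    # Metadata API: one JSON blob covering the item and every file in it,
    # with no clear demarcation of playlist entries or subtitle tracks
    with urlopen(f'https://archive.org/metadata/{identifier}') as resp:
        metadata = json.load(resp)
    print(metadata['metadata']['identifier'], len(metadata.get('files', [])))

    # Embed player page: its inline playlist JSON is what does demarcate them
    with urlopen(f'https://archive.org/embed/{identifier}') as resp:
        embed_html = resp.read().decode('utf-8', 'replace')
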
@@ -163,17 +166,17 @@ class ArchiveOrgIE(InfoExtractor):
'thumbnails': [],
'artist': p.get('artist'),
'track': p.get('title'),
- 'subtitles': {}}
+ 'subtitles': {},
+ }
for track in p.get('tracks', []):
if track['kind'] != 'subtitles':
continue
-
entries[p['orig']][track['label']] = {
- 'url': 'https://archive.org/' + track['file'].lstrip('/')}
+ 'url': 'https://archive.org/' + track['file'].lstrip('/')
+ }
- metadata = self._download_json(
- 'http://archive.org/metadata/' + identifier, identifier)
+ metadata = self._download_json('http://archive.org/metadata/' + identifier, identifier)
m = metadata['metadata']
identifier = m['identifier']
@@ -186,7 +189,7 @@ class ArchiveOrgIE(InfoExtractor):
'license': m.get('licenseurl'),
'release_date': unified_strdate(m.get('date')),
'timestamp': unified_timestamp(dict_get(m, ['publicdate', 'addeddate'])),
- 'webpage_url': 'https://archive.org/details/' + identifier,
+ 'webpage_url': f'https://archive.org/details/{identifier}',
'location': m.get('venue'),
'release_year': int_or_none(m.get('year'))}
@@ -204,7 +207,7 @@ class ArchiveOrgIE(InfoExtractor):
'discnumber': int_or_none(f.get('disc')),
'release_year': int_or_none(f.get('year'))})
entry = entries[f['name']]
- elif f.get('original') in entries:
+ elif traverse_obj(f, 'original', expected_type=str) in entries:
entry = entries[f['original']]
else:
continue
@@ -227,13 +230,12 @@ class ArchiveOrgIE(InfoExtractor):
'filesize': int_or_none(f.get('size')),
'protocol': 'https'})
- # Sort available formats by filesize
for entry in entries.values():
- entry['formats'] = list(sorted(entry['formats'], key=lambda x: x.get('filesize', -1)))
+ self._sort_formats(entry['formats'])
if len(entries) == 1:
# If there's only one item, use it as the main info dict
- only_video = entries[list(entries.keys())[0]]
+ only_video = next(iter(entries.values()))
if entry_id:
info = merge_dicts(only_video, info)
else:
@@ -258,19 +260,19 @@ class ArchiveOrgIE(InfoExtractor):
class YoutubeWebArchiveIE(InfoExtractor):
IE_NAME = 'web.archive:youtube'
- IE_DESC = 'web.archive.org saved youtube videos'
- _VALID_URL = r"""(?x)^
- (?:https?://)?web\.archive\.org/
- (?:web/)?
- (?:[0-9A-Za-z_*]+/)? # /web and the version index is optional
-
- (?:https?(?::|%3[Aa])//)?
- (?:
- (?:\w+\.)?youtube\.com/watch(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD]) # Youtube URL
- |(wayback-fakeurl\.archive\.org/yt/) # Or the internal fake url
- )
- (?P<id>[0-9A-Za-z_-]{11})(?:%26|\#|&|$)
- """
+ IE_DESC = 'web.archive.org saved youtube videos, "ytarchive:" prefix'
+ _VALID_URL = r'''(?x)(?:(?P<prefix>ytarchive:)|
+ (?:https?://)?web\.archive\.org/
+ (?:web/)?(?:(?P<date>[0-9]{14})?[0-9A-Za-z_*]*/)? # /web and the version index is optional
+ (?:https?(?::|%3[Aa])//)?(?:
+ (?:\w+\.)?youtube\.com(?::(?:80|443))?/watch(?:\.php)?(?:\?|%3[fF])(?:[^\#]+(?:&|%26))?v(?:=|%3[dD]) # Youtube URL
+ |(?:wayback-fakeurl\.archive\.org/yt/) # Or the internal fake url
+ )
+ )(?P<id>[0-9A-Za-z_-]{11})
+ (?(prefix)
+ (?::(?P<date2>[0-9]{14}))?$|
+ (?:%26|[#&]|$)
+ )'''
_TESTS = [
{
@@ -278,141 +280,395 @@ class YoutubeWebArchiveIE(InfoExtractor):
'info_dict': {
'id': 'aYAGB11YrSs',
'ext': 'webm',
- 'title': 'Team Fortress 2 - Sandviches!'
+ 'title': 'Team Fortress 2 - Sandviches!',
+ 'description': 'md5:4984c0f9a07f349fc5d8e82ab7af4eaf',
+ 'upload_date': '20110926',
+ 'uploader': 'Zeurel',
+ 'channel_id': 'UCukCyHaD-bK3in_pKpfH9Eg',
+ 'duration': 32,
+ 'uploader_id': 'Zeurel',
+ 'uploader_url': 'http://www.youtube.com/user/Zeurel'
}
- },
- {
+ }, {
# Internal link
'url': 'https://web.archive.org/web/2oe/http://wayback-fakeurl.archive.org/yt/97t7Xj_iBv0',
'info_dict': {
'id': '97t7Xj_iBv0',
'ext': 'mp4',
- 'title': 'How Flexible Machines Could Save The World'
+ 'title': 'Why Machines That Bend Are Better',
+ 'description': 'md5:00404df2c632d16a674ff8df1ecfbb6c',
+ 'upload_date': '20190312',
+ 'uploader': 'Veritasium',
+ 'channel_id': 'UCHnyfMqiRRG1u-2MsSQLbXA',
+ 'duration': 771,
+ 'uploader_id': '1veritasium',
+ 'uploader_url': 'http://www.youtube.com/user/1veritasium'
}
- },
- {
- # Video from 2012, webm format itag 45.
+ }, {
+ # Video from 2012, webm format itag 45. The newest capture is of the deleted video, with an invalid description.
+ # Should use the date in the link. Title ends with '- YouTube'. Capture has description in eow-description.
'url': 'https://web.archive.org/web/20120712231619/http://www.youtube.com/watch?v=AkhihxRKcrs&gl=US&hl=en',
'info_dict': {
'id': 'AkhihxRKcrs',
'ext': 'webm',
- 'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)'
+ 'title': 'Limited Run: Mondo\'s Modern Classic 1 of 3 (SDCC 2012)',
+ 'upload_date': '20120712',
+ 'duration': 398,
+ 'description': 'md5:ff4de6a7980cb65d951c2f6966a4f2f3',
+ 'uploader_id': 'machinima',
+ 'uploader_url': 'http://www.youtube.com/user/machinima'
}
- },
- {
- # Old flash-only video. Webpage title starts with "YouTube - ".
+ }, {
+ # FLV video. Video file URL does not provide itag information
'url': 'https://web.archive.org/web/20081211103536/http://www.youtube.com/watch?v=jNQXAC9IVRw',
'info_dict': {
'id': 'jNQXAC9IVRw',
- 'ext': 'unknown_video',
- 'title': 'Me at the zoo'
+ 'ext': 'flv',
+ 'title': 'Me at the zoo',
+ 'upload_date': '20050423',
+ 'channel_id': 'UC4QobU6STFB0P71PMvOGN5A',
+ 'duration': 19,
+ 'description': 'md5:10436b12e07ac43ff8df65287a56efb4',
+ 'uploader_id': 'jawed',
+ 'uploader_url': 'http://www.youtube.com/user/jawed'
}
- },
- {
- # Flash video with .flv extension (itag 34). Title has prefix "YouTube -"
- # Title has some weird unicode characters too.
+ }, {
'url': 'https://web.archive.org/web/20110712231407/http://www.youtube.com/watch?v=lTx3G6h2xyA',
'info_dict': {
'id': 'lTx3G6h2xyA',
'ext': 'flv',
- 'title': '‪Madeon - Pop Culture (live mashup)‬‏'
+ 'title': 'Madeon - Pop Culture (live mashup)',
+ 'upload_date': '20110711',
+ 'uploader': 'Madeon',
+ 'channel_id': 'UCqMDNf3Pn5L7pcNkuSEeO3w',
+ 'duration': 204,
+ 'description': 'md5:f7535343b6eda34a314eff8b85444680',
+ 'uploader_id': 'itsmadeon',
+ 'uploader_url': 'http://www.youtube.com/user/itsmadeon'
}
- },
- { # Some versions of Youtube have have "YouTube" as page title in html (and later rewritten by js).
- 'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
+ }, {
+ # First capture is of dead video, second is the oldest from CDX response.
+ 'url': 'https://web.archive.org/https://www.youtube.com/watch?v=1JYutPM8O6E',
'info_dict': {
- 'id': 'kH-G_aIBlFw',
+ 'id': '1JYutPM8O6E',
'ext': 'mp4',
- 'title': 'kH-G_aIBlFw'
- },
- 'expected_warnings': [
- 'unable to extract title',
- ]
- },
- {
- # First capture is a 302 redirect intermediary page.
- 'url': 'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=0altSZ96U4M',
+ 'title': 'Fake Teen Doctor Strikes AGAIN! - Weekly Weird News',
+ 'upload_date': '20160218',
+ 'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
+ 'duration': 1236,
+ 'description': 'md5:21032bae736421e89c2edf36d1936947',
+ 'uploader_id': 'MachinimaETC',
+ 'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
+ }
+ }, {
+ # First capture of dead video, capture date in link links to dead capture.
+ 'url': 'https://web.archive.org/web/20180803221945/https://www.youtube.com/watch?v=6FPhZJGvf4E',
'info_dict': {
- 'id': '0altSZ96U4M',
+ 'id': '6FPhZJGvf4E',
'ext': 'mp4',
- 'title': '0altSZ96U4M'
+ 'title': 'WTF: Video Games Still Launch BROKEN?! - T.U.G.S.',
+ 'upload_date': '20160219',
+ 'channel_id': 'UCdIaNUarhzLSXGoItz7BHVA',
+ 'duration': 798,
+ 'description': 'md5:a1dbf12d9a3bd7cb4c5e33b27d77ffe7',
+ 'uploader_id': 'MachinimaETC',
+ 'uploader_url': 'http://www.youtube.com/user/MachinimaETC'
},
'expected_warnings': [
- 'unable to extract title',
+ r'unable to download capture webpage \(it may not be archived\)'
]
- },
- {
+ }, { # Very old YouTube page, has - YouTube in title.
+ 'url': 'http://web.archive.org/web/20070302011044/http://youtube.com/watch?v=-06-KB9XTzg',
+ 'info_dict': {
+ 'id': '-06-KB9XTzg',
+ 'ext': 'flv',
+ 'title': 'New Coin Hack!! 100% Safe!!'
+ }
+ }, {
+ 'url': 'web.archive.org/https://www.youtube.com/watch?v=dWW7qP423y8',
+ 'info_dict': {
+ 'id': 'dWW7qP423y8',
+ 'ext': 'mp4',
+ 'title': 'It\'s Bootleg AirPods Time.',
+ 'upload_date': '20211021',
+ 'channel_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
+ 'channel_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug',
+ 'duration': 810,
+ 'description': 'md5:7b567f898d8237b256f36c1a07d6d7bc',
+ 'uploader': 'DankPods',
+ 'uploader_id': 'UC7Jwj9fkrf1adN4fMmTkpug',
+ 'uploader_url': 'http://www.youtube.com/channel/UC7Jwj9fkrf1adN4fMmTkpug'
+ }
+ }, {
+ # player response contains '};' See: https://github.com/ytdl-org/youtube-dl/issues/27093
+ 'url': 'https://web.archive.org/web/20200827003909if_/http://www.youtube.com/watch?v=6Dh-RL__uN4',
+ 'info_dict': {
+ 'id': '6Dh-RL__uN4',
+ 'ext': 'mp4',
+ 'title': 'bitch lasagna',
+ 'upload_date': '20181005',
+ 'channel_id': 'UC-lHJZR3Gqxm24_Vd_AJ5Yw',
+ 'channel_url': 'http://www.youtube.com/channel/UC-lHJZR3Gqxm24_Vd_AJ5Yw',
+ 'duration': 135,
+ 'description': 'md5:2dbe4051feeff2dab5f41f82bb6d11d0',
+ 'uploader': 'PewDiePie',
+ 'uploader_id': 'PewDiePie',
+ 'uploader_url': 'http://www.youtube.com/user/PewDiePie'
+ }
+ }, {
+ 'url': 'https://web.archive.org/web/http://www.youtube.com/watch?v=kH-G_aIBlFw',
+ 'only_matching': True
+ }, {
+ 'url': 'https://web.archive.org/web/20050214000000_if/http://www.youtube.com/watch?v=0altSZ96U4M',
+ 'only_matching': True
+ }, {
# Video not archived, only capture is unavailable video page
'url': 'https://web.archive.org/web/20210530071008/https://www.youtube.com/watch?v=lHJTf93HL1s&spfreload=10',
- 'only_matching': True,
- },
- { # Encoded url
+ 'only_matching': True
+ }, { # Encoded url
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fgl%3DUS%26v%3DAkhihxRKcrs%26hl%3Den',
- 'only_matching': True,
- },
- {
+ 'only_matching': True
+ }, {
'url': 'https://web.archive.org/web/20120712231619/http%3A//www.youtube.com/watch%3Fv%3DAkhihxRKcrs%26gl%3DUS%26hl%3Den',
- 'only_matching': True,
- }
+ 'only_matching': True
+ }, {
+ 'url': 'https://web.archive.org/web/20060527081937/http://www.youtube.com:80/watch.php?v=ELTFsLT73fA&amp;search=soccer',
+ 'only_matching': True
+ }, {
+ 'url': 'https://web.archive.org/http://www.youtube.com:80/watch?v=-05VVye-ffg',
+ 'only_matching': True
+ }, {
+ 'url': 'ytarchive:BaW_jenozKc:20050214000000',
+ 'only_matching': True
+ }, {
+ 'url': 'ytarchive:BaW_jenozKc',
+ 'only_matching': True
+ },
]
+ _YT_INITIAL_DATA_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialData["\']\s*\]|ytInitialData)\s*=\s*({.+?})\s*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_DATA_RE
+ _YT_INITIAL_PLAYER_RESPONSE_RE = r'(?:(?:(?:window\s*\[\s*["\']ytInitialPlayerResponse["\']\s*\]|ytInitialPlayerResponse)\s*=[(\s]*({.+?})[)\s]*;)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_PLAYER_RESPONSE_RE
+ _YT_INITIAL_BOUNDARY_RE = r'(?:(?:var\s+meta|</script|\n)|%s)' % YoutubeBaseInfoExtractor._YT_INITIAL_BOUNDARY_RE
+
+ _YT_DEFAULT_THUMB_SERVERS = ['i.ytimg.com'] # thumbnails most likely archived on these servers
+ _YT_ALL_THUMB_SERVERS = orderedSet(
+ _YT_DEFAULT_THUMB_SERVERS + ['img.youtube.com', *[f'{c}{n or ""}.ytimg.com' for c in ('i', 's') for n in (*range(0, 5), 9)]])
+
+ _WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'
+ _OLDEST_CAPTURE_DATE = 20050214000000
+ _NEWEST_CAPTURE_DATE = 20500101000000
+
+ def _call_cdx_api(self, item_id, url, filters: list = None, collapse: list = None, query: dict = None, note=None, fatal=False):
+ # CDX docs: https://github.com/internetarchive/wayback/blob/master/wayback-cdx-server/README.md
+ query = {
+ 'url': url,
+ 'output': 'json',
+ 'fl': 'original,mimetype,length,timestamp',
+ 'limit': 500,
+ 'filter': ['statuscode:200'] + (filters or []),
+ 'collapse': collapse or [],
+ **(query or {})
+ }
+ res = self._download_json(
+ 'https://web.archive.org/cdx/search/cdx', item_id,
+ note or 'Downloading CDX API JSON', query=query, fatal=fatal)
+ if isinstance(res, list) and len(res) >= 2:
+ # format response to make it easier to use
+ return list(dict(zip(res[0], v)) for v in res[1:])
+ elif not isinstance(res, list) or len(res) != 0:
+ self.report_warning('Error while parsing CDX API response' + bug_reports_message())
+
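The CDX server answers with a JSON array whose first row names the fields and whose remaining rows are captures, which is why _call_cdx_api zips row zero against the rest. A standalone sketch of the same query (endpoint and parameters as above; the video id is an example from the tests):

    import json
    from urllib.parse import urlencode
    from urllib.request import urlopen

    video_id = 'aYAGB11YrSs'
    query = urlencode({
        'url': f'https://www.youtube.com/watch?v={video_id}',
        'output': 'json',
        'fl': 'original,mimetype,length,timestamp',
        'filter': 'statuscode:200',
        'limit': 25,
    })
    with urlopen(f'https://web.archive.org/cdx/search/cdx?{query}') as resp:
        rows = json.load(resp)

    # rows[0] is the header row, e.g. ['original', 'mimetype', 'length', 'timestamp']
    captures = [dict(zip(rows[0], row)) for row in rows[1:]]
    for capture in captures[:3]:
        print(capture['timestamp'], capture['original'])
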
+ def _extract_yt_initial_variable(self, webpage, regex, video_id, name):
+ return self._parse_json(self._search_regex(
+ (r'%s\s*%s' % (regex, self._YT_INITIAL_BOUNDARY_RE),
+ regex), webpage, name, default='{}'), video_id, fatal=False)
+
+ def _extract_webpage_title(self, webpage):
+ page_title = self._html_extract_title(webpage, default='')
+ # YouTube video pages appear to always have either 'YouTube -' as prefix or '- YouTube' as suffix.
+ return self._html_search_regex(
+ r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
+ page_title, 'title', default='')
+
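Both page-title decorations the regex above handles reduce to the bare video title; a quick self-contained check of that pattern (same regex as in _extract_webpage_title):

    import re

    pattern = r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)'
    for page_title in ('YouTube - Me at the zoo', 'Me at the zoo - YouTube'):
        m = re.search(pattern, page_title)
        # exactly one alternation group matches, depending on the page's era;
        # the greedy group can keep trailing whitespace, hence the strip()
        print(next(g for g in m.groups() if g is not None).strip())  # -> 'Me at the zoo'
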
+ def _extract_metadata(self, video_id, webpage):
+ search_meta = ((lambda x: self._html_search_meta(x, webpage, default=None)) if webpage else (lambda x: None))
+ player_response = self._extract_yt_initial_variable(
+ webpage, self._YT_INITIAL_PLAYER_RESPONSE_RE, video_id, 'initial player response') or {}
+ initial_data = self._extract_yt_initial_variable(
+ webpage, self._YT_INITIAL_DATA_RE, video_id, 'initial data') or {}
+
+ initial_data_video = traverse_obj(
+ initial_data, ('contents', 'twoColumnWatchNextResults', 'results', 'results', 'contents', ..., 'videoPrimaryInfoRenderer'),
+ expected_type=dict, get_all=False, default={})
+
+ video_details = traverse_obj(
+ player_response, 'videoDetails', expected_type=dict, get_all=False, default={})
+
+ microformats = traverse_obj(
+ player_response, ('microformat', 'playerMicroformatRenderer'), expected_type=dict, get_all=False, default={})
+
+ video_title = (
+ video_details.get('title')
+ or YoutubeBaseInfoExtractor._get_text(microformats, 'title')
+ or YoutubeBaseInfoExtractor._get_text(initial_data_video, 'title')
+ or self._extract_webpage_title(webpage)
+ or search_meta(['og:title', 'twitter:title', 'title']))
+
+ channel_id = str_or_none(
+ video_details.get('channelId')
+ or microformats.get('externalChannelId')
+ or search_meta('channelId')
+ or self._search_regex(
+ r'data-channel-external-id=(["\'])(?P<id>(?:(?!\1).)+)\1', # @b45a9e6
+ webpage, 'channel id', default=None, group='id'))
+ channel_url = f'http://www.youtube.com/channel/{channel_id}' if channel_id else None
+
+ duration = int_or_none(
+ video_details.get('lengthSeconds')
+ or microformats.get('lengthSeconds')
+ or parse_duration(search_meta('duration')))
+ description = (
+ video_details.get('shortDescription')
+ or YoutubeBaseInfoExtractor._get_text(microformats, 'description')
+ or clean_html(get_element_by_id('eow-description', webpage)) # @9e6dd23
+ or search_meta(['description', 'og:description', 'twitter:description']))
+
+ uploader = video_details.get('author')
+
+ # Uploader ID and URL
+ uploader_mobj = re.search(
+ r'<link itemprop="url" href="(?P<uploader_url>https?://www\.youtube\.com/(?:user|channel)/(?P<uploader_id>[^"]+))">', # @fd05024
+ webpage)
+ if uploader_mobj is not None:
+ uploader_id, uploader_url = uploader_mobj.group('uploader_id'), uploader_mobj.group('uploader_url')
+ else:
+ # @a6211d2
+ uploader_url = url_or_none(microformats.get('ownerProfileUrl'))
+ uploader_id = self._search_regex(
+ r'(?:user|channel)/([^/]+)', uploader_url or '', 'uploader id', default=None)
+
+ upload_date = unified_strdate(
+ dict_get(microformats, ('uploadDate', 'publishDate'))
+ or search_meta(['uploadDate', 'datePublished'])
+ or self._search_regex(
+ [r'(?s)id="eow-date.*?>(.*?)</span>',
+ r'(?:id="watch-uploader-info".*?>.*?|["\']simpleText["\']\s*:\s*["\'])(?:Published|Uploaded|Streamed live|Started) on (.+?)[<"\']'], # @7998520
+ webpage, 'upload date', default=None))
+
+ return {
+ 'title': video_title,
+ 'description': description,
+ 'upload_date': upload_date,
+ 'uploader': uploader,
+ 'channel_id': channel_id,
+ 'channel_url': channel_url,
+ 'duration': duration,
+ 'uploader_url': uploader_url,
+ 'uploader_id': uploader_id,
+ }
+
+ def _extract_thumbnails(self, video_id):
+ try_all = 'thumbnails' in self._configuration_arg('check_all')
+ thumbnail_base_urls = ['http://{server}/vi{webp}/{video_id}'.format(
+ webp='_webp' if ext == 'webp' else '', video_id=video_id, server=server)
+ for server in (self._YT_ALL_THUMB_SERVERS if try_all else self._YT_DEFAULT_THUMB_SERVERS) for ext in (('jpg', 'webp') if try_all else ('jpg',))]
+
+ thumbnails = []
+ for url in thumbnail_base_urls:
+ response = self._call_cdx_api(
+ video_id, url, filters=['mimetype:image/(?:webp|jpeg)'],
+ collapse=['urlkey'], query={'matchType': 'prefix'})
+ if not response:
+ continue
+ thumbnails.extend(
+ {
+ 'url': (self._WAYBACK_BASE_URL % (int_or_none(thumbnail_dict.get('timestamp')) or self._OLDEST_CAPTURE_DATE)) + thumbnail_dict.get('original'),
+ 'filesize': int_or_none(thumbnail_dict.get('length')),
+ 'preference': int_or_none(thumbnail_dict.get('length'))
+ } for thumbnail_dict in response)
+ if not try_all:
+ break
+
+ self._remove_duplicate_formats(thumbnails)
+ return thumbnails
+
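Archived thumbnails are fetched back through the Wayback Machine itself; the if_ suffix in _WAYBACK_BASE_URL requests the raw archived file without the Wayback toolbar. A sketch of the URL shape the method assembles (base URL and default server as defined above; the concrete filename is hypothetical, since the real path comes from the CDX 'original' field):

    video_id = 'aYAGB11YrSs'
    WAYBACK_BASE_URL = 'https://web.archive.org/web/%sif_/'

    # default probe: jpg thumbnails on i.ytimg.com, matched by prefix via the CDX API
    prefix = f'http://i.ytimg.com/vi/{video_id}'
    # a capture of e.g. hqdefault.jpg with timestamp 20110926000000 resolves to:
    print((WAYBACK_BASE_URL % 20110926000000) + prefix + '/hqdefault.jpg')
    # https://web.archive.org/web/20110926000000if_/http://i.ytimg.com/vi/aYAGB11YrSs/hqdefault.jpg
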
+ def _get_capture_dates(self, video_id, url_date):
+ capture_dates = []
+ # Note: CDX API will not find watch pages with extra params in the url.
+ response = self._call_cdx_api(
+ video_id, f'https://www.youtube.com/watch?v={video_id}',
+ filters=['mimetype:text/html'], collapse=['timestamp:6', 'digest'], query={'matchType': 'prefix'}) or []
+ all_captures = sorted([int_or_none(r['timestamp']) for r in response if int_or_none(r['timestamp']) is not None])
+
+ # Prefer the new polymer UI captures as we support extracting more metadata from them
+ # WBM captures seem to all switch to this layout ~July 2020
+ modern_captures = [x for x in all_captures if x >= 20200701000000]
+ if modern_captures:
+ capture_dates.append(modern_captures[0])
+ capture_dates.append(url_date)
+ if all_captures:
+ capture_dates.append(all_captures[0])
+
+ if 'captures' in self._configuration_arg('check_all'):
+ capture_dates.extend(modern_captures + all_captures)
+
+ # Fallbacks if any of the above fail
+ capture_dates.extend([self._OLDEST_CAPTURE_DATE, self._NEWEST_CAPTURE_DATE])
+ return orderedSet(filter(None, capture_dates))
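Worked example of the resulting order: with a URL date of 20120712231619, an oldest capture of 20081211103536 and a first polymer-era capture of 20200827003909, the method yields [20200827003909, 20120712231619, 20081211103536, 20050214000000, 20500101000000]: the modern layout first, then the date from the link, then the oldest capture, then the two hard fallbacks.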
def _real_extract(self, url):
- video_id = self._match_id(url)
- title = video_id # if we are not able get a title
-
- def _extract_title(webpage):
- page_title = self._html_search_regex(
- r'<title>([^<]*)</title>', webpage, 'title', fatal=False) or ''
- # YouTube video pages appear to always have either 'YouTube -' as suffix or '- YouTube' as prefix.
- try:
- page_title = self._html_search_regex(
- r'(?:YouTube\s*-\s*(.*)$)|(?:(.*)\s*-\s*YouTube$)',
- page_title, 'title', default='')
- except RegexNotFoundError:
- page_title = None
-
- if not page_title:
- self.report_warning('unable to extract title', video_id=video_id)
- return
- return page_title
-
- # If the video is no longer available, the oldest capture may be one before it was removed.
- # Setting the capture date in url to early date seems to redirect to earliest capture.
- webpage = self._download_webpage(
- 'https://web.archive.org/web/20050214000000/http://www.youtube.com/watch?v=%s' % video_id,
- video_id=video_id, fatal=False, errnote='unable to download video webpage (probably not archived).')
- if webpage:
- title = _extract_title(webpage) or title
-
- # Use link translator mentioned in https://github.com/ytdl-org/youtube-dl/issues/13655
- internal_fake_url = 'https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id
+ video_id, url_date, url_date_2 = self._match_valid_url(url).group('id', 'date', 'date2')
+ url_date = url_date or url_date_2
+
+ urlh = None
try:
- video_file_webpage = self._request_webpage(
- HEADRequest(internal_fake_url), video_id,
- note='Fetching video file url', expected_status=True)
+ urlh = self._request_webpage(
+ HEADRequest('https://web.archive.org/web/2oe_/http://wayback-fakeurl.archive.org/yt/%s' % video_id),
+ video_id, note='Fetching archived video file url', expected_status=True)
except ExtractorError as e:
# HTTP Error 404 is expected if the video is not saved.
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
- raise ExtractorError(
- 'HTTP Error %s. Most likely the video is not archived or issue with web.archive.org.' % e.cause.code,
+ self.raise_no_formats(
+ 'The requested video is not archived, indexed, or there is an issue with web.archive.org',
expected=True)
- raise
- video_file_url = compat_urllib_parse_unquote(video_file_webpage.url)
- video_file_url_qs = parse_qs(video_file_url)
-
- # Attempt to recover any ext & format info from playback url
- format = {'url': video_file_url}
- itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
- if itag and itag in YoutubeIE._formats: # Naughty access but it works
- format.update(YoutubeIE._formats[itag])
- format.update({'format_id': itag})
- else:
- mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
- ext = mimetype2ext(mime) or determine_ext(video_file_url)
- format.update({'ext': ext})
- return {
- 'id': video_id,
- 'title': title,
- 'formats': [format],
- 'duration': str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))
- }
+ else:
+ raise
+
+ capture_dates = self._get_capture_dates(video_id, int_or_none(url_date))
+ self.write_debug('Captures to try: ' + join_nonempty(*capture_dates, delim=', '))
+ info = {'id': video_id}
+ for capture in capture_dates:
+ webpage = self._download_webpage(
+ (self._WAYBACK_BASE_URL + 'http://www.youtube.com/watch?v=%s') % (capture, video_id),
+ video_id=video_id, fatal=False, errnote='unable to download capture webpage (it may not be archived)',
+ note='Downloading capture webpage')
+ current_info = self._extract_metadata(video_id, webpage or '')
+ # Try to avoid getting deleted video metadata
+ if current_info.get('title'):
+ info = merge_dicts(info, current_info)
+ if 'captures' not in self._configuration_arg('check_all'):
+ break
+
+ info['thumbnails'] = self._extract_thumbnails(video_id)
+
+ if urlh:
+ url = compat_urllib_parse_unquote(urlh.geturl())
+ video_file_url_qs = parse_qs(url)
+ # Attempt to recover any ext & format info from playback url & response headers
+ format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
+ itag = try_get(video_file_url_qs, lambda x: x['itag'][0])
+ if itag and itag in YoutubeIE._formats:
+ format.update(YoutubeIE._formats[itag])
+ format.update({'format_id': itag})
+ else:
+ mime = try_get(video_file_url_qs, lambda x: x['mime'][0])
+ ext = (mimetype2ext(mime)
+ or urlhandle_detect_ext(urlh)
+ or mimetype2ext(urlh.headers.get('x-archive-guessed-content-type')))
+ format.update({'ext': ext})
+ info['formats'] = [format]
+ if not info.get('duration'):
+ info['duration'] = str_to_int(try_get(video_file_url_qs, lambda x: x['dur'][0]))
+
+ if not info.get('title'):
+ info['title'] = video_id
+ return info
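
With the new ytarchive: prefix in _VALID_URL, a specific capture can also be requested programmatically; a minimal sketch, assuming hypervideo exposes the usual youtube-dl style embedding API:

    from hypervideo_dl import YoutubeDL

    # 'ytarchive:<video id>:<14-digit capture date>'; the date part is optional,
    # per the (?(prefix)...) conditional in _VALID_URL above
    with YoutubeDL({'skip_download': True}) as ydl:
        info = ydl.extract_info('ytarchive:BaW_jenozKc:20050214000000', download=False)
        print(info.get('title'), info.get('upload_date'))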