-rw-r--r--  docs/supportedsites.md                   7
-rw-r--r--  youtube_dlc/extractor/box.py            98
-rw-r--r--  youtube_dlc/extractor/common.py         27
-rw-r--r--  youtube_dlc/extractor/extractors.py      7
-rw-r--r--  youtube_dlc/extractor/franceinter.py     3
-rw-r--r--  youtube_dlc/extractor/lbry.py            5
-rw-r--r--  youtube_dlc/extractor/nytimes.py        38
-rw-r--r--  youtube_dlc/extractor/pinterest.py     201
-rw-r--r--  youtube_dlc/extractor/rumble.py         67
9 files changed, 452 insertions(+), 1 deletion(-)
diff --git a/docs/supportedsites.md b/docs/supportedsites.md
index 99bb500b6..45a546650 100644
--- a/docs/supportedsites.md
+++ b/docs/supportedsites.md
@@ -110,6 +110,7 @@
- **Bloomberg**
- **BokeCC**
- **BostonGlobe**
+ - **Box**
- **Bpb**: Bundeszentrale für politische Bildung
- **BR**: Bayerischer Rundfunk
- **BravoTV**
@@ -157,6 +158,7 @@
- **Chilloutzone**
- **chirbit**
- **chirbit:profile**
+ - **cielotv.it**
- **Cinchcast**
- **Cinemax**
- **CiscoLiveSearch**
@@ -618,6 +620,7 @@
- **Nuvid**
- **NYTimes**
- **NYTimesArticle**
+ - **NYTimesCooking**
- **NZZ**
- **ocw.mit.edu**
- **OdaTV**
@@ -670,6 +673,8 @@
- **PicartoVod**
- **Piksel**
- **Pinkbike**
+ - **Pinterest**
+ - **PinterestCollection**
- **Pladform**
- **Platzi**
- **PlatziCourse**
@@ -766,6 +771,7 @@
- **RTVNH**
- **RTVS**
- **RUHD**
+ - **RumbleEmbed**
- **rutube**: Rutube videos
- **rutube:channel**: Rutube channels
- **rutube:embed**: Rutube embedded videos
@@ -943,6 +949,7 @@
- **TV2DKBornholmPlay**
- **TV4**: tv4.se and tv4play.se
- **TV5MondePlus**: TV5MONDE+
+ - **tv8.it**
- **TVA**
- **TVANouvelles**
- **TVANouvellesArticle**
diff --git a/youtube_dlc/extractor/box.py b/youtube_dlc/extractor/box.py
new file mode 100644
index 000000000..aae82d1af
--- /dev/null
+++ b/youtube_dlc/extractor/box.py
@@ -0,0 +1,98 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    determine_ext,
+    parse_iso8601,
+    # try_get,
+    update_url_query,
+)
+
+
+class BoxIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:[^.]+\.)?app\.box\.com/s/(?P<shared_name>[^/]+)/file/(?P<id>\d+)'
+    _TEST = {
+        'url': 'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538',
+        'md5': '1f81b2fd3960f38a40a3b8823e5fcd43',
+        'info_dict': {
+            'id': '510727257538',
+            'ext': 'mp4',
+            'title': 'Garber St. Louis will be 28th MLS team +scarving.mp4',
+            'uploader': 'MLS Video',
+            'timestamp': 1566320259,
+            'upload_date': '20190820',
+            'uploader_id': '235196876',
+        }
+    }
+
+    def _real_extract(self, url):
+        shared_name, file_id = re.match(self._VALID_URL, url).groups()
+        webpage = self._download_webpage(url, file_id)
+        request_token = self._parse_json(self._search_regex(
+            r'Box\.config\s*=\s*({.+?});', webpage,
+            'Box config'), file_id)['requestToken']
+        access_token = self._download_json(
+            'https://app.box.com/app-api/enduserapp/elements/tokens', file_id,
+            'Downloading token JSON metadata',
+            data=json.dumps({'fileIDs': [file_id]}).encode(), headers={
+                'Content-Type': 'application/json',
+                'X-Request-Token': request_token,
+                'X-Box-EndUser-API': 'sharedName=' + shared_name,
+            })[file_id]['read']
+        shared_link = 'https://app.box.com/s/' + shared_name
+        f = self._download_json(
+            'https://api.box.com/2.0/files/' + file_id, file_id,
+            'Downloading file JSON metadata', headers={
+                'Authorization': 'Bearer ' + access_token,
+                'BoxApi': 'shared_link=' + shared_link,
+                'X-Rep-Hints': '[dash]',  # TODO: extract `hls` formats
+            }, query={
+                'fields': 'authenticated_download_url,created_at,created_by,description,extension,is_download_available,name,representations,size'
+            })
+        title = f['name']
+
+        query = {
+            'access_token': access_token,
+            'shared_link': shared_link
+        }
+
+        formats = []
+
+        # for entry in (try_get(f, lambda x: x['representations']['entries'], list) or []):
+        #     entry_url_template = try_get(
+        #         entry, lambda x: x['content']['url_template'])
+        #     if not entry_url_template:
+        #         continue
+        #     representation = entry.get('representation')
+        #     if representation == 'dash':
+        #         TODO: append query to every fragment URL
+        #         formats.extend(self._extract_mpd_formats(
+        #             entry_url_template.replace('{+asset_path}', 'manifest.mpd'),
+        #             file_id, query=query))
+
+        authenticated_download_url = f.get('authenticated_download_url')
+        if authenticated_download_url and f.get('is_download_available'):
+            formats.append({
+                'ext': f.get('extension') or determine_ext(title),
+                'filesize': f.get('size'),
+                'format_id': 'download',
+                'url': update_url_query(authenticated_download_url, query),
+            })
+
+        self._sort_formats(formats)
+
+        creator = f.get('created_by') or {}
+
+        return {
+            'id': file_id,
+            'title': title,
+            'formats': formats,
+            'description': f.get('description') or None,
+            'uploader': creator.get('name'),
+            'timestamp': parse_iso8601(f.get('created_at')),
+            'uploader_id': creator.get('id'),
+        }
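
Review note: the three-step token dance above (page-embedded requestToken, elements/tokens exchange, then the Files API scoped to the shared link) can be reproduced outside the extractor. A minimal sketch, assuming the endpoints and header names behave exactly as the code above expects; requests is used only for brevity, the extractor itself goes through youtube-dlc's own downloader:

import json
import re

import requests

from youtube_dlc.utils import update_url_query


def fetch_box_download_url(shared_name, file_id):
    shared_link = 'https://app.box.com/s/' + shared_name
    page = requests.get('%s/file/%s' % (shared_link, file_id)).text
    # Step 1: the request token is embedded in the page as "Box.config = {...};"
    config = json.loads(re.search(r'Box\.config\s*=\s*({.+?});', page).group(1))
    # Step 2: exchange it for a short-lived read token scoped to this file
    tokens = requests.post(
        'https://app.box.com/app-api/enduserapp/elements/tokens',
        data=json.dumps({'fileIDs': [file_id]}),
        headers={
            'Content-Type': 'application/json',
            'X-Request-Token': config['requestToken'],
            'X-Box-EndUser-API': 'sharedName=' + shared_name,
        }).json()
    access_token = tokens[file_id]['read']
    # Step 3: query the regular Files API, scoped to the shared link
    meta = requests.get(
        'https://api.box.com/2.0/files/' + file_id,
        headers={
            'Authorization': 'Bearer ' + access_token,
            'BoxApi': 'shared_link=' + shared_link,
        },
        params={'fields': 'authenticated_download_url,is_download_available,name'}).json()
    if meta.get('is_download_available'):
        return update_url_query(meta['authenticated_download_url'], {
            'access_token': access_token, 'shared_link': shared_link})


# e.g. fetch_box_download_url('0evd2o3e08l60lr4ygukepvnkord1o1x', '510727257538')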
diff --git a/youtube_dlc/extractor/common.py b/youtube_dlc/extractor/common.py
index 2bc94acdd..aacdf06fe 100644
--- a/youtube_dlc/extractor/common.py
+++ b/youtube_dlc/extractor/common.py
@@ -2597,6 +2597,7 @@ class InfoExtractor(object):
    def _extract_akamai_formats(self, manifest_url, video_id, hosts={}):
        formats = []
+
        hdcore_sign = 'hdcore=3.7.0'
        f4m_url = re.sub(r'(https?://[^/]+)/i/', r'\1/z/', manifest_url).replace('/master.m3u8', '/manifest.f4m')
        hds_host = hosts.get('hds')
@@ -2609,6 +2610,7 @@
        for entry in f4m_formats:
            entry.update({'extra_param_to_segment_url': hdcore_sign})
        formats.extend(f4m_formats)
+
        m3u8_url = re.sub(r'(https?://[^/]+)/z/', r'\1/i/', manifest_url).replace('/manifest.f4m', '/master.m3u8')
        hls_host = hosts.get('hls')
        if hls_host:
@@ -2616,6 +2618,31 @@
        formats.extend(self._extract_m3u8_formats(
            m3u8_url, video_id, 'mp4', 'm3u8_native',
            m3u8_id='hls', fatal=False))
+
+        http_host = hosts.get('http')
+        if http_host and 'hdnea=' not in manifest_url:
+            REPL_REGEX = r'https://[^/]+/i/([^,]+),([^/]+),([^/]+).csmil/.+'
+            qualities = re.match(REPL_REGEX, m3u8_url).group(2).split(',')
+            qualities_length = len(qualities)
+            if len(formats) in (qualities_length + 1, qualities_length * 2 + 1):
+                i = 0
+                http_formats = []
+                for f in formats:
+                    if f['protocol'] == 'm3u8_native' and f['vcodec'] != 'none':
+                        for protocol in ('http', 'https'):
+                            http_f = f.copy()
+                            del http_f['manifest_url']
+                            http_url = re.sub(
+                                REPL_REGEX, protocol + r'://%s/\1%s\3' % (http_host, qualities[i]), f['url'])
+                            http_f.update({
+                                'format_id': http_f['format_id'].replace('hls-', protocol + '-'),
+                                'url': http_url,
+                                'protocol': protocol,
+                            })
+                            http_formats.append(http_f)
+                        i += 1
+                formats.extend(http_formats)
+
        return formats

    def _extract_wowza_formats(self, url, video_id, m3u8_entry_protocol='m3u8_native', skip_protocols=[]):
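
Review note: to make the new Akamai rewrite concrete, here is what the substitution produces for a typical /i/...,q1,q2,q3,.csmil/master.m3u8 URL. The regex and group layout are copied from the hunk above; the URL and HTTP host are invented for illustration:

import re

REPL_REGEX = r'https://[^/]+/i/([^,]+),([^/]+),([^/]+).csmil/.+'
m3u8_url = 'https://example-vh.akamaihd.net/i/videos/clip_,300,600,900,.mp4.csmil/master.m3u8'
http_host = 'progressive.example.com'  # hypothetical hosts['http'] value

m = re.match(REPL_REGEX, m3u8_url)
path, qualities, suffix = m.group(1), m.group(2).split(','), m.group(3)
# qualities == ['300', '600', '900'] -- typically one entry per HLS variant
for quality in qualities:
    # one progressive URL per variant, mirroring what the new code appends
    print('https://%s/%s%s%s' % (http_host, path, quality, suffix))
# https://progressive.example.com/videos/clip_300.mp4
# https://progressive.example.com/videos/clip_600.mp4
# https://progressive.example.com/videos/clip_900.mp4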
diff --git a/youtube_dlc/extractor/extractors.py b/youtube_dlc/extractor/extractors.py
index 15522f942..c50bdbb79 100644
--- a/youtube_dlc/extractor/extractors.py
+++ b/youtube_dlc/extractor/extractors.py
@@ -126,6 +126,7 @@ from .blinkx import BlinkxIE
from .bloomberg import BloombergIE
from .bokecc import BokeCCIE
from .bostonglobe import BostonGlobeIE
+from .box import BoxIE
from .bpb import BpbIE
from .br import (
    BRIE,
@@ -801,6 +802,7 @@ from .ntvru import NTVRuIE
from .nytimes import (
    NYTimesIE,
    NYTimesArticleIE,
+    NYTimesCookingIE,
)
from .nuvid import NuvidIE
from .nzz import NZZIE
@@ -863,6 +865,10 @@ from .picarto import (
)
from .piksel import PikselIE
from .pinkbike import PinkbikeIE
+from .pinterest import (
+    PinterestIE,
+    PinterestCollectionIE,
+)
from .pladform import PladformIE
from .platzi import (
    PlatziIE,
@@ -981,6 +987,7 @@ from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVELiveIE, RTVETe
from .rtvnh import RTVNHIE
from .rtvs import RTVSIE
from .ruhd import RUHDIE
+from .rumble import RumbleEmbedIE
from .rutube import (
    RutubeIE,
    RutubeChannelIE,
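
Review note: a quick way to confirm the new extractors are wired up once these imports land. gen_extractors() and IE.suitable() are existing youtube-dlc helpers, and the URLs are taken from the test cases added in this commit:

from youtube_dlc.extractor import gen_extractors

urls = [
    'https://mlssoccer.app.box.com/s/0evd2o3e08l60lr4ygukepvnkord1o1x/file/510727257538',
    'https://www.pinterest.com/pin/664281013778109217/',
    'https://rumble.com/embed/v5pv5f',
]
for url in urls:
    # every registered extractor except the catch-all generic one
    names = [ie.IE_NAME for ie in gen_extractors()
             if ie.suitable(url) and ie.IE_NAME != 'generic']
    print(url, '->', names)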
diff --git a/youtube_dlc/extractor/franceinter.py b/youtube_dlc/extractor/franceinter.py
index 05806895c..ae822a50e 100644
--- a/youtube_dlc/extractor/franceinter.py
+++ b/youtube_dlc/extractor/franceinter.py
@@ -16,6 +16,7 @@ class FranceInterIE(InfoExtractor):
            'ext': 'mp3',
            'title': 'Affaire Cahuzac : le contentieux du compte en Suisse',
            'description': 'md5:401969c5d318c061f86bda1fa359292b',
+            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20160907',
        },
    }
@@ -31,6 +32,7 @@ class FranceInterIE(InfoExtractor):
        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)
+        thumbnail = self._html_search_meta(['og:image', 'twitter:image'], webpage)
        upload_date_str = self._search_regex(
            r'class=["\']\s*cover-emission-period\s*["\'][^>]*>[^<]+\s+(\d{1,2}\s+[^\s]+\s+\d{4})<',
@@ -48,6 +50,7 @@ class FranceInterIE(InfoExtractor):
            'id': video_id,
            'title': title,
            'description': description,
+            'thumbnail': thumbnail,
            'upload_date': upload_date,
            'formats': [{
                'url': video_url,
diff --git a/youtube_dlc/extractor/lbry.py b/youtube_dlc/extractor/lbry.py
index 0a7ee919c..6177297ab 100644
--- a/youtube_dlc/extractor/lbry.py
+++ b/youtube_dlc/extractor/lbry.py
@@ -16,7 +16,7 @@ from ..utils import (
class LBRYIE(InfoExtractor):
    IE_NAME = 'lbry.tv'
-    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
+    _VALID_URL = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[^:]+:[0-9a-z]+/[^:]+:[0-9a-z])'
    _TESTS = [{
        # Video
        'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
@@ -44,6 +44,9 @@
    }, {
        'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
        'only_matching': True,
+    }, {
+        'url': "https://odysee.com/@ScammerRevolts:b0/I-SYSKEY'D-THE-SAME-SCAMMERS-3-TIMES!:b",
+        'only_matching': True,
    }]

    def _call_api_proxy(self, method, display_id, params):
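
Review note: the relaxed _VALID_URL is what lets the new only_matching test pass; the old character classes reject the apostrophe and exclamation mark in the stream name. A quick check of both patterns against that URL:

import re

OLD = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[0-9a-zA-Z-]+:[0-9a-z]+/[0-9a-zA-Z().-]+:[0-9a-z])'
NEW = r'https?://(?:www\.)?(?:lbry\.tv|odysee\.com)/(?P<id>@[^:]+:[0-9a-z]+/[^:]+:[0-9a-z])'
url = "https://odysee.com/@ScammerRevolts:b0/I-SYSKEY'D-THE-SAME-SCAMMERS-3-TIMES!:b"

print(bool(re.match(OLD, url)))  # False: ' and ! are not in the old name class
print(bool(re.match(NEW, url)))  # True: anything up to the next ':' is accepted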
diff --git a/youtube_dlc/extractor/nytimes.py b/youtube_dlc/extractor/nytimes.py
index fc78ca56c..976b1c694 100644
--- a/youtube_dlc/extractor/nytimes.py
+++ b/youtube_dlc/extractor/nytimes.py
@@ -221,3 +221,41 @@ class NYTimesArticleIE(NYTimesBaseIE):
                r'NYTD\.FlexTypes\.push\s*\(\s*({.+})\s*\)\s*;'),
            webpage, 'podcast data')
        return self._extract_podcast_from_json(podcast_data, page_id, webpage)
+
+
+class NYTimesCookingIE(NYTimesBaseIE):
+    _VALID_URL = r'https?://cooking\.nytimes\.com/(?:guid|recip)es/(?P<id>\d+)'
+    _TESTS = [{
+        'url': 'https://cooking.nytimes.com/recipes/1017817-cranberry-curd-tart',
+        'md5': 'dab81fa2eaeb3f9ed47498bdcfcdc1d3',
+        'info_dict': {
+            'id': '100000004756089',
+            'ext': 'mov',
+            'timestamp': 1479383008,
+            'uploader': 'By SHAW LASH, ADAM SAEWITZ and JAMES HERRON',
+            'title': 'Cranberry Tart',
+            'upload_date': '20161117',
+            'description': 'If you are a fan of lemon curd or the classic French tarte au citron, you will love this cranberry version.',
+        },
+    }, {
+        'url': 'https://cooking.nytimes.com/guides/13-how-to-cook-a-turkey',
+        'md5': '4b2e8c70530a89b8d905a2b572316eb8',
+        'info_dict': {
+            'id': '100000003951728',
+            'ext': 'mov',
+            'timestamp': 1445509539,
+            'description': 'Turkey guide',
+            'upload_date': '20151022',
+            'title': 'Turkey',
+        }
+    }]
+
+    def _real_extract(self, url):
+        page_id = self._match_id(url)
+
+        webpage = self._download_webpage(url, page_id)
+
+        video_id = self._search_regex(
+            r'data-video-id=["\'](\d+)', webpage, 'video id')
+
+        return self._extract_video_from_id(video_id)
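
Review note: NYTimesCookingIE only has to locate the internal NYTimes video id on the recipe page and then defers to _extract_video_from_id() inherited from NYTimesBaseIE. A minimal illustration of the lookup; the HTML fragment is invented:

import re

# Hypothetical markup similar to what cooking.nytimes.com embeds for its player
webpage = '<div class="recipe-video" data-video-id="100000004756089"></div>'
video_id = re.search(r'data-video-id=["\'](\d+)', webpage).group(1)
print(video_id)  # 100000004756089 -- then handed to _extract_video_from_id()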
diff --git a/youtube_dlc/extractor/pinterest.py b/youtube_dlc/extractor/pinterest.py
new file mode 100644
index 000000000..b249c9eda
--- /dev/null
+++ b/youtube_dlc/extractor/pinterest.py
@@ -0,0 +1,201 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    determine_ext,
+    float_or_none,
+    int_or_none,
+    try_get,
+    unified_timestamp,
+    url_or_none,
+)
+
+
+class PinterestBaseIE(InfoExtractor):
+    _VALID_URL_BASE = r'https?://(?:[^/]+\.)?pinterest\.(?:com|fr|de|ch|jp|cl|ca|it|co\.uk|nz|ru|com\.au|at|pt|co\.kr|es|com\.mx|dk|ph|th|com\.uy|co|nl|info|kr|ie|vn|com\.vn|ec|mx|in|pe|co\.at|hu|co\.in|co\.nz|id|com\.ec|com\.py|tw|be|uk|com\.bo|com\.pe)'
+
+    def _call_api(self, resource, video_id, options):
+        return self._download_json(
+            'https://www.pinterest.com/resource/%sResource/get/' % resource,
+            video_id, 'Download %s JSON metadata' % resource, query={
+                'data': json.dumps({'options': options})
+            })['resource_response']
+
+    def _extract_video(self, data, extract_formats=True):
+        video_id = data['id']
+
+        title = (data.get('title') or data.get('grid_title') or video_id).strip()
+
+        formats = []
+        duration = None
+        if extract_formats:
+            for format_id, format_dict in data['videos']['video_list'].items():
+                if not isinstance(format_dict, dict):
+                    continue
+                format_url = url_or_none(format_dict.get('url'))
+                if not format_url:
+                    continue
+                duration = float_or_none(format_dict.get('duration'), scale=1000)
+                ext = determine_ext(format_url)
+                if 'hls' in format_id.lower() or ext == 'm3u8':
+                    formats.extend(self._extract_m3u8_formats(
+                        format_url, video_id, 'mp4', entry_protocol='m3u8_native',
+                        m3u8_id=format_id, fatal=False))
+                else:
+                    formats.append({
+                        'url': format_url,
+                        'format_id': format_id,
+                        'width': int_or_none(format_dict.get('width')),
+                        'height': int_or_none(format_dict.get('height')),
+                        'duration': duration,
+                    })
+            self._sort_formats(
+                formats, field_preference=('height', 'width', 'tbr', 'format_id'))
+
+        description = data.get('description') or data.get('description_html') or data.get('seo_description')
+        timestamp = unified_timestamp(data.get('created_at'))
+
+        def _u(field):
+            return try_get(data, lambda x: x['closeup_attribution'][field], compat_str)
+
+        uploader = _u('full_name')
+        uploader_id = _u('id')
+
+        repost_count = int_or_none(data.get('repin_count'))
+        comment_count = int_or_none(data.get('comment_count'))
+        categories = try_get(data, lambda x: x['pin_join']['visual_annotation'], list)
+        tags = data.get('hashtags')
+
+        thumbnails = []
+        images = data.get('images')
+        if isinstance(images, dict):
+            for thumbnail_id, thumbnail in images.items():
+                if not isinstance(thumbnail, dict):
+                    continue
+                thumbnail_url = url_or_none(thumbnail.get('url'))
+                if not thumbnail_url:
+                    continue
+                thumbnails.append({
+                    'url': thumbnail_url,
+                    'width': int_or_none(thumbnail.get('width')),
+                    'height': int_or_none(thumbnail.get('height')),
+                })
+
+        return {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'duration': duration,
+            'timestamp': timestamp,
+            'thumbnails': thumbnails,
+            'uploader': uploader,
+            'uploader_id': uploader_id,
+            'repost_count': repost_count,
+            'comment_count': comment_count,
+            'categories': categories,
+            'tags': tags,
+            'formats': formats,
+            'extractor_key': PinterestIE.ie_key(),
+        }
+
+
+class PinterestIE(PinterestBaseIE):
+    _VALID_URL = r'%s/pin/(?P<id>\d+)' % PinterestBaseIE._VALID_URL_BASE
+    _TESTS = [{
+        'url': 'https://www.pinterest.com/pin/664281013778109217/',
+        'md5': '6550c2af85d6d9f3fe3b88954d1577fc',
+        'info_dict': {
+            'id': '664281013778109217',
+            'ext': 'mp4',
+            'title': 'Origami',
+            'description': 'md5:b9d90ddf7848e897882de9e73344f7dd',
+            'duration': 57.7,
+            'timestamp': 1593073622,
+            'upload_date': '20200625',
+            'uploader': 'Love origami -I am Dafei',
+            'uploader_id': '586523688879454212',
+            'repost_count': 50,
+            'comment_count': 0,
+            'categories': list,
+            'tags': list,
+        },
+    }, {
+        'url': 'https://co.pinterest.com/pin/824721750502199491/',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        data = self._call_api(
+            'Pin', video_id, {
+                'field_set_key': 'unauth_react_main_pin',
+                'id': video_id,
+            })['data']
+        return self._extract_video(data)
+
+
+class PinterestCollectionIE(PinterestBaseIE):
+    _VALID_URL = r'%s/(?P<username>[^/]+)/(?P<id>[^/?#&]+)' % PinterestBaseIE._VALID_URL_BASE
+    _TESTS = [{
+        'url': 'https://www.pinterest.ca/mashal0407/cool-diys/',
+        'info_dict': {
+            'id': '585890301462791043',
+            'title': 'cool diys',
+        },
+        'playlist_count': 8,
+    }, {
+        'url': 'https://www.pinterest.ca/fudohub/videos/',
+        'info_dict': {
+            'id': '682858430939307450',
+            'title': 'VIDEOS',
+        },
+        'playlist_mincount': 365,
+        'skip': 'Test with extract_formats=False',
+    }]
+
+    @classmethod
+    def suitable(cls, url):
+        return False if PinterestIE.suitable(url) else super(
+            PinterestCollectionIE, cls).suitable(url)
+
+    def _real_extract(self, url):
+        username, slug = re.match(self._VALID_URL, url).groups()
+        board = self._call_api(
+            'Board', slug, {
+                'slug': slug,
+                'username': username
+            })['data']
+        board_id = board['id']
+        options = {
+            'board_id': board_id,
+            'page_size': 250,
+        }
+        bookmark = None
+        entries = []
+        while True:
+            if bookmark:
+                options['bookmarks'] = [bookmark]
+            board_feed = self._call_api('BoardFeed', board_id, options)
+            for item in (board_feed.get('data') or []):
+                if not isinstance(item, dict) or item.get('type') != 'pin':
+                    continue
+                video_id = item.get('id')
+                if video_id:
+                    # Some pins may not be available anonymously via pin URL
+                    # video = self._extract_video(item, extract_formats=False)
+                    # video.update({
+                    #     '_type': 'url_transparent',
+                    #     'url': 'https://www.pinterest.com/pin/%s/' % video_id,
+                    # })
+                    # entries.append(video)
+                    entries.append(self._extract_video(item))
+            bookmark = board_feed.get('bookmark')
+            if not bookmark:
+                break
+        return self.playlist_result(
+            entries, playlist_id=board_id, playlist_title=board.get('name'))
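
Review note: PinterestCollectionIE pages through a board with Pinterest's bookmark cursor. A standalone sketch of the same loop, assuming the BoardFeedResource endpoint and option names behave as the extractor above relies on; requests is used only for brevity:

import json

import requests


def iter_board_pins(board_id, page_size=250):
    options = {'board_id': board_id, 'page_size': page_size}
    bookmark = None
    while True:
        if bookmark:
            options['bookmarks'] = [bookmark]
        resp = requests.get(
            'https://www.pinterest.com/resource/BoardFeedResource/get/',
            params={'data': json.dumps({'options': options})}
        ).json()['resource_response']
        for item in (resp.get('data') or []):
            if isinstance(item, dict) and item.get('type') == 'pin':
                yield item
        bookmark = resp.get('bookmark')
        if not bookmark:  # no cursor on the last page
            break


# e.g. for pin in iter_board_pins('682858430939307450'): print(pin.get('id'))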
diff --git a/youtube_dlc/extractor/rumble.py b/youtube_dlc/extractor/rumble.py
new file mode 100644
index 000000000..4a0225109
--- /dev/null
+++ b/youtube_dlc/extractor/rumble.py
@@ -0,0 +1,67 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+from .common import InfoExtractor
+from ..compat import compat_str
+from ..utils import (
+    determine_ext,
+    int_or_none,
+    parse_iso8601,
+    try_get,
+)
+
+
+class RumbleEmbedIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:www\.)?rumble\.com/embed/(?:[0-9a-z]+\.)?(?P<id>[0-9a-z]+)'
+    _TESTS = [{
+        'url': 'https://rumble.com/embed/v5pv5f',
+        'md5': '36a18a049856720189f30977ccbb2c34',
+        'info_dict': {
+            'id': 'v5pv5f',
+            'ext': 'mp4',
+            'title': 'WMAR 2 News Latest Headlines | October 20, 6pm',
+            'timestamp': 1571611968,
+            'upload_date': '20191020',
+        }
+    }, {
+        'url': 'https://rumble.com/embed/ufe9n.v5pv5f',
+        'only_matching': True,
+    }]
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        video = self._download_json(
+            'https://rumble.com/embedJS/', video_id,
+            query={'request': 'video', 'v': video_id})
+        title = video['title']
+
+        formats = []
+        for height, ua in (video.get('ua') or {}).items():
+            for i in range(2):
+                f_url = try_get(ua, lambda x: x[i], compat_str)
+                if f_url:
+                    ext = determine_ext(f_url)
+                    f = {
+                        'ext': ext,
+                        'format_id': '%s-%sp' % (ext, height),
+                        'height': int_or_none(height),
+                        'url': f_url,
+                    }
+                    bitrate = try_get(ua, lambda x: x[i + 2]['bitrate'])
+                    if bitrate:
+                        f['tbr'] = int_or_none(bitrate)
+                    formats.append(f)
+        self._sort_formats(formats)
+
+        author = video.get('author') or {}
+
+        return {
+            'id': video_id,
+            'title': title,
+            'formats': formats,
+            'thumbnail': video.get('i'),
+            'timestamp': parse_iso8601(video.get('pubDate')),
+            'channel': author.get('name'),
+            'channel_url': author.get('url'),
+            'duration': int_or_none(video.get('duration')),
+        }
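
Review note: the format loop above assumes the embedJS response maps each height to a four-element list, two candidate URLs followed by their metadata dicts; that shape is inferred from the indexing in the code. A sketch of the same parsing with invented sample data:

from youtube_dlc.utils import determine_ext, int_or_none

# Invented sample mirroring the assumed {'<height>': [url, url, meta, meta]} layout
ua = {
    '480': [
        'https://sp.rmbl.ws/s8/2/abc.mp4', 'https://sp.rmbl.ws/s8/2/abc.webm',
        {'bitrate': 1000}, {'bitrate': 800},
    ],
}

formats = []
for height, candidates in ua.items():
    for i in range(2):
        f_url = candidates[i]
        if f_url:
            ext = determine_ext(f_url)
            formats.append({
                'format_id': '%s-%sp' % (ext, height),
                'height': int_or_none(height),
                'tbr': candidates[i + 2].get('bitrate'),
                'url': f_url,
            })
print(formats)  # two entries: mp4-480p (tbr 1000) and webm-480p (tbr 800)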