Diffstat (limited to 'hypervideo_dl/extractor/lbry.py')
-rw-r--r--  hypervideo_dl/extractor/lbry.py  140
1 files changed, 87 insertions, 53 deletions
diff --git a/hypervideo_dl/extractor/lbry.py b/hypervideo_dl/extractor/lbry.py
index b5def1e..9a9f925 100644
--- a/hypervideo_dl/extractor/lbry.py
+++ b/hypervideo_dl/extractor/lbry.py
@@ -1,18 +1,22 @@
import functools
import json
+import re
+import urllib.parse
from .common import InfoExtractor
-from ..compat import compat_str, compat_urllib_parse_unquote
+from ..networking import HEADRequest
from ..utils import (
ExtractorError,
- HEADRequest,
OnDemandPagedList,
UnsupportedError,
determine_ext,
int_or_none,
mimetype2ext,
parse_qs,
+ traverse_obj,
try_get,
+ url_or_none,
+ urlhandle_detect_ext,
urljoin,
)
@@ -52,38 +56,25 @@ class LBRYBaseIE(InfoExtractor):
'/%s:%s' % (claim_name, claim_id))
def _parse_stream(self, stream, url):
- stream_value = stream.get('value') or {}
- stream_type = stream_value.get('stream_type')
- source = stream_value.get('source') or {}
- media = stream_value.get(stream_type) or {}
- signing_channel = stream.get('signing_channel') or {}
- channel_name = signing_channel.get('name')
- channel_claim_id = signing_channel.get('claim_id')
- channel_url = None
- if channel_name and channel_claim_id:
- channel_url = self._permanent_url(url, channel_name, channel_claim_id)
+ stream_type = traverse_obj(stream, ('value', 'stream_type', {str}))
+
+ info = traverse_obj(stream, {
+ 'title': ('value', 'title', {str}),
+ 'thumbnail': ('value', 'thumbnail', 'url', {url_or_none}),
+ 'description': ('value', 'description', {str}),
+ 'license': ('value', 'license', {str}),
+ 'timestamp': ('timestamp', {int_or_none}),
+ 'release_timestamp': ('value', 'release_time', {int_or_none}),
+ 'tags': ('value', 'tags', ..., {lambda x: x or None}),
+ 'duration': ('value', stream_type, 'duration', {int_or_none}),
+ 'channel': ('signing_channel', 'value', 'title', {str}),
+ 'channel_id': ('signing_channel', 'claim_id', {str}),
+ })
+
+ channel_name = traverse_obj(stream, ('signing_channel', 'name', {str}))
+ if channel_name and info.get('channel_id'):
+ info['channel_url'] = self._permanent_url(url, channel_name, info['channel_id'])
- info = {
- 'thumbnail': try_get(stream_value, lambda x: x['thumbnail']['url'], compat_str),
- 'description': stream_value.get('description'),
- 'license': stream_value.get('license'),
- 'timestamp': int_or_none(stream.get('timestamp')),
- 'release_timestamp': int_or_none(stream_value.get('release_time')),
- 'tags': stream_value.get('tags'),
- 'duration': int_or_none(media.get('duration')),
- 'channel': try_get(signing_channel, lambda x: x['value']['title']),
- 'channel_id': channel_claim_id,
- 'channel_url': channel_url,
- 'ext': determine_ext(source.get('name')) or mimetype2ext(source.get('media_type')),
- 'filesize': int_or_none(source.get('size')),
- }
- if stream_type == 'audio':
- info['vcodec'] = 'none'
- else:
- info.update({
- 'width': int_or_none(media.get('width')),
- 'height': int_or_none(media.get('height')),
- })
return info
@@ -93,7 +84,7 @@ class LBRYIE(LBRYBaseIE):
_TESTS = [{
# Video
'url': 'https://lbry.tv/@Mantega:1/First-day-LBRY:1',
- 'md5': 'fffd15d76062e9a985c22c7c7f2f4805',
+ 'md5': '65bd7ec1f6744ada55da8e4c48a2edf9',
'info_dict': {
'id': '17f983b61f53091fb8ea58a9c56804e4ff8cff4d',
'ext': 'mp4',
@@ -142,9 +133,8 @@ class LBRYIE(LBRYBaseIE):
'license': 'None',
}
}, {
- # HLS
'url': 'https://odysee.com/@gardeningincanada:b/plants-i-will-never-grow-again.-the:e',
- 'md5': '25049011f3c8bc2f8b60ad88a031837e',
+ 'md5': 'c35fac796f62a14274b4dc2addb5d0ba',
'info_dict': {
'id': 'e51671357333fe22ae88aad320bde2f6f96b1410',
'ext': 'mp4',
@@ -187,6 +177,28 @@ class LBRYIE(LBRYBaseIE):
},
'params': {'skip_download': True}
}, {
+ # original quality format w/higher resolution than HLS formats
+ 'url': 'https://odysee.com/@wickedtruths:2/Biotechnological-Invasion-of-Skin-(April-2023):4',
+ 'md5': '305b0b3b369bde1b984961f005b67193',
+ 'info_dict': {
+ 'id': '41fbfe805eb73c8d3012c0c49faa0f563274f634',
+ 'ext': 'mp4',
+ 'title': 'Biotechnological Invasion of Skin (April 2023)',
+ 'description': 'md5:709a2f4c07bd8891cda3a7cc2d6fcf5c',
+ 'channel': 'Wicked Truths',
+ 'channel_id': '23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0',
+ 'channel_url': 'https://odysee.com/@wickedtruths:23d2bbf856b0ceed5b1d7c5960bcc72da5a20cb0',
+ 'timestamp': 1685790036,
+ 'upload_date': '20230603',
+ 'release_timestamp': 1685617473,
+ 'release_date': '20230601',
+ 'duration': 1063,
+ 'thumbnail': 'https://thumbs.odycdn.com/4e6d39da4df0cfdad45f64e253a15959.webp',
+ 'tags': ['smart skin surveillance', 'biotechnology invasion of skin', 'morgellons'],
+ 'license': 'None',
+ 'protocol': 'https', # test for direct mp4 download
+ },
+ }, {
'url': 'https://odysee.com/@BrodieRobertson:5/apple-is-tracking-everything-you-do-on:e',
'only_matching': True,
}, {
@@ -221,41 +233,65 @@ class LBRYIE(LBRYBaseIE):
display_id = display_id.split('/', 2)[-1].replace('/', ':')
else:
display_id = display_id.replace(':', '#')
- display_id = compat_urllib_parse_unquote(display_id)
+ display_id = urllib.parse.unquote(display_id)
uri = 'lbry://' + display_id
result = self._resolve_url(uri, display_id, 'stream')
headers = {'Referer': 'https://odysee.com/'}
- if result['value'].get('stream_type') in self._SUPPORTED_STREAM_TYPES:
+
+ formats = []
+ stream_type = traverse_obj(result, ('value', 'stream_type', {str}))
+
+ if stream_type in self._SUPPORTED_STREAM_TYPES:
claim_id, is_live = result['claim_id'], False
streaming_url = self._call_api_proxy(
'get', claim_id, {'uri': uri}, 'streaming url')['streaming_url']
+
+ # GET request to v3 API returns original video/audio file if available
+ direct_url = re.sub(r'/api/v\d+/', '/api/v3/', streaming_url)
+ urlh = self._request_webpage(
+ direct_url, display_id, 'Checking for original quality', headers=headers, fatal=False)
+ if urlh and urlhandle_detect_ext(urlh) != 'm3u8':
+ formats.append({
+ 'url': direct_url,
+ 'format_id': 'original',
+ 'quality': 1,
+ **traverse_obj(result, ('value', {
+ 'ext': ('source', (('name', {determine_ext}), ('media_type', {mimetype2ext}))),
+ 'filesize': ('source', 'size', {int_or_none}),
+ 'width': ('video', 'width', {int_or_none}),
+ 'height': ('video', 'height', {int_or_none}),
+ }), get_all=False),
+ 'vcodec': 'none' if stream_type == 'audio' else None,
+ })
+
+ # HEAD request returns redirect response to m3u8 URL if available
final_url = self._request_webpage(
HEADRequest(streaming_url), display_id, headers=headers,
- note='Downloading streaming redirect url info').geturl()
+ note='Downloading streaming redirect url info').url
+
elif result.get('value_type') == 'stream':
claim_id, is_live = result['signing_channel']['claim_id'], True
live_data = self._download_json(
'https://api.odysee.live/livestream/is_live', claim_id,
query={'channel_claim_id': claim_id},
note='Downloading livestream JSON metadata')['data']
- streaming_url = final_url = live_data.get('VideoURL')
+ final_url = live_data.get('VideoURL')
# Upcoming videos may still give VideoURL
if not live_data.get('Live'):
- streaming_url = final_url = None
+ final_url = None
self.raise_no_formats('This stream is not live', True, claim_id)
+
else:
raise UnsupportedError(url)
- info = self._parse_stream(result, url)
if determine_ext(final_url) == 'm3u8':
- info['formats'] = self._extract_m3u8_formats(
- final_url, display_id, 'mp4', 'm3u8_native', m3u8_id='hls', live=is_live, headers=headers)
- else:
- info['url'] = streaming_url
+ formats.extend(self._extract_m3u8_formats(
+ final_url, display_id, 'mp4', m3u8_id='hls', live=is_live, headers=headers))
+
return {
- **info,
+ **self._parse_stream(result, url),
'id': claim_id,
- 'title': result['value']['title'],
+ 'formats': formats,
'is_live': is_live,
'http_headers': headers,
}
@@ -299,14 +335,12 @@ class LBRYChannelIE(LBRYBaseIE):
if not (stream_claim_name and stream_claim_id):
continue
- info = self._parse_stream(item, url)
- info.update({
+ yield {
+ **self._parse_stream(item, url),
'_type': 'url',
'id': stream_claim_id,
- 'title': try_get(item, lambda x: x['value']['title']),
'url': self._permanent_url(url, stream_claim_name, stream_claim_id),
- })
- yield info
+ }
def _real_extract(self, url):
display_id = self._match_id(url).replace(':', '#')