Diffstat (limited to 'youtube_dl/extractor')
-rw-r--r--  youtube_dl/extractor/extractors.py |   1
-rw-r--r--  youtube_dl/extractor/hrfensehen.py | 102
-rw-r--r--  youtube_dl/extractor/soundcloud.py |  97
-rw-r--r--  youtube_dl/extractor/viki.py       |   4
4 files changed, 192 insertions, 12 deletions
diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py
index 4b3092028..5ae0a34aa 100644
--- a/youtube_dl/extractor/extractors.py
+++ b/youtube_dl/extractor/extractors.py
@@ -440,6 +440,7 @@ from .hotstar import (
 )
 from .howcast import HowcastIE
 from .howstuffworks import HowStuffWorksIE
+from .hrfensehen import HRFernsehenIE
 from .hrti import (
     HRTiIE,
     HRTiPlaylistIE,
diff --git a/youtube_dl/extractor/hrfensehen.py b/youtube_dl/extractor/hrfensehen.py
new file mode 100644
index 000000000..2beadef2c
--- /dev/null
+++ b/youtube_dl/extractor/hrfensehen.py
@@ -0,0 +1,102 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import json
+import re
+
+from youtube_dl.utils import int_or_none, unified_timestamp, unescapeHTML
+from .common import InfoExtractor
+
+
+class HRFernsehenIE(InfoExtractor):
+    IE_NAME = 'hrfernsehen'
+    _VALID_URL = r'^https?://www\.(?:hr-fernsehen|hessenschau)\.de/.*,video-(?P<id>[0-9]{6})\.html'
+
+    _TESTS = [{
+        'url': 'https://www.hessenschau.de/tv-sendung/hessenschau-vom-26082020,video-130546.html',
+        'md5': '5c4e0ba94677c516a2f65a84110fc536',
+        'info_dict': {
+            'id': '130546',
+            'ext': 'mp4',
+            'description': 'Sturmtief Kirsten fegt über Hessen / Die Corona-Pandemie – eine Chronologie / '
+                           'Sterbehilfe: Die Lage in Hessen / Miss Hessen leitet zwei eigene Unternehmen / '
+                           'Pop-Up Museum zeigt Schwarze Unterhaltung und Black Music',
+            'subtitles': {'de': [{
+                'url': 'https://hr-a.akamaihd.net/video/as/hessenschau/2020_08/hrLogo_200826200407_L385592_512x288-25p-500kbit.vtt'
+            }]},
+            'timestamp': 1598470200,
+            'upload_date': '20200826',
+            'thumbnails': [{
+                'url': 'https://www.hessenschau.de/tv-sendung/hs_ganz-1554~_t-1598465545029_v-16to9.jpg',
+                'id': '0'
+            }, {
+                'url': 'https://www.hessenschau.de/tv-sendung/hs_ganz-1554~_t-1598465545029_v-16to9__medium.jpg',
+                'id': '1'
+            }],
+            'title': 'hessenschau vom 26.08.2020'
+        }
+    }, {
+        'url': 'https://www.hr-fernsehen.de/sendungen-a-z/mex/sendungen/fair-und-gut---was-hinter-aldis-eigenem-guetesiegel-steckt,video-130544.html',
+        'only_matching': True
+    }]
+
+    _GEO_COUNTRIES = ['DE']
+
+    def extract_airdate(self, loader_data):
+        airdate_str = loader_data.get('mediaMetadata', {}).get('agf', {}).get('airdate')
+
+        if airdate_str is None:
+            return None
+
+        return unified_timestamp(airdate_str)
+
+    def extract_formats(self, loader_data):
+        stream_formats = []
+        for stream_obj in loader_data["videoResolutionLevels"]:
+            stream_format = {
+                'format_id': str(stream_obj['verticalResolution']) + "p",
+                'height': stream_obj['verticalResolution'],
+                'url': stream_obj['url'],
+            }
+
+            quality_information = re.search(r'([0-9]{3,4})x([0-9]{3,4})-([0-9]{2})p-([0-9]{3,4})kbit',
+                                            stream_obj['url'])
+            if quality_information:
+                stream_format['width'] = int_or_none(quality_information.group(1))
+                stream_format['height'] = int_or_none(quality_information.group(2))
+                stream_format['fps'] = int_or_none(quality_information.group(3))
+                stream_format['tbr'] = int_or_none(quality_information.group(4))
+
+            stream_formats.append(stream_format)
+
+        self._sort_formats(stream_formats)
+        return stream_formats
+
+    def _real_extract(self, url):
+        video_id = self._match_id(url)
+        webpage = self._download_webpage(url, video_id)
+
+        title = self._html_search_meta(
+            ['og:title', 'twitter:title', 'name'], webpage)
+        description = self._html_search_meta(
+            ['description'], webpage)
+
+        loader_str = unescapeHTML(self._search_regex(r"data-hr-mediaplayer-loader='([^']*)'", webpage, "ardloader"))
+        loader_data = json.loads(loader_str)
+
+        info = {
+            'id': video_id,
+            'title': title,
+            'description': description,
+            'formats': self.extract_formats(loader_data),
+            'timestamp': self.extract_airdate(loader_data)
+        }
+
+        if "subtitle" in loader_data:
+            info["subtitles"] = {"de": [{"url": loader_data["subtitle"]}]}
+
+        thumbnails = list(set([t for t in loader_data.get("previewImageUrl", {}).values()]))
+        if len(thumbnails) > 0:
+            info["thumbnails"] = [{"url": t} for t in thumbnails]
+
+        return info
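Note: the new extractor reads the player configuration from the page's data-hr-mediaplayer-loader attribute and, in extract_formats(), derives width, height, fps and bitrate from the stream URL itself. A minimal standalone sketch of that URL-parsing step (plain int() instead of youtube-dl's int_or_none, and a made-up example filename shaped like the URLs in the test above):

    import re

    def parse_hr_quality(stream_url):
        # Same pattern as extract_formats() above: <width>x<height>-<fps>p-<tbr>kbit
        m = re.search(r'([0-9]{3,4})x([0-9]{3,4})-([0-9]{2})p-([0-9]{3,4})kbit', stream_url)
        if not m:
            return {}
        return {
            'width': int(m.group(1)),
            'height': int(m.group(2)),
            'fps': int(m.group(3)),
            'tbr': int(m.group(4)),
        }

    # Example with a placeholder filename:
    print(parse_hr_quality('hrLogo_200826200407_L385592_512x288-25p-500kbit.mp4'))
    # -> {'width': 512, 'height': 288, 'fps': 25, 'tbr': 500}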
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index d37c52543..ac09cb5e6 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -3,6 +3,8 @@ from __future__ import unicode_literals
 
 import itertools
 import re
+import json
+import random
 
 from .common import (
     InfoExtractor,
@@ -28,6 +30,7 @@ from ..utils import (
     update_url_query,
     url_or_none,
     urlhandle_detect_ext,
+    sanitized_Request,
 )
 
 
@@ -309,7 +312,81 @@ class SoundcloudIE(InfoExtractor):
             raise
 
     def _real_initialize(self):
-        self._CLIENT_ID = self._downloader.cache.load('soundcloud', 'client_id') or 'YUKXoArFcqrlQn9tfNHvvyfnDISj04zk'
+        self._CLIENT_ID = self._downloader.cache.load('soundcloud', 'client_id') or "T5R4kgWS2PRf6lzLyIravUMnKlbIxQag"  # 'EXLwg5lHTO2dslU5EePe3xkw0m1h86Cd' # 'YUKXoArFcqrlQn9tfNHvvyfnDISj04zk'
+        self._login()
+
+    _USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"
+    _API_AUTH_QUERY_TEMPLATE = '?client_id=%s'
+    _API_AUTH_URL_PW = 'https://api-auth.soundcloud.com/web-auth/sign-in/password%s'
+    _access_token = None
+    _HEADERS = {}
+    _NETRC_MACHINE = 'soundcloud'
+
+    def _login(self):
+        username, password = self._get_login_info()
+        if username is None:
+            return
+
+        def genDevId():
+            def genNumBlock():
+                return ''.join([str(random.randrange(10)) for i in range(6)])
+            return '-'.join([genNumBlock() for i in range(4)])
+
+        payload = {
+            'client_id': self._CLIENT_ID,
+            'recaptcha_pubkey': 'null',
+            'recaptcha_response': 'null',
+            'credentials': {
+                'identifier': username,
+                'password': password
+            },
+            'signature': self.sign(username, password, self._CLIENT_ID),
+            'device_id': genDevId(),
+            'user_agent': self._USER_AGENT
+        }
+
+        query = self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID
+        login = sanitized_Request(self._API_AUTH_URL_PW % query, json.dumps(payload).encode('utf-8'))
+        response = self._download_json(login, None)
+        self._access_token = response.get('session').get('access_token')
+        if not self._access_token:
+            self.report_warning('Unable to get access token, login may has failed')
+        else:
+            self._HEADERS = {'Authorization': 'OAuth ' + self._access_token}
+
+    # signature generation
+    def sign(self, user, pw, clid):
+        a = 33
+        i = 1
+        s = 440123
+        w = 117
+        u = 1800000
+        l = 1042
+        b = 37
+        k = 37
+        c = 5
+        n = "0763ed7314c69015fd4a0dc16bbf4b90"  # _KEY
+        y = "8"  # _REV
+        r = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"  # _USER_AGENT
+        e = user  # _USERNAME
+        t = clid  # _CLIENT_ID
+
+        d = '-'.join([str(mInt) for mInt in [a, i, s, w, u, l, b, k]])
+        p = n + y + d + r + e + t + d + n
+        h = p
+
+        m = 8011470
+        f = 0
+
+        for f in range(f, len(h)):
+            m = (m >> 1) + ((1 & m) << 23)
+            m += ord(h[f])
+            m &= 16777215
+
+        # c is not even needed
+        out = str(y) + ':' + str(d) + ':' + format(m, 'x') + ':' + str(c)
+
+        return out
 
     @classmethod
     def _resolv_url(cls, url):
@@ -389,7 +466,7 @@ class SoundcloudIE(InfoExtractor):
             if not format_url:
                 continue
             stream = self._download_json(
-                format_url, track_id, query=query, fatal=False)
+                format_url, track_id, query=query, fatal=False, headers=self._HEADERS)
             if not isinstance(stream, dict):
                 continue
             stream_url = url_or_none(stream.get('url'))
@@ -487,7 +564,7 @@ class SoundcloudIE(InfoExtractor):
             info_json_url = self._resolv_url(self._BASE_URL + resolve_title)
 
         info = self._download_json(
-            info_json_url, full_title, 'Downloading info JSON', query=query)
+            info_json_url, full_title, 'Downloading info JSON', query=query, headers=self._HEADERS)
 
         return self._extract_info_dict(info, full_title, token)
 
@@ -503,7 +580,7 @@ class SoundcloudPlaylistBaseIE(SoundcloudIE):
                     'ids': ','.join([compat_str(t['id']) for t in tracks]),
                     'playlistId': playlist_id,
                     'playlistSecretToken': token,
-                })
+                }, headers=self._HEADERS)
         entries = []
         for track in tracks:
             track_id = str_or_none(track.get('id'))
@@ -547,7 +624,7 @@ class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
             full_title += '/' + token
 
         info = self._download_json(self._resolv_url(
-            self._BASE_URL + full_title), full_title)
+            self._BASE_URL + full_title), full_title, headers=self._HEADERS)
 
         if 'errors' in info:
             msgs = (compat_str(err['error_message']) for err in info['errors'])
@@ -572,7 +649,7 @@ class SoundcloudPagedPlaylistBaseIE(SoundcloudIE):
         for i in itertools.count():
             response = self._download_json(
                 next_href, playlist_id,
-                'Downloading track page %s' % (i + 1), query=query)
+                'Downloading track page %s' % (i + 1), query=query, headers=self._HEADERS)
 
             collection = response['collection']
 
@@ -694,7 +771,7 @@ class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE):
 
         user = self._download_json(
             self._resolv_url(self._BASE_URL + uploader),
-            uploader, 'Downloading user info')
+            uploader, 'Downloading user info', headers=self._HEADERS)
 
         resource = mobj.group('rsrc') or 'all'
 
@@ -719,7 +796,7 @@ class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE):
     def _real_extract(self, url):
         track_name = self._match_id(url)
 
-        track = self._download_json(self._resolv_url(url), track_name)
+        track = self._download_json(self._resolv_url(url), track_name, headers=self._HEADERS)
         track_id = self._search_regex(
             r'soundcloud:track-stations:(\d+)', track['id'], 'track id')
 
@@ -752,7 +829,7 @@ class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE):
 
         data = self._download_json(
             self._API_V2_BASE + 'playlists/' + playlist_id,
-            playlist_id, 'Downloading playlist', query=query)
+            playlist_id, 'Downloading playlist', query=query, headers=self._HEADERS)
 
         return self._extract_set(data, token)
 
@@ -789,7 +866,7 @@ class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
         for i in itertools.count(1):
             response = self._download_json(
                 next_url, collection_id, 'Downloading page {0}'.format(i),
-                'Unable to download API page')
+                'Unable to download API page', headers=self._HEADERS)
 
             collection = response.get('collection', [])
             if not collection:
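Note: the SoundCloud change adds optional account login. _login() signs the credentials with the checksum in sign(), posts them to api-auth.soundcloud.com, and every later API request carries the returned token via the headers=self._HEADERS arguments added above. A rough standalone sketch of that rolling 24-bit checksum, using the same constants as the patch but placeholder credentials:

    def soundcloud_sign(user, client_id):
        # Constants copied from sign() above; user/client_id here are placeholders.
        key = '0763ed7314c69015fd4a0dc16bbf4b90'
        rev = '8'
        user_agent = ('Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36')
        d = '-'.join(str(n) for n in [33, 1, 440123, 117, 1800000, 1042, 37, 37])
        h = key + rev + d + user_agent + user + client_id + d + key

        m = 8011470
        for ch in h:
            m = (m >> 1) + ((1 & m) << 23)  # rotate the low 24 bits right by one
            m += ord(ch)
            m &= 16777215                   # keep only 24 bits
        return '%s:%s:%x:%s' % (rev, d, m, 5)

    print(soundcloud_sign('user@example.com', 'T5R4kgWS2PRf6lzLyIravUMnKlbIxQag'))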
diff --git a/youtube_dl/extractor/viki.py b/youtube_dl/extractor/viki.py
index b0dcdc0e6..9e4171237 100644
--- a/youtube_dl/extractor/viki.py
+++ b/youtube_dl/extractor/viki.py
@@ -56,14 +56,14 @@ class VikiBaseIE(InfoExtractor):
     def _call_api(self, path, video_id, note, timestamp=None, post_data=None):
         resp = self._download_json(
-            self._prepare_call(path, timestamp, post_data), video_id, note)
+            self._prepare_call(path, timestamp, post_data), video_id, note, headers={'x-viki-app-ver': '2.2.5.1428709186'}, expected_status=[200, 400, 404])
 
         error = resp.get('error')
         if error:
             if error == 'invalid timestamp':
                 resp = self._download_json(
                     self._prepare_call(path, int(resp['current_timestamp']), post_data),
-                    video_id, '%s (retry)' % note)
+                    video_id, '%s (retry)' % note, headers={'x-viki-app-ver': '2.2.5.1428709186'}, expected_status=[200, 400, 404])
 
                 error = resp.get('error')
                 if error:
                     self._raise_error(resp['error'])
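Note: the Viki change sends an x-viki-app-ver header with every API call and whitelists 400/404 via expected_status, so error responses still come back as parseable JSON instead of raising. A rough standalone illustration of the same request shape with plain urllib (the URL in the usage comment is a placeholder, not a real Viki API path):

    import json
    from urllib.request import Request, urlopen
    from urllib.error import HTTPError

    def fetch_viki_json(url):
        req = Request(url, headers={'x-viki-app-ver': '2.2.5.1428709186'})
        try:
            body = urlopen(req).read()
        except HTTPError as e:
            if e.code not in (400, 404):  # mirror expected_status=[200, 400, 404]
                raise
            body = e.read()               # 400/404 bodies still carry the JSON error
        return json.loads(body.decode('utf-8'))

    # fetch_viki_json('https://api.viki.example/v4/videos/....json')  # placeholder URL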