about summary refs log tree commit diff stats
path: root/hypervideo_dl/extractor/canvas.py
diff options
context:
space:
mode:
Diffstat (limited to 'hypervideo_dl/extractor/canvas.py')
-rw-r--r--  hypervideo_dl/extractor/canvas.py  83
1 file changed, 47 insertions(+), 36 deletions(-)
diff --git a/hypervideo_dl/extractor/canvas.py b/hypervideo_dl/extractor/canvas.py
index eefbab2..49e7e4e 100644
--- a/hypervideo_dl/extractor/canvas.py
+++ b/hypervideo_dl/extractor/canvas.py
@@ -1,7 +1,5 @@
from __future__ import unicode_literals
-import re
-import json
from .common import InfoExtractor
from .gigya import GigyaBaseIE
@@ -17,6 +15,7 @@ from ..utils import (
str_or_none,
strip_or_none,
url_or_none,
+ urlencode_postdata
)
@@ -24,7 +23,7 @@ class CanvasIE(InfoExtractor):
_VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?P<site_id>canvas|een|ketnet|vrt(?:video|nieuws)|sporza|dako)/assets/(?P<id>[^/?#&]+)'
_TESTS = [{
'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
- 'md5': '68993eda72ef62386a15ea2cf3c93107',
+ 'md5': '37b2b7bb9b3dcaa05b67058dc3a714a9',
'info_dict': {
'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475',
@@ -32,9 +31,9 @@ class CanvasIE(InfoExtractor):
'title': 'Nachtwacht: De Greystook',
'description': 'Nachtwacht: De Greystook',
'thumbnail': r're:^https?://.*\.jpg$',
- 'duration': 1468.04,
+ 'duration': 1468.02,
},
- 'expected_warnings': ['is not a supported codec', 'Unknown MIME type'],
+ 'expected_warnings': ['is not a supported codec'],
}, {
'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e',
'only_matching': True,
@@ -47,7 +46,7 @@ class CanvasIE(InfoExtractor):
_REST_API_BASE = 'https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v1'
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
+ mobj = self._match_valid_url(url)
site_id, video_id = mobj.group('site_id'), mobj.group('id')
data = None
@@ -83,24 +82,31 @@ class CanvasIE(InfoExtractor):
description = data.get('description')
formats = []
+ subtitles = {}
for target in data['targetUrls']:
format_url, format_type = url_or_none(target.get('url')), str_or_none(target.get('type'))
if not format_url or not format_type:
continue
format_type = format_type.upper()
if format_type in self._HLS_ENTRY_PROTOCOLS_MAP:
- formats.extend(self._extract_m3u8_formats(
+ fmts, subs = self._extract_m3u8_formats_and_subtitles(
format_url, video_id, 'mp4', self._HLS_ENTRY_PROTOCOLS_MAP[format_type],
- m3u8_id=format_type, fatal=False))
+ m3u8_id=format_type, fatal=False)
+ formats.extend(fmts)
+ subtitles = self._merge_subtitles(subtitles, subs)
elif format_type == 'HDS':
formats.extend(self._extract_f4m_formats(
format_url, video_id, f4m_id=format_type, fatal=False))
elif format_type == 'MPEG_DASH':
- formats.extend(self._extract_mpd_formats(
- format_url, video_id, mpd_id=format_type, fatal=False))
+ fmts, subs = self._extract_mpd_formats_and_subtitles(
+ format_url, video_id, mpd_id=format_type, fatal=False)
+ formats.extend(fmts)
+ subtitles = self._merge_subtitles(subtitles, subs)
elif format_type == 'HSS':
- formats.extend(self._extract_ism_formats(
- format_url, video_id, ism_id='mss', fatal=False))
+ fmts, subs = self._extract_ism_formats_and_subtitles(
+ format_url, video_id, ism_id='mss', fatal=False)
+ formats.extend(fmts)
+ subtitles = self._merge_subtitles(subtitles, subs)
else:
formats.append({
'format_id': format_type,
@@ -108,7 +114,6 @@ class CanvasIE(InfoExtractor):
})
self._sort_formats(formats)
- subtitles = {}
subtitle_urls = data.get('subtitleUrls')
if isinstance(subtitle_urls, list):
for subtitle in subtitle_urls:
@@ -186,7 +191,7 @@ class CanvasEenIE(InfoExtractor):
}]
def _real_extract(self, url):
- mobj = re.match(self._VALID_URL, url)
+ mobj = self._match_valid_url(url)
site_id, display_id = mobj.group('site_id'), mobj.group('id')
webpage = self._download_webpage(url, display_id)
@@ -259,7 +264,7 @@ class VrtNUIE(GigyaBaseIE):
'expected_warnings': ['Unable to download asset JSON', 'is not a supported codec', 'Unknown MIME type'],
}]
_NETRC_MACHINE = 'vrtnu'
- _APIKEY = '3_0Z2HujMtiWq_pkAjgnS2Md2E11a1AwZjYiBETtwNE-EoEHDINgtnvcAOpNgmrVGy'
+ _APIKEY = '3_qhEcPa5JGFROVwu5SWKqJ4mVOIkwlFNMSKwzPDAh8QZOtHqu6L4nD5Q7lk0eXOOG'
_CONTEXT_ID = 'R3595707040'
def _real_initialize(self):
@@ -270,35 +275,41 @@ class VrtNUIE(GigyaBaseIE):
if username is None:
return
- auth_data = {
- 'APIKey': self._APIKEY,
- 'targetEnv': 'jssdk',
- 'loginID': username,
- 'password': password,
- 'authMode': 'cookie',
- }
+ auth_info = self._download_json(
+ 'https://accounts.vrt.be/accounts.login', None,
+ note='Login data', errnote='Could not get Login data',
+ headers={}, data=urlencode_postdata({
+ 'loginID': username,
+ 'password': password,
+ 'sessionExpiration': '-2',
+ 'APIKey': self._APIKEY,
+ 'targetEnv': 'jssdk',
+ }))
- auth_info = self._gigya_login(auth_data)
+ if auth_info.get('errorDetails'):
+ raise ExtractorError('Unable to login: VrtNU said: ' + auth_info.get('errorDetails'), expected=True)
# Sometimes authentication fails for no good reason, retry
login_attempt = 1
while login_attempt <= 3:
try:
- # When requesting a token, no actual token is returned, but the
- # necessary cookies are set.
+ self._request_webpage('https://token.vrt.be/vrtnuinitlogin',
+ None, note='Requesting XSRF Token', errnote='Could not get XSRF Token',
+ query={'provider': 'site', 'destination': 'https://www.vrt.be/vrtnu/'})
+
+ post_data = {
+ 'UID': auth_info['UID'],
+ 'UIDSignature': auth_info['UIDSignature'],
+ 'signatureTimestamp': auth_info['signatureTimestamp'],
+ 'client_id': 'vrtnu-site',
+ '_csrf': self._get_cookies('https://login.vrt.be').get('OIDCXSRF').value,
+ }
+
self._request_webpage(
- 'https://token.vrt.be',
+ 'https://login.vrt.be/perform_login',
None, note='Requesting a token', errnote='Could not get a token',
- headers={
- 'Content-Type': 'application/json',
- 'Referer': 'https://www.vrt.be/vrtnu/',
- },
- data=json.dumps({
- 'uid': auth_info['UID'],
- 'uidsig': auth_info['UIDSignature'],
- 'ts': auth_info['signatureTimestamp'],
- 'email': auth_info['profile']['email'],
- }).encode('utf-8'))
+ headers={}, data=urlencode_postdata(post_data))
+
except ExtractorError as e:
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
login_attempt += 1