Diffstat (limited to 'hypervideo_dl/extractor/naver.py')
-rw-r--r--  hypervideo_dl/extractor/naver.py  172
1 file changed, 158 insertions(+), 14 deletions(-)
diff --git a/hypervideo_dl/extractor/naver.py b/hypervideo_dl/extractor/naver.py
index a6821ba..e2e6e97 100644
--- a/hypervideo_dl/extractor/naver.py
+++ b/hypervideo_dl/extractor/naver.py
@@ -1,16 +1,19 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
+import itertools
import re
+from urllib.parse import urlparse, parse_qs
from .common import InfoExtractor
from ..utils import (
+ ExtractorError,
clean_html,
dict_get,
- ExtractorError,
int_or_none,
+ join_nonempty,
+ merge_dicts,
parse_duration,
+ traverse_obj,
try_get,
+ unified_timestamp,
update_url_query,
)
@@ -65,19 +68,16 @@ class NaverBaseIE(InfoExtractor):
formats.extend(self._extract_m3u8_formats(
update_url_query(stream_url, query), video_id,
'mp4', 'm3u8_native', m3u8_id=stream_type, fatal=False))
- self._sort_formats(formats)
replace_ext = lambda x, y: re.sub(self._CAPTION_EXT_RE, '.' + y, x)
def get_subs(caption_url):
if re.search(self._CAPTION_EXT_RE, caption_url):
- return [{
- 'url': replace_ext(caption_url, 'ttml'),
- }, {
- 'url': replace_ext(caption_url, 'vtt'),
- }]
- else:
- return [{'url': caption_url}]
+ return [
+ replace_ext(caption_url, 'ttml'),
+ replace_ext(caption_url, 'vtt'),
+ ]
+ return [caption_url]
automatic_captions = {}
subtitles = {}
@@ -86,7 +86,13 @@ class NaverBaseIE(InfoExtractor):
if not caption_url:
continue
sub_dict = automatic_captions if caption.get('type') == 'auto' else subtitles
- sub_dict.setdefault(dict_get(caption, ('locale', 'language')), []).extend(get_subs(caption_url))
+ lang = caption.get('locale') or join_nonempty('language', 'country', from_dict=caption) or 'und'
+ if caption.get('type') == 'fan':
+ lang += '_fan%d' % next(i for i in itertools.count(1) if f'{lang}_fan{i}' not in sub_dict)
+ sub_dict.setdefault(lang, []).extend({
+ 'url': sub_url,
+ 'name': join_nonempty('label', 'fanName', from_dict=caption, delim=' - '),
+ } for sub_url in get_subs(caption_url))
user = meta.get('user', {})
@@ -237,7 +243,6 @@ class NaverLiveIE(InfoExtractor):
quality.get('url'), video_id, 'mp4',
m3u8_id=quality.get('qualityId'), live=True
))
- self._sort_formats(formats)
return {
'id': video_id,
@@ -250,3 +255,142 @@ class NaverLiveIE(InfoExtractor):
'categories': [meta.get('categoryId')],
'is_live': True
}
+
+
+class NaverNowIE(NaverBaseIE):
+ IE_NAME = 'navernow'
+ _VALID_URL = r'https?://now\.naver\.com/s/now\.(?P<id>\w+)'
+ _API_URL = 'https://apis.naver.com/now_web/oldnow_web/v4'
+ _TESTS = [{
+ 'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay=',
+ 'md5': 'e05854162c21c221481de16b2944a0bc',
+ 'info_dict': {
+ 'id': '4759-26331132',
+ 'title': '아이키X노제\r\n💖꽁냥꽁냥💖(1)',
+ 'ext': 'mp4',
+ 'thumbnail': r're:^https?://.*\.jpg',
+ 'timestamp': 1650369600,
+ 'upload_date': '20220419',
+ 'uploader_id': 'now',
+ 'view_count': int,
+ 'uploader_url': 'https://now.naver.com/show/4759',
+ 'uploader': '아이키의 떰즈업',
+ },
+ 'params': {
+ 'noplaylist': True,
+ }
+ }, {
+ 'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=',
+ 'md5': '9f6118e398aa0f22b2152f554ea7851b',
+ 'info_dict': {
+ 'id': '4759-26601461',
+ 'title': '아이키: 나 리정한테 흔들렸어,,, 질투 폭발하는 노제 여보😾 [아이키의 떰즈업]ㅣ네이버 NOW.',
+ 'ext': 'mp4',
+ 'thumbnail': r're:^https?://.*\.jpg',
+ 'upload_date': '20220504',
+ 'timestamp': 1651648311,
+ 'uploader_id': 'now',
+ 'view_count': int,
+ 'uploader_url': 'https://now.naver.com/show/4759',
+ 'uploader': '아이키의 떰즈업',
+ },
+ 'params': {
+ 'noplaylist': True,
+ },
+ }, {
+ 'url': 'https://now.naver.com/s/now.4759',
+ 'info_dict': {
+ 'id': '4759',
+ 'title': '아이키의 떰즈업',
+ },
+ 'playlist_mincount': 101
+ }, {
+ 'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay',
+ 'info_dict': {
+ 'id': '4759',
+ 'title': '아이키의 떰즈업',
+ },
+ 'playlist_mincount': 101,
+ }, {
+ 'url': 'https://now.naver.com/s/now.4759?shareHightlight=26601461#highlight=',
+ 'info_dict': {
+ 'id': '4759',
+ 'title': '아이키의 떰즈업',
+ },
+ 'playlist_mincount': 101,
+ }, {
+ 'url': 'https://now.naver.com/s/now.kihyunplay?shareReplayId=30573291#replay',
+ 'only_matching': True,
+ }]
+
+ def _extract_replay(self, show_id, replay_id):
+ vod_info = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}', replay_id)
+ in_key = self._download_json(f'{self._API_URL}/shows/now.{show_id}/vod/{replay_id}/inkey', replay_id)['inKey']
+ return merge_dicts({
+ 'id': f'{show_id}-{replay_id}',
+ 'title': traverse_obj(vod_info, ('episode', 'title')),
+ 'timestamp': unified_timestamp(traverse_obj(vod_info, ('episode', 'start_time'))),
+ 'thumbnail': vod_info.get('thumbnail_image_url'),
+ }, self._extract_video_info(replay_id, vod_info['video_id'], in_key))
+
+ def _extract_show_replays(self, show_id):
+ page_size = 15
+ page = 1
+ while True:
+ show_vod_info = self._download_json(
+ f'{self._API_URL}/vod-shows/now.{show_id}', show_id,
+ query={'page': page, 'page_size': page_size},
+ note=f'Downloading JSON vod list for show {show_id} - page {page}'
+ )['response']['result']
+ for v in show_vod_info.get('vod_list') or []:
+ yield self._extract_replay(show_id, v['id'])
+
+ if len(show_vod_info.get('vod_list') or []) < page_size:
+ break
+ page += 1
+
+ def _extract_show_highlights(self, show_id, highlight_id=None):
+ page_size = 10
+ page = 1
+ while True:
+ highlights_videos = self._download_json(
+ f'{self._API_URL}/shows/now.{show_id}/highlights/videos/', show_id,
+ query={'page': page, 'page_size': page_size},
+ note=f'Downloading JSON highlights for show {show_id} - page {page}')
+
+ for highlight in highlights_videos.get('results') or []:
+ if highlight_id and highlight.get('clip_no') != int(highlight_id):
+ continue
+ yield merge_dicts({
+ 'id': f'{show_id}-{highlight["clip_no"]}',
+ 'title': highlight.get('title'),
+ 'timestamp': unified_timestamp(highlight.get('regdate')),
+ 'thumbnail': highlight.get('thumbnail_url'),
+ }, self._extract_video_info(highlight['clip_no'], highlight['video_id'], highlight['video_inkey']))
+
+ if len(highlights_videos.get('results') or []) < page_size:
+ break
+ page += 1
+
+ def _extract_highlight(self, show_id, highlight_id):
+ try:
+ return next(self._extract_show_highlights(show_id, highlight_id))
+ except StopIteration:
+ raise ExtractorError(f'Unable to find highlight {highlight_id} for show {show_id}')
+
+ def _real_extract(self, url):
+ show_id = self._match_id(url)
+ qs = parse_qs(urlparse(url).query)
+
+ if not self._yes_playlist(show_id, qs.get('shareHightlight')):
+ return self._extract_highlight(show_id, qs['shareHightlight'][0])
+ elif not self._yes_playlist(show_id, qs.get('shareReplayId')):
+ return self._extract_replay(show_id, qs['shareReplayId'][0])
+
+ show_info = self._download_json(
+ f'{self._API_URL}/shows/now.{show_id}/', show_id,
+ note=f'Downloading JSON vod list for show {show_id}')
+
+ return self.playlist_result(
+ itertools.chain(self._extract_show_replays(show_id), self._extract_show_highlights(show_id)),
+ show_id, show_info.get('title'))
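
For readers skimming the subtitle hunk above: fan-made caption tracks that share a locale are given unique keys (ko_fan1, ko_fan2, ...) so a later track does not overwrite an earlier one. The following standalone sketch is illustrative only and not part of the patch; caption_key and the sample caption dicts are made up, while the 'locale'/'type' fields and the itertools.count() trick mirror the patched code.

import itertools

def caption_key(caption, sub_dict):
    # 'locale' and 'type' mirror the fields of Naver's caption metadata that the
    # patch reads; 'und' stands in for an unknown language.
    lang = caption.get('locale') or caption.get('language') or 'und'
    if caption.get('type') == 'fan':
        # pick the first unused suffix for this locale: ko_fan1, ko_fan2, ...
        lang += '_fan%d' % next(i for i in itertools.count(1) if f'{lang}_fan{i}' not in sub_dict)
    return lang

subtitles = {}
for cap in ({'locale': 'ko', 'type': 'fan'}, {'locale': 'ko', 'type': 'fan'}, {'locale': 'en'}):
    subtitles.setdefault(caption_key(cap, subtitles), []).append({'url': '...'})
print(list(subtitles))  # ['ko_fan1', 'ko_fan2', 'en']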
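
And a minimal usage sketch for the new NaverNowIE, assuming hypervideo keeps yt-dlp's embedding interface (hypervideo_dl.YoutubeDL and the noplaylist option); the URL comes from the patch's test cases. With noplaylist enabled, the shareReplayId (or shareHightlight) query parameter selects the single clip; without it, the whole show is returned as a playlist of replays and highlights.

from hypervideo_dl import YoutubeDL  # assumed to mirror yt_dlp.YoutubeDL

# noplaylist=True makes _real_extract honour ?shareReplayId=/?shareHightlight=
# and return one clip instead of the full show playlist.
with YoutubeDL({'noplaylist': True}) as ydl:
    info = ydl.extract_info(
        'https://now.naver.com/s/now.4759?shareReplayId=26331132', download=False)
    print(info['id'], info['title'])  # expected: 4759-26331132 plus the episode title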