about summary refs log tree commit diff stats
path: root/hypervideo_dl/extractor/reddit.py
diff options
context:
space:
mode:
Diffstat (limited to 'hypervideo_dl/extractor/reddit.py')
-rw-r--r--hypervideo_dl/extractor/reddit.py141
1 files changed, 127 insertions, 14 deletions
diff --git a/hypervideo_dl/extractor/reddit.py b/hypervideo_dl/extractor/reddit.py
index f1a5c85..813e628 100644
--- a/hypervideo_dl/extractor/reddit.py
+++ b/hypervideo_dl/extractor/reddit.py
@@ -1,4 +1,3 @@
-import random
import urllib.parse
from .common import InfoExtractor
@@ -9,12 +8,14 @@ from ..utils import (
traverse_obj,
try_get,
unescapeHTML,
+ urlencode_postdata,
url_or_none,
)
class RedditIE(InfoExtractor):
- _VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
+ _NETRC_MACHINE = 'reddit'
+ _VALID_URL = r'https?://(?P<host>(?:\w+\.)?reddit(?:media)?\.com)/(?P<slug>(?:(?:r|user)/[^/]+/)?comments/(?P<id>[^/?#&]+))'
_TESTS = [{
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
'info_dict': {
@@ -32,6 +33,7 @@ class RedditIE(InfoExtractor):
'dislike_count': int,
'comment_count': int,
'age_limit': 0,
+ 'channel_id': 'videos',
},
'params': {
'skip_download': True,
@@ -55,6 +57,30 @@ class RedditIE(InfoExtractor):
'dislike_count': int,
'comment_count': int,
'age_limit': 0,
+ 'channel_id': 'aww',
+ },
+ }, {
+ # User post
+ 'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
+ 'info_dict': {
+ 'id': 'zasobba6wp071',
+ 'ext': 'mp4',
+ 'display_id': 'nip71r',
+ 'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
+ 'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+ 'thumbnails': 'count:5',
+ 'timestamp': 1621709093,
+ 'upload_date': '20210522',
+ 'uploader': 'creepyt0es',
+ 'duration': 6,
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ 'age_limit': 0,
+ 'channel_id': 'u_creepyt0es',
+ },
+ 'params': {
+ 'skip_download': True,
},
}, {
# videos embedded in reddit text post
@@ -65,6 +91,66 @@ class RedditIE(InfoExtractor):
'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
},
}, {
+ # crossposted reddit-hosted media
+ 'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
+ 'md5': '746180895c7b75a9d6b05341f507699a',
+ 'info_dict': {
+ 'id': 'a1oneun6pa5a1',
+ 'ext': 'mp4',
+ 'display_id': 'zjjw82',
+ 'title': 'Cringe',
+ 'uploader': 'Otaku-senpai69420',
+ 'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+ 'upload_date': '20221212',
+ 'timestamp': 1670812309,
+ 'duration': 16,
+ 'like_count': int,
+ 'dislike_count': int,
+ 'comment_count': int,
+ 'age_limit': 0,
+ 'channel_id': 'dumbfuckers_club',
+ },
+ }, {
+ # post link without subreddit
+ 'url': 'https://www.reddit.com/comments/124pp33',
+ 'md5': '15eec9d828adcef4468b741a7e45a395',
+ 'info_dict': {
+ 'id': 'antsenjc2jqa1',
+ 'ext': 'mp4',
+ 'display_id': '124pp33',
+ 'title': 'Harmless prank of some old friends',
+ 'uploader': 'Dudezila',
+ 'channel_id': 'ContagiousLaughter',
+ 'duration': 17,
+ 'upload_date': '20230328',
+ 'timestamp': 1680012043,
+ 'thumbnail': r're:^https?://.*\.(?:jpg|png)',
+ 'age_limit': 0,
+ 'comment_count': int,
+ 'dislike_count': int,
+ 'like_count': int,
+ },
+ }, {
+ # quarantined subreddit post
+ 'url': 'https://old.reddit.com/r/GenZedong/comments/12fujy3/based_hasan/',
+ 'md5': '3156ea69e3c1f1b6259683c5abd36e71',
+ 'info_dict': {
+ 'id': '8bwtclfggpsa1',
+ 'ext': 'mp4',
+ 'display_id': '12fujy3',
+ 'title': 'Based Hasan?',
+ 'uploader': 'KingNigelXLII',
+ 'channel_id': 'GenZedong',
+ 'duration': 16,
+ 'upload_date': '20230408',
+ 'timestamp': 1680979138,
+ 'age_limit': 0,
+ 'comment_count': int,
+ 'dislike_count': int,
+ 'like_count': int,
+ },
+ 'skip': 'Requires account that has opted-in to the GenZedong subreddit',
+ }, {
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
'only_matching': True,
}, {
@@ -92,21 +178,45 @@ class RedditIE(InfoExtractor):
'only_matching': True,
}]
- @staticmethod
- def _gen_session_id():
- id_length = 16
- rand_max = 1 << (id_length * 4)
- return '%0.*x' % (id_length, random.randrange(rand_max))
+ def _perform_login(self, username, password):
+ captcha = self._download_json(
+ 'https://www.reddit.com/api/requires_captcha/login.json', None,
+ 'Checking login requirement')['required']
+ if captcha:
+ raise ExtractorError('Reddit is requiring captcha before login', expected=True)
+ login = self._download_json(
+ f'https://www.reddit.com/api/login/{username}', None, data=urlencode_postdata({
+ 'op': 'login-main',
+ 'user': username,
+ 'passwd': password,
+ 'api_type': 'json',
+ }), note='Logging in', errnote='Login request failed')
+ errors = '; '.join(traverse_obj(login, ('json', 'errors', ..., 1)))
+ if errors:
+ raise ExtractorError(f'Unable to login, Reddit API says {errors}', expected=True)
+ elif not traverse_obj(login, ('json', 'data', 'cookie', {str})):
+ raise ExtractorError('Unable to login, no cookie was returned')
def _real_extract(self, url):
- subdomain, slug, video_id = self._match_valid_url(url).group('subdomain', 'slug', 'id')
+ host, slug, video_id = self._match_valid_url(url).group('host', 'slug', 'id')
- self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
- self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
- data = self._download_json(f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)
+ data = self._download_json(
+ f'https://{host}/{slug}/.json', video_id, fatal=False, expected_status=403)
if not data:
- # Fall back to old.reddit.com in case the requested subdomain fails
- data = self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id)
+ fallback_host = 'old.reddit.com' if host != 'old.reddit.com' else 'www.reddit.com'
+ self.to_screen(f'{host} request failed, retrying with {fallback_host}')
+ data = self._download_json(
+ f'https://{fallback_host}/{slug}/.json', video_id, expected_status=403)
+
+ if traverse_obj(data, 'error') == 403:
+ reason = data.get('reason')
+ if reason == 'quarantined':
+ self.raise_login_required('Quarantined subreddit; an account that has opted in is required')
+ elif reason == 'private':
+ self.raise_login_required('Private subreddit; an account that has been approved is required')
+ else:
+ raise ExtractorError(f'HTTP Error 403 Forbidden; reason given: {reason}')
+
data = data[0]['data']['children'][0]['data']
video_url = data['url']
@@ -130,6 +240,7 @@ class RedditIE(InfoExtractor):
'url': unescapeHTML(thumbnail_url),
'width': int_or_none(src.get('width')),
'height': int_or_none(src.get('height')),
+ 'http_headers': {'Accept': '*/*'},
})
for image in try_get(data, lambda x: x['preview']['images']) or []:
@@ -146,6 +257,7 @@ class RedditIE(InfoExtractor):
'thumbnails': thumbnails,
'timestamp': float_or_none(data.get('created_utc')),
'uploader': data.get('author'),
+ 'channel_id': data.get('subreddit'),
'like_count': int_or_none(data.get('ups')),
'dislike_count': int_or_none(data.get('downs')),
'comment_count': int_or_none(data.get('num_comments')),
@@ -179,7 +291,8 @@ class RedditIE(InfoExtractor):
raise ExtractorError('No media found', expected=True)
# Check if media is hosted on reddit:
- reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False)
+ reddit_video = traverse_obj(data, (
+ (None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
if reddit_video:
playlist_urls = [
try_get(reddit_video, lambda x: unescapeHTML(x[y]))