aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorpukkandan <pukkandan.ytdlp@gmail.com>2022-01-31 12:32:44 +0530
committerpukkandan <pukkandan.ytdlp@gmail.com>2022-02-01 00:12:34 +0530
commitc533c89ce1d6965d8575413738d76a5bf9e2de59 (patch)
treee1d0a99583f3a54836b8d5eaa57618a22a1e5943
parentb8b3f4562a95a3c6b3b47544d1a5e8390a5650fa (diff)
downloadhypervideo-pre-c533c89ce1d6965d8575413738d76a5bf9e2de59.tar.lz
hypervideo-pre-c533c89ce1d6965d8575413738d76a5bf9e2de59.tar.xz
hypervideo-pre-c533c89ce1d6965d8575413738d76a5bf9e2de59.zip
[GoogleSearch] Fix extractor
-rw-r--r--yt_dlp/extractor/googlesearch.py21
1 file changed, 9 insertions, 12 deletions
diff --git a/yt_dlp/extractor/googlesearch.py b/yt_dlp/extractor/googlesearch.py
index f605c0c35..4b8b1bcbb 100644
--- a/yt_dlp/extractor/googlesearch.py
+++ b/yt_dlp/extractor/googlesearch.py
@@ -8,36 +8,33 @@ from .common import SearchInfoExtractor
class GoogleSearchIE(SearchInfoExtractor):
IE_DESC = 'Google Video search'
- _MAX_RESULTS = 1000
IE_NAME = 'video.google:search'
_SEARCH_KEY = 'gvsearch'
- _WORKING = False
- _TEST = {
+ _TESTS = [{
'url': 'gvsearch15:python language',
'info_dict': {
'id': 'python language',
'title': 'python language',
},
'playlist_count': 15,
- }
+ }]
+ _PAGE_SIZE = 100
def _search_results(self, query):
for pagenum in itertools.count():
webpage = self._download_webpage(
- 'http://www.google.com/search',
- 'gvsearch:' + query,
- note='Downloading result page %s' % (pagenum + 1),
+ 'http://www.google.com/search', f'gvsearch:{query}',
+ note=f'Downloading result page {pagenum + 1}',
query={
'tbm': 'vid',
'q': query,
- 'start': pagenum * 10,
+ 'start': pagenum * self._PAGE_SIZE,
+ 'num': self._PAGE_SIZE,
'hl': 'en',
})
- for hit_idx, mobj in enumerate(re.finditer(
- r'<h3 class="r"><a href="([^"]+)"', webpage)):
- if re.search(f'id="vidthumb{hit_idx + 1}"', webpage):
- yield self.url_result(mobj.group(1))
+ for url in re.findall(r'<div[^>]* class="dXiKIc"[^>]*><a href="([^"]+)"', webpage):
+ yield self.url_result(url)
if not re.search(r'id="pnnext"', webpage):
return