-rw-r--r--  README.md |  5
-rw-r--r--  livie.el  |  2
-rw-r--r--  livie.py  | 55
3 files changed, 24 insertions(+), 38 deletions(-)
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -13,12 +13,9 @@ Livie allows the user to search youtube.com and play the video from `mpv`.
 ## Requirements
 
 - `python >= 3.6`
-- `python-beautifulsoup4`
-- `python-lxml`
 - `mpv`
-- `hypervideo` or `youtube-dl`
 
-`sudo pacman -S python-beautifulsoup4 python-lxml mpv hypervideo`
+`sudo pacman -S python mpv`
 
 ## Installation
 
diff --git a/livie.el b/livie.el
--- a/livie.el
+++ b/livie.el
@@ -47,7 +47,7 @@
   :group 'livie
   :type 'string)
 
-(defvar livie-youtube-regexp "https://www.youtube.com/watch\\?v=[A-Za-z0-9_\\-]\\{11\\}")
+(defvar livie-youtube-regexp "https://invidious.snopyta.org/latest_version\\?id=[A-Za-z0-9_\\-]\\{11\\}&itag=18&local=true")
 
 (define-derived-mode livie-mode special-mode "livie"
diff --git a/livie.py b/livie.py
--- a/livie.py
+++ b/livie.py
@@ -1,47 +1,36 @@
 """This module does render video"""
 
 import sys
+import datetime
+import json
 
 import requests
-from bs4 import BeautifulSoup
 
-BASE_URL = 'https://www.youtube.com'
+BASE_URL = 'https://invidious.snopyta.org'
 
-SEARCH_FILTER = '&sp=EgIQAQ%253D%253D'
+URL = BASE_URL + '/api/v1/search?q=' + sys.argv[1]
 
-URL = BASE_URL + '/results?search_query=' + sys.argv[1] + SEARCH_FILTER
+RUTA = requests.get(URL)
 
-HTML = requests.get(URL).text
+VIDEOS = json.loads(RUTA.content)
 
-SOUP = BeautifulSoup(HTML, 'lxml')
+FIRST = True  # skip line
 
-FIRST = True
-
-for vid in SOUP.find_all(class_='yt-lockup-content'):
-    try:
-        link = BASE_URL + vid.h3.a['href']
-        title = vid.h3.a.text
-        description = vid.h3.span.text
-        author = vid.find(class_='yt-lockup-byline').a.text
-        meta = vid.find(class_='yt-lockup-meta').ul.contents
-        time = meta[0].text
-        views_str = meta[-1].text[:-6]
-        views = int(views_str.replace(',', ''))
-
-    except TypeError:
-        continue
+for video in VIDEOS:
+    title = video.get('title', '')
+    videoid = video.get('videoId', '')
+    author = video.get('author', '')
+    link = BASE_URL + '/latest_version?id=' + videoid + '&itag=18&local=true'
+    time = str(datetime.timedelta(seconds=video.get('lengthSeconds', '')))
+    publish = video.get('publishedText', '')
 
     if FIRST:
         FIRST = False
     else:
-        print()
-
-    print(f'   title: {title}')
-    print(f'     url: {link}')
-    print(f' channel: {author}')
-    print(f'uploaded: {time}')
-    print(f'   views: {views_str}')
-
-# test
-# f = open('output.xml','w')
-# f.write(str(SOUP))
-# f.write(soup.encode('utf-8'))
+        print()  # print skip line
+
+    # prints
+    print(f'   title: {title}')
+    print(f'     url: {link}')
+    print(f' channel: {author}')
+    print(f'    time: {time}')
+    print(f' publish: {publish}')
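
For context, the new flow can be exercised outside Emacs with a short standalone script. The sketch below is illustrative only and not part of this commit: it assumes the invidious.snopyta.org instance used above is reachable and that mpv is on PATH, and play_first_result is a hypothetical helper. It queries /api/v1/search the same way the rewritten livie.py does, builds the latest_version?...&itag=18&local=true URL that the new livie-youtube-regexp matches, and hands it straight to mpv, which is why hypervideo / youtube-dl can be dropped from the requirements.

# Sketch only, not part of this commit.
import datetime
import subprocess
import sys

import requests

BASE_URL = 'https://invidious.snopyta.org'  # instance assumed reachable


def play_first_result(query):
    """Search the Invidious API and play the first hit with mpv."""
    videos = requests.get(BASE_URL + '/api/v1/search', params={'q': query}).json()
    if not videos:
        sys.exit('no results')
    first = videos[0]
    # a default of 0 (rather than '') keeps timedelta() happy if the field is missing
    length = str(datetime.timedelta(seconds=first.get('lengthSeconds', 0)))
    # itag=18 selects a progressive MP4 stream and local=true proxies it through
    # the instance, so mpv can open the URL directly without an extractor.
    url = (BASE_URL + '/latest_version?id=' + first.get('videoId', '')
           + '&itag=18&local=true')
    print(f'{first.get("title", "")} [{length}] -> {url}')
    subprocess.run(['mpv', url], check=False)


if __name__ == '__main__':
    play_first_result(sys.argv[1] if len(sys.argv) > 1 else 'emacs')

The time column in the new livie.py is produced the same way: str(datetime.timedelta(seconds=212)) renders as 0:03:32.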