about summary refs log tree commit diff stats
path: root/youtube/subscriptions.py
diff options
context:
space:
mode:
author: Astounds <kirito@disroot.org> 2026-04-25 01:02:17 -0500
committer: Astounds <kirito@disroot.org> 2026-04-25 01:02:17 -0500
commit: 50ad959a8051fec95f26b573f9fe067bdf3fdf6a (patch)
tree: 4d94f63cf9adb951d4200b0f2bb0c762d45297c4 /youtube/subscriptions.py
parent: a0f315be51ef121618e73d5b450c8616c0d11d21 (diff)
download: yt-local-50ad959a8051fec95f26b573f9fe067bdf3fdf6a.tar.lz
yt-local-50ad959a8051fec95f26b573f9fe067bdf3fdf6a.tar.xz
yt-local-50ad959a8051fec95f26b573f9fe067bdf3fdf6a.zip
refactor: replace string concatenations with f-strings
Diffstat (limited to 'youtube/subscriptions.py')
-rw-r--r-- youtube/subscriptions.py | 41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/youtube/subscriptions.py b/youtube/subscriptions.py
index dafea58..7d3efab 100644
--- a/youtube/subscriptions.py
+++ b/youtube/subscriptions.py
@@ -126,7 +126,7 @@ def delete_thumbnails(to_delete):
os.remove(os.path.join(thumbnails_directory, thumbnail))
existing_thumbnails.remove(video_id)
except Exception:
- print('Failed to delete thumbnail: ' + thumbnail)
+ print(f'Failed to delete thumbnail: {thumbnail}')
traceback.print_exc()
@@ -184,7 +184,7 @@ def _get_videos(cursor, number_per_page, offset, tag=None):
'time_published': exact_timestamp(db_video[3]) if db_video[4] else posix_to_dumbed_down(db_video[3]),
'author': db_video[5],
'author_id': db_video[6],
- 'author_url': '/https://www.youtube.com/channel/' + db_video[6],
+ 'author_url': f'/https://www.youtube.com/channel/{db_video[6]}',
})
return videos, pseudo_number_of_videos
@@ -304,9 +304,9 @@ def posix_to_dumbed_down(posix_time):
if delta >= unit_time:
quantifier = round(delta/unit_time)
if quantifier == 1:
- return '1 ' + unit_name + ' ago'
+ return f'1 {unit_name} ago'
else:
- return str(quantifier) + ' ' + unit_name + 's ago'
+ return f'{quantifier} {unit_name}s ago'
else:
raise Exception()
@@ -363,7 +363,7 @@ def autocheck_dispatcher():
time_until_earliest_job = earliest_job['next_check_time'] - time.time()
if time_until_earliest_job <= -5: # should not happen unless we're running extremely slow
- print('ERROR: autocheck_dispatcher got job scheduled in the past, skipping and rescheduling: ' + earliest_job['channel_id'] + ', ' + earliest_job['channel_name'] + ', ' + str(earliest_job['next_check_time']))
+ print(f'ERROR: autocheck_dispatcher got job scheduled in the past, skipping and rescheduling: {earliest_job["channel_id"]}, {earliest_job["channel_name"]}, {earliest_job["next_check_time"]}')
next_check_time = time.time() + 3600*secrets.randbelow(60)/60
with_open_db(_schedule_checking, earliest_job['channel_id'], next_check_time)
autocheck_jobs[earliest_job_index]['next_check_time'] = next_check_time
@@ -451,7 +451,7 @@ def check_channels_if_necessary(channel_ids):
def _get_atoma_feed(channel_id):
- url = 'https://www.youtube.com/feeds/videos.xml?channel_id=' + channel_id
+ url = f'https://www.youtube.com/feeds/videos.xml?channel_id={channel_id}'
try:
return util.fetch_url(url).decode('utf-8')
except util.FetchError as e:
@@ -485,16 +485,15 @@ def _get_channel_videos_first_page(channel_id, channel_status_name):
return channel_info
except util.FetchError as e:
if e.code == '429' and settings.route_tor:
- error_message = ('Error checking channel ' + channel_status_name
- + ': YouTube blocked the request because the'
- + ' Tor exit node is overutilized. Try getting a new exit node'
- + ' by using the New Identity button in the Tor Browser.')
+ error_message = (f'Error checking channel {channel_status_name}: '
+ f'YouTube blocked the request because the Tor exit node is overutilized. '
+ f'Try getting a new exit node by using the New Identity button in the Tor Browser.')
if e.ip:
- error_message += ' Exit node IP address: ' + e.ip
+ error_message += f' Exit node IP address: {e.ip}'
print(error_message)
return None
elif e.code == '502':
- print('Error checking channel', channel_status_name + ':', str(e))
+ print(f'Error checking channel {channel_status_name}: {e}')
return None
raise
@@ -505,7 +504,7 @@ def _get_upstream_videos(channel_id):
except KeyError:
channel_status_name = channel_id
- print("Checking channel: " + channel_status_name)
+ print(f"Checking channel: {channel_status_name}")
tasks = (
# channel page, need for video duration
@@ -550,15 +549,15 @@ def _get_upstream_videos(channel_id):
times_published[video_id_element.text] = time_published
except ValueError:
- print('Failed to read atoma feed for ' + channel_status_name)
+ print(f'Failed to read atoma feed for {channel_status_name}')
traceback.print_exc()
except defusedxml.ElementTree.ParseError:
- print('Failed to read atoma feed for ' + channel_status_name)
+ print(f'Failed to read atoma feed for {channel_status_name}')
if channel_info is None: # there was an error
return
if channel_info['error']:
- print('Error checking channel ' + channel_status_name + ': ' + channel_info['error'])
+ print(f'Error checking channel {channel_status_name}: {channel_info["error"]}')
return
videos = channel_info['items']
@@ -1023,7 +1022,7 @@ def get_subscriptions_page():
tag = request.args.get('tag', None)
videos, number_of_videos_in_db = _get_videos(cursor, 60, (page - 1)*60, tag)
for video in videos:
- video['thumbnail'] = util.URL_ORIGIN + '/data/subscription_thumbnails/' + video['id'] + '.jpg'
+ video['thumbnail'] = f'{util.URL_ORIGIN}/data/subscription_thumbnails/{video["id"]}.jpg'
video['type'] = 'video'
video['item_size'] = 'small'
util.add_extra_html_info(video)
@@ -1033,7 +1032,7 @@ def get_subscriptions_page():
subscription_list = []
for channel_name, channel_id, muted in _get_subscribed_channels(cursor):
subscription_list.append({
- 'channel_url': util.URL_ORIGIN + '/channel/' + channel_id,
+ 'channel_url': f'{util.URL_ORIGIN}/channel/{channel_id}',
'channel_name': channel_name,
'channel_id': channel_id,
'muted': muted,
@@ -1109,17 +1108,17 @@ def serve_subscription_thumbnail(thumbnail):
for quality in ('hq720.jpg', 'sddefault.jpg', 'hqdefault.jpg'):
url = f"https://i.ytimg.com/vi/{video_id}/{quality}"
try:
- image = util.fetch_url(url, report_text="Saved thumbnail: " + video_id)
+ image = util.fetch_url(url, report_text=f"Saved thumbnail: {video_id}")
break
except util.FetchError as e:
if '404' in str(e):
continue
- print("Failed to download thumbnail for " + video_id + ": " + str(e))
+ print(f"Failed to download thumbnail for {video_id}: {e}")
flask.abort(500)
except urllib.error.HTTPError as e:
if e.code == 404:
continue
- print("Failed to download thumbnail for " + video_id + ": " + str(e))
+ print(f"Failed to download thumbnail for {video_id}: {e}")
flask.abort(e.code)
if image is None: