Adjust message for formerly "live" videos #211

Merged on Jul 5, 2024 (4 commits)
cps/tasks/download.py (10 changes: 7 additions & 3 deletions)
@@ -15,14 +15,16 @@
log = logger.create()

class TaskDownload(CalibreTask):
- def __init__(self, task_message, media_url, original_url, current_user_name, shelf_id):
+ def __init__(self, task_message, media_url, original_url, current_user_name, shelf_id, duration, live_status):
super(TaskDownload, self).__init__(task_message)
self.message = task_message
self.media_url = media_url
self.media_url_link = f'<a href="{media_url}" target="_blank">{media_url}</a>'
self.original_url = original_url
self.current_user_name = current_user_name
self.shelf_id = shelf_id
+ self.duration = datetime.utcfromtimestamp(int(duration)).strftime("%H:%M:%S") if duration else "unknown"
+ self.live_status = live_status
self.start_time = self.end_time = datetime.now()
self.stat = STAT_WAITING
self.progress = 0
@@ -54,6 +56,9 @@ def run(self, worker_thread):
last_progress_time = datetime.now()
fragment_stuck_timeout = 30 # seconds

self.message = f"Downloading {self.media_url_link}..."
if self.live_status == "was_live":
self.message += f" (formerly live video, length/duration is {self.duration} seconds)"
while p.poll() is None:
self.end_time = datetime.now()
# Check if there's data available to read
@@ -69,15 +74,14 @@
elif re.search(pattern_progress, line):
percentage = int(re.search(r'\d+', line).group())
if percentage < 100:
self.message = f"Downloading {self.media_url_link}..."
self.progress = min(0.99, (complete_progress_cycle + (percentage / 100)) / 4)
if percentage == 100:
complete_progress_cycle += 1
last_progress_time = datetime.now()
else:
elapsed_time = (datetime.now() - last_progress_time).total_seconds()
if elapsed_time >= fragment_stuck_timeout:
self.message = f"Downloading {self.media_url_link}... (This is taking longer than expected)"
self.message += f"<br>Some fragments are taking longer than expected to download. Please wait..."

sleep(0.1)

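For context, a minimal sketch (not part of the PR; the duration value is hypothetical) of what the new constructor arguments produce: an integer number of seconds is rendered as an HH:MM:SS string, which run() then appends to the task message when live_status is "was_live".

# Illustrative sketch only; the value and the surrounding TaskDownload plumbing are assumptions.
from datetime import datetime

duration = "5400"  # length of the formerly live video, in seconds
formatted = datetime.utcfromtimestamp(int(duration)).strftime("%H:%M:%S") if duration else "unknown"
print(formatted)  # -> 01:30:00
# Note: this formatting only covers durations below 24 hours.
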
cps/tasks/metadata_extract.py (19 changes: 13 additions & 6 deletions)
@@ -63,14 +63,14 @@ def _fetch_requested_urls(self, conn):
try:
cursor = conn.execute("PRAGMA table_info(media)")
self.columns = [column[1] for column in cursor.fetchall()]
query = ("SELECT path, duration FROM media WHERE error IS NULL AND path LIKE 'http%'"
query = ("SELECT path, duration, live_status FROM media WHERE error IS NULL AND path LIKE 'http%'"
if "error" in self.columns
else "SELECT path, duration FROM media WHERE path LIKE 'http%'")
else "SELECT path, duration, live_status FROM media WHERE path LIKE 'http%'")
rows = conn.execute(query).fetchall()
requested_urls = {}
- for path, duration in rows:
+ for path, duration, live_status in rows:
if duration is not None and duration > 0:
requested_urls[path] = {"duration": duration}
requested_urls[path] = {"duration": duration, "live_status": live_status}
else:
self.unavailable.append(path)
return requested_urls
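
For context, a minimal, self-contained sketch (not part of the PR) of how the extra live_status column flows through the new query and row unpacking. The in-memory table and the sample row are hypothetical stand-ins; the real media table is only assumed to have these columns.

# Illustrative sketch only: a throwaway SQLite table shaped like the columns the query relies on.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE media (path TEXT, duration INTEGER, live_status TEXT, error TEXT)")
conn.execute("INSERT INTO media VALUES ('https://example.com/v1', 5400, 'was_live', NULL)")

query = ("SELECT path, duration, live_status FROM media "
         "WHERE error IS NULL AND path LIKE 'http%'")
requested_urls = {}
for path, duration, live_status in conn.execute(query).fetchall():
    if duration is not None and duration > 0:
        requested_urls[path] = {"duration": duration, "live_status": live_status}
print(requested_urls)
# {'https://example.com/v1': {'duration': 5400, 'live_status': 'was_live'}}
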
@@ -125,10 +125,10 @@ def _sort_and_limit_requested_urls(self, requested_urls):
return dict(sorted(requested_urls.items(), key=lambda item: item[1]["views_per_day"], reverse=True)[:min(MAX_VIDEOS_PER_DOWNLOAD, len(requested_urls))])

def _add_download_tasks_to_worker(self, requested_urls):
- for index, requested_url in enumerate(requested_urls.keys()):
+ for index, (requested_url, url_data) in enumerate(requested_urls.items()):
task_download = TaskDownload(_("Downloading %(url)s...", url=requested_url),
requested_url, self.original_url,
- self.current_user_name, self.shelf_id)
+ self.current_user_name, self.shelf_id, duration=str(url_data["duration"]), live_status=url_data["live_status"])
WorkerThread.add(self.current_user_name, task_download)
num_requested_urls = len(requested_urls)
total_duration = sum(url_data["duration"] for url_data in requested_urls.values())
@@ -140,6 +140,13 @@ def _add_download_tasks_to_worker(self, requested_urls):
self.message += f"<br><br>Shelf Title: <a href='{shelf_url}' target='_blank'>{self.shelf_title}</a>"
if self.unavailable:
self.message += "<br><br>Unavailable Video(s):<br>" + "<br>".join(f'<a href="{url}" target="_blank">{url}</a>' for url in self.unavailable)
+ upcoming_live_urls = [url for url, url_data in requested_urls.items() if url_data["live_status"] == "is_upcoming"]
+ live_urls = [url for url, url_data in requested_urls.items() if url_data["live_status"] == "is_live"]
+ if upcoming_live_urls:
+     self.message += "<br><br>Upcoming Live Video(s):<br>" + "<br>".join(f'<a href="{url}" target="_blank">{url}</a>' for url in upcoming_live_urls)
+ if live_urls:
+     self.message += "<br><br>Live Video(s):<br>" + "<br>".join(f'<a href="{url}" target="_blank">{url}</a>' for url in live_urls)
+

def run(self, worker_thread):
self.worker_thread = worker_thread
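
Finally, a minimal sketch (again not part of the PR; URLs, durations, and statuses are made up) of the new partitioning by live_status that drives the extra "Upcoming Live Video(s)" and "Live Video(s)" lines in the task message.

# Illustrative sketch only; the sample data is hypothetical.
requested_urls = {
    "https://example.com/a": {"duration": 5400, "live_status": "was_live"},
    "https://example.com/b": {"duration": 600, "live_status": "is_upcoming"},
    "https://example.com/c": {"duration": 300, "live_status": "is_live"},
}

upcoming_live_urls = [url for url, url_data in requested_urls.items()
                      if url_data["live_status"] == "is_upcoming"]
live_urls = [url for url, url_data in requested_urls.items()
             if url_data["live_status"] == "is_live"]

message = ""
if upcoming_live_urls:
    message += "<br><br>Upcoming Live Video(s):<br>" + "<br>".join(
        f'<a href="{url}" target="_blank">{url}</a>' for url in upcoming_live_urls)
if live_urls:
    message += "<br><br>Live Video(s):<br>" + "<br>".join(
        f'<a href="{url}" target="_blank">{url}</a>' for url in live_urls)
print(message)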