no logging

nose
2025-11-26 00:02:33 -08:00
parent d1f08216a2
commit 935ce303d0
5 changed files with 201 additions and 232 deletions
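
The change is mechanical: user-facing log(...) calls in the download cmdlet become debug(...), so routine progress messages only appear when debugging is switched on. The debug helper itself is not part of this diff; the snippet below is only a hypothetical sketch of that kind of verbosity-gated wrapper (the environment flag and signatures are assumptions, not the project's actual implementation).

# Hypothetical sketch of a verbosity-gated debug() alongside an always-on log().
# Not the helper used by this repository; names and the DEBUG flag are assumptions.
import os
import sys

DEBUG_ENABLED = os.environ.get("DEBUG", "").lower() in ("1", "true", "yes")

def log(message: str, file=sys.stdout, flush: bool = True) -> None:
    """Always-visible, user-facing output."""
    print(message, file=file, flush=flush)

def debug(message: str) -> None:
    """Emit only when debug output is enabled; silent otherwise."""
    if DEBUG_ENABLED:
        print(message, file=sys.stderr, flush=True)
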


@@ -1058,11 +1058,11 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
debug(f"Torrent/magnet added: {arg[:50]}...")
elif _is_torrent_file_or_url(arg):
# Handle .torrent files and URLs
log(f"Processing torrent file/URL: {arg}", flush=True)
debug(f"Processing torrent file/URL: {arg}")
magnet = _process_torrent_input(arg)
if magnet and magnet.lower().startswith('magnet:'):
urls_to_download.append(magnet)
log(f"✓ Converted to magnet: {magnet[:70]}...", flush=True)
debug(f"✓ Converted to magnet: {magnet[:70]}...")
elif magnet:
urls_to_download.append(magnet)
else:
@@ -1081,17 +1081,17 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
line = line.strip()
if line and line.lower().startswith(('http://', 'https://')):
urls_to_download.append(line)
log(f"Loaded URLs from file: {arg}", flush=True)
debug(f"Loaded URLs from file: {arg}")
except Exception as e:
log(f"Error reading file {arg}: {e}", file=sys.stderr)
else:
log(f"Ignored argument: {arg}", file=sys.stderr)
debug(f"Ignored argument: {arg}")
# Item selection (for playlists/formats)
# Note: -item flag is deprecated in favor of @N pipeline selection, but kept for compatibility
playlist_items = parsed.get("item")
if playlist_items:
log(f"Item selection: {playlist_items}", flush=True)
debug(f"Item selection: {playlist_items}")
@@ -1149,7 +1149,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if isinstance(item, dict) and item.get('__playlist_url'):
playlist_url = item.get('__playlist_url')
item_num = item.get('__playlist_item', 1)
log(f"📍 Playlist item from add-file: #{item_num}", flush=True)
debug(f"📍 Playlist item from add-file: #{item_num}")
# Add to download list with marker
urls_to_download.append({
'__playlist_url': playlist_url,
@@ -1166,7 +1166,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if playlist_url:
# Playlist item selected - need to download this specific track
log(f"📍 Playlist item selected: #{item_num} - {item.get('title', 'Unknown')}", flush=True)
debug(f"📍 Playlist item selected: #{item_num} - {item.get('title', 'Unknown')}")
# Add to download list - the playlist will be probed and item extracted
# Store with special marker so we know which item to select
urls_to_download.append({
@@ -1177,14 +1177,14 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
# ====== CHECK FOR FORMAT SELECTION RESULT ======
if isinstance(item, dict) and item.get('format_id') is not None and item.get('source_url'):
log(f"🎬 Format selected from pipe: {item.get('format_id')}", flush=True)
log(f" Source URL: {item.get('source_url')}", flush=True)
debug(f"🎬 Format selected from pipe: {item.get('format_id')}")
debug(f" Source URL: {item.get('source_url')}")
# Store as dict so we can extract format_id + source_url during download
urls_to_download.append(item)
continue
elif hasattr(item, 'format_id') and hasattr(item, 'source_url') and item.format_id is not None:
log(f"🎬 Format selected from pipe: {item.format_id}", flush=True)
log(f" Source URL: {item.source_url}", flush=True)
debug(f"🎬 Format selected from pipe: {item.format_id}")
debug(f" Source URL: {item.source_url}")
urls_to_download.append({
'format_id': item.format_id,
'source_url': item.source_url,
@@ -1204,9 +1204,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
isbn = metadata.get('isbn') or item.get('isbn')
olid = metadata.get('olid') or item.get('olid')
log(f"[search-result] OpenLibrary: '{title}'", flush=True)
debug(f"[search-result] OpenLibrary: '{title}'")
if isbn:
log(f" ISBN: {isbn}", flush=True)
debug(f" ISBN: {isbn}")
# Check if book is borrowable from ebook_access field or status
ebook_access = metadata.get('ebook_access') or item.get('ebook_access', '')
@@ -1217,8 +1217,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
is_borrowable = _is_openlibrary_downloadable(ebook_access, status_text)
if is_borrowable:
log(f" ✓ Available for borrowing on Archive.org", flush=True)
log(f" → Queued for auto-borrowing...", flush=True)
debug(f" ✓ Available for borrowing on Archive.org")
debug(f" → Queued for auto-borrowing...")
# Queue borrow request as special dict object
# We need OCAID (Archive.org ID), not just numeric OLID
ocaid = archive_id
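
The comment above notes that borrowing needs the Archive.org identifier (OCAID), not the numeric OLID; the next hunk falls back to asking OpenLibrary for it when only an ISBN is known. The exact request is not visible in this diff, so the sketch below shows only one plausible way to do that lookup, using OpenLibrary's edition-by-ISBN endpoint.

# Hypothetical sketch of the elided OCAID lookup: fetch the OpenLibrary edition
# record for an ISBN and read its Archive.org identifier, if present.
import requests

def fetch_ocaid(isbn: str) -> str | None:
    r = requests.get(f"https://openlibrary.org/isbn/{isbn}.json", timeout=10)
    r.raise_for_status()
    return r.json().get("ocaid")
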
@@ -1233,7 +1233,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
ol_data = r.json()
ocaid = ol_data.get('ocaid')
except Exception as e:
log(f" ⚠ Could not fetch OCAID from OpenLibrary: {e}", file=sys.stderr)
debug(f" ⚠ Could not fetch OCAID from OpenLibrary: {e}")
if ocaid:
urls_to_download.append({
@@ -1246,7 +1246,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
else:
# OCAID not found - book claims borrowable but not on Archive.org
# Fall back to LibGen search instead
log(f" ⚠ Book marked borrowable but not found on Archive.org", file=sys.stderr)
debug(f" ⚠ Book marked borrowable but not found on Archive.org")
if isbn:
try:
from helper.search_provider import get_provider
@@ -1258,19 +1258,19 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
url = libgen_result.get('target') if isinstance(libgen_result, dict) else getattr(libgen_result, 'target', None)
if url:
urls_to_download.append(url)
log(f" ✓ Found on LibGen instead", flush=True)
debug(f" ✓ Found on LibGen instead")
else:
log(f" ⚠ Not found on LibGen", file=sys.stderr)
debug(f" ⚠ Not found on LibGen")
else:
log(f" ⚠ Not found on LibGen", file=sys.stderr)
debug(f" ⚠ Not found on LibGen")
else:
log(f" ⚠ LibGen provider not available", file=sys.stderr)
debug(f" ⚠ LibGen provider not available")
except Exception as e:
log(f" ✗ Error searching LibGen: {e}", file=sys.stderr)
debug(f" ✗ Error searching LibGen: {e}")
else:
# Book is NOT borrowable - route to LibGen
if isbn:
log(f" ⚠ Not available on Archive.org - attempting LibGen...", flush=True)
debug(f" ⚠ Not available on Archive.org - attempting LibGen...")
try:
from helper.search_provider import get_provider
libgen_provider = get_provider("libgen", config)
@@ -1281,21 +1281,21 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
url = libgen_result.get('target') if isinstance(libgen_result, dict) else getattr(libgen_result, 'target', None)
if url:
urls_to_download.append(url)
log(f" ✓ Found on LibGen", flush=True)
debug(f" ✓ Found on LibGen")
else:
log(f" ⚠ Not found on LibGen", file=sys.stderr)
debug(f" ⚠ Not found on LibGen")
else:
log(f" ⚠ Not found on LibGen", flush=True)
log(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data", flush=True)
debug(f" ⚠ Not found on LibGen")
debug(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data")
else:
log(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data", flush=True)
debug(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data")
except Exception as e:
log(f" ⚠ Could not search LibGen: {e}", file=sys.stderr)
log(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data", flush=True)
debug(f" ⚠ Could not search LibGen: {e}")
debug(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data")
else:
log(f" ⚠ ISBN not available", flush=True)
log(f" ▶ Visit: {item.get('target', 'https://openlibrary.org')}", flush=True)
log(f" ▶ Or find ISBN and use: search-file -provider libgen 'isbn:\"<ISBN>\"'", flush=True)
debug(f" ⚠ ISBN not available")
debug(f" ▶ Visit: {item.get('target', 'https://openlibrary.org')}")
debug(f" ▶ Or find ISBN and use: search-file -provider libgen 'isbn:\"<ISBN>\"'")
elif origin == 'soulseek':
# Handle Soulseek downloads using the provider
metadata = item.get('full_metadata', {}) if isinstance(item.get('full_metadata'), dict) else {}
@@ -1350,18 +1350,18 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
)
pipeline_context.emit(result_dict)
else:
log(f" ✗ Download failed (peer may be offline)", file=sys.stderr)
debug(f" ✗ Download failed (peer may be offline)")
if db:
db.append_worker_stdout(worker_id, f"✗ Download failed for {title}")
log(f" ▶ Try another result: search-file -provider soulseek \"...\" | @2 | download-data", flush=True)
debug(f" ▶ Try another result: search-file -provider soulseek \"...\" | @2 | download-data")
except Exception as e:
log(f" ✗ Download error: {e}", file=sys.stderr)
debug(f" ✗ Download error: {e}")
if db:
db.append_worker_stdout(worker_id, f"✗ Error: {e}")
log(f" ▶ Alternative: search-soulseek -download \"{title}\" -storage <location>", flush=True)
debug(f" ▶ Alternative: search-soulseek -download \"{title}\" -storage <location>")
else:
log(f"[search-result] Soulseek: '{title}'", flush=True)
log(f" ⚠ Missing download info (username/filename)", flush=True)
debug(f"[search-result] Soulseek: '{title}'")
debug(f" ⚠ Missing download info (username/filename)")
if db:
db.append_worker_stdout(worker_id, f"⚠ Missing download info for {title}")
elif origin == 'libgen':
@@ -1380,17 +1380,17 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
'book_id': book_id,
}
urls_to_download.append(url_entry)
log(f"[search-result] LibGen: '{title}'", flush=True)
log(f" ✓ Queued for download", flush=True)
debug(f"[search-result] LibGen: '{title}'")
debug(f" ✓ Queued for download")
if mirrors:
log(f" Mirrors available: {len(mirrors)}", flush=True)
debug(f" Mirrors available: {len(mirrors)}")
elif origin == 'debrid':
# Debrid results can use download-data
url = item.get('target')
if url:
urls_to_download.append(str(url))
log(f"[search-result] Debrid: '{title}'", flush=True)
log(f" ✓ Queued for download", flush=True)
debug(f"[search-result] Debrid: '{title}'")
debug(f" ✓ Queued for download")
else:
# Regular fields for non-search results
url = item.get('url') or item.get('link') or item.get('href') or item.get('target')
@@ -1407,9 +1407,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
isbn = metadata.get('isbn') or getattr(item, 'isbn', None)
olid = metadata.get('olid') or getattr(item, 'olid', None)
log(f"[search-result] OpenLibrary: '{title}'", flush=True)
debug(f"[search-result] OpenLibrary: '{title}'")
if isbn:
log(f" ISBN: {isbn}", flush=True)
debug(f" ISBN: {isbn}")
# Check if book is borrowable from ebook_access field or status
ebook_access = metadata.get('ebook_access') or getattr(item, 'ebook_access', '')
@@ -1421,8 +1421,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if is_borrowable:
# Book IS borrowable on Archive.org
log(f" ✓ Available for borrowing on Archive.org", flush=True)
log(f" → Queued for auto-borrowing...", flush=True)
debug(f" ✓ Available for borrowing on Archive.org")
debug(f" → Queued for auto-borrowing...")
# Queue borrow request as special dict object
ocaid = archive_id
if not ocaid and isbn:
@@ -1434,7 +1434,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
ol_data = r.json()
ocaid = ol_data.get('ocaid')
except Exception as e:
log(f" ⚠ Could not fetch OCAID from OpenLibrary: {e}", file=sys.stderr)
debug(f" ⚠ Could not fetch OCAID from OpenLibrary: {e}")
if ocaid:
urls_to_download.append({
'__borrow_request__': True,
@@ -1446,7 +1446,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
else:
# OCAID not found - book claims borrowable but not on Archive.org
# Fall back to LibGen search instead
log(f" ⚠ No Archive.org ID found - attempting LibGen instead...", file=sys.stderr)
debug(f" ⚠ No Archive.org ID found - attempting LibGen instead...")
if isbn:
try:
from helper.search_provider import get_provider
@@ -1458,21 +1458,21 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
url = libgen_result.get('target') if isinstance(libgen_result, dict) else getattr(libgen_result, 'target', None)
if url:
urls_to_download.append(url)
log(f" ✓ Found on LibGen instead", flush=True)
debug(f" ✓ Found on LibGen instead")
else:
log(f" ⚠ Not found on LibGen", file=sys.stderr)
debug(f" ⚠ Not found on LibGen")
else:
log(f" ⚠ Not found on LibGen", file=sys.stderr)
debug(f" ⚠ Not found on LibGen")
else:
log(f" ⚠ LibGen provider not available", file=sys.stderr)
debug(f" ⚠ LibGen provider not available")
except Exception as e:
log(f" ✗ Error searching LibGen: {e}", file=sys.stderr)
debug(f" ✗ Error searching LibGen: {e}")
else:
log(f" ⚠ ISBN not available for LibGen fallback", file=sys.stderr)
debug(f" ⚠ ISBN not available for LibGen fallback")
else:
# Book is NOT borrowable - route to LibGen
if isbn:
log(f" ⚠ Not available on Archive.org - attempting LibGen...", flush=True)
debug(f" ⚠ Not available on Archive.org - attempting LibGen...")
try:
from helper.search_provider import get_provider
libgen_provider = get_provider("libgen", config)
@@ -1483,21 +1483,21 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
url = libgen_result.get('target') if isinstance(libgen_result, dict) else getattr(libgen_result, 'target', None)
if url:
urls_to_download.append(url)
log(f" ✓ Found on LibGen", flush=True)
debug(f" ✓ Found on LibGen")
else:
log(f" ⚠ Not found on LibGen", file=sys.stderr)
debug(f" ⚠ Not found on LibGen")
else:
log(f" ⚠ Not found on LibGen", flush=True)
log(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data", flush=True)
debug(f" ⚠ Not found on LibGen")
debug(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data")
else:
log(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data", flush=True)
debug(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data")
except Exception as e:
log(f" ⚠ Could not search LibGen: {e}", file=sys.stderr)
log(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data", flush=True)
debug(f" ⚠ Could not search LibGen: {e}")
debug(f" ▶ To search LibGen: search-file -provider libgen 'isbn:{isbn}' | @1 | download-data")
else:
log(f" ⚠ ISBN not available", flush=True)
log(f" ▶ Visit: {getattr(item, 'target', 'https://openlibrary.org')}", flush=True)
log(f" ▶ Or find ISBN and use: search-file -provider libgen 'isbn:\"<ISBN>\"'", flush=True)
debug(f" ⚠ ISBN not available")
debug(f" ▶ Visit: {getattr(item, 'target', 'https://openlibrary.org')}")
debug(f" ▶ Or find ISBN and use: search-file -provider libgen 'isbn:\"<ISBN>\"'")
elif origin == 'soulseek':
# Handle Soulseek downloads using the provider
metadata = getattr(item, 'full_metadata', {}) if isinstance(getattr(item, 'full_metadata', None), dict) else {}
@@ -1510,8 +1510,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
import asyncio
from helper.search_provider import SoulSeekProvider
provider = SoulSeekProvider(config)
log(f"[search-result] Soulseek: '{title}'", flush=True)
log(f" ▶ Downloading from {username}...", flush=True)
debug(f"[search-result] Soulseek: '{title}'")
debug(f" ▶ Downloading from {username}...")
if db:
db.append_worker_stdout(worker_id, f"Downloading from Soulseek: {title} (from {username})")
@@ -1532,7 +1532,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if success:
downloaded_file = Path(provider.DOWNLOAD_DIR) / Path(filename).name
if downloaded_file.exists():
log(f" ✓ Downloaded: {downloaded_file.name}", flush=True)
debug(f" ✓ Downloaded: {downloaded_file.name}")
files_downloaded_directly += 1
if db:
db.append_worker_stdout(worker_id, f"✓ Downloaded: {downloaded_file.name}")
@@ -1552,18 +1552,18 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
)
pipeline_context.emit(result_dict)
else:
log(f" ✗ Download failed (peer may be offline)", file=sys.stderr)
debug(f" ✗ Download failed (peer may be offline)")
if db:
db.append_worker_stdout(worker_id, f"✗ Download failed for {title}")
log(f" ▶ Try another result: search-file -provider soulseek \"...\" | @2 | download-data", flush=True)
debug(f" ▶ Try another result: search-file -provider soulseek \"...\" | @2 | download-data")
except Exception as e:
log(f" ✗ Download error: {e}", file=sys.stderr)
debug(f" ✗ Download error: {e}")
if db:
db.append_worker_stdout(worker_id, f"✗ Error: {e}")
log(f" ▶ Alternative: search-soulseek -download \"{title}\" -storage <location>", flush=True)
debug(f" ▶ Alternative: search-soulseek -download \"{title}\" -storage <location>")
else:
log(f"[search-result] Soulseek: '{title}'", flush=True)
log(f" ⚠ Missing download info (username/filename)", flush=True)
debug(f"[search-result] Soulseek: '{title}'")
debug(f" ⚠ Missing download info (username/filename)")
if db:
db.append_worker_stdout(worker_id, f"⚠ Missing download info for {title}")
elif origin == 'libgen':
@@ -1592,15 +1592,15 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
urls_to_download.append(str(url))
if not urls_to_download and files_downloaded_directly == 0:
log(f"No downloadable URLs found", file=sys.stderr)
debug(f"No downloadable URLs found")
return 1
log(f"Processing {len(urls_to_download)} URL(s)", flush=True)
debug(f"Processing {len(urls_to_download)} URL(s)")
for i, u in enumerate(urls_to_download, 1):
if isinstance(u, dict):
log(f" [{i}] Format: {u.get('format_id', '?')} from {u.get('source_url', '?')[:60]}...", flush=True)
debug(f" [{i}] Format: {u.get('format_id', '?')} from {u.get('source_url', '?')[:60]}...")
else:
log(f" [{i}] URL: {str(u)[:60]}...", flush=True)
debug(f" [{i}] URL: {str(u)[:60]}...")
# ========================================================================
# RESOLVE OUTPUT DIRECTORY
@@ -1612,7 +1612,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if storage_location:
try:
final_output_dir = SharedArgs.resolve_storage(storage_location)
log(f"Using storage location: {storage_location}{final_output_dir}", flush=True)
debug(f"Using storage location: {storage_location}{final_output_dir}")
except ValueError as e:
log(str(e), file=sys.stderr)
return 1
@@ -1621,7 +1621,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if final_output_dir is None and resolve_output_dir is not None:
try:
final_output_dir = resolve_output_dir(config)
log(f"Using config resolver: {final_output_dir}", flush=True)
debug(f"Using config resolver: {final_output_dir}")
except Exception:
pass
@@ -1629,14 +1629,14 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if final_output_dir is None and config and config.get("outfile"):
try:
final_output_dir = Path(config["outfile"]).expanduser()
log(f"Using config outfile: {final_output_dir}", flush=True)
debug(f"Using config outfile: {final_output_dir}")
except Exception:
pass
# Priority 5: Default (home/Videos)
if final_output_dir is None:
final_output_dir = Path.home() / "Videos"
log(f"Using default directory: {final_output_dir}", flush=True)
debug(f"Using default directory: {final_output_dir}")
# Ensure directory exists
try:
@@ -1664,7 +1664,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
current_format_selector = format_selector
actual_url = url
if isinstance(url, dict) and url.get('format_id') and url.get('source_url'):
log(f"🎬 Format selected: {url.get('format_id')}", flush=True)
debug(f"🎬 Format selected: {url.get('format_id')}")
format_id = url.get('format_id')
current_format_selector = format_id
@@ -1674,7 +1674,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if vcodec and vcodec != "none" and (not acodec or acodec == "none"):
# Video-only format, add bestaudio automatically
current_format_selector = f"{format_id}+bestaudio"
log(f" Video-only format detected, automatically adding bestaudio", flush=True)
debug(f" Video-only format detected, automatically adding bestaudio")
actual_url = url.get('source_url')
url = actual_url # Use the actual URL for further processing
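
The hunk above only touches the log call, but the surrounding logic is worth restating: when the selected format has a video codec and no audio codec, the selector is rewritten to "FORMAT+bestaudio" so yt-dlp merges a best-available audio stream into the download. A stand-alone restatement of that check (a sketch, not the cmdlet's actual helper):

# Sketch of the selector fix-up above: pair a video-only format with bestaudio.
def build_format_selector(fmt: dict) -> str:
    format_id = fmt["format_id"]
    vcodec = fmt.get("vcodec")
    acodec = fmt.get("acodec")
    if vcodec and vcodec != "none" and (not acodec or acodec == "none"):
        # Video-only format: let yt-dlp download and merge the best audio track.
        return f"{format_id}+bestaudio"
    return format_id
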
@@ -1688,15 +1688,15 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
book_id = url.get('book_id')
if not book_id:
log(f" ✗ Missing book ID for borrowing", file=sys.stderr)
debug(f" ✗ Missing book ID for borrowing")
exit_code = 1
continue
title_val = url.get('title', 'Unknown Book')
book_id_str = str(book_id)
log(f"[auto-borrow] Starting borrow for: {title_val}", flush=True)
log(f" Book ID: {book_id_str}", flush=True)
debug(f"[auto-borrow] Starting borrow for: {title_val}")
debug(f" Book ID: {book_id_str}")
# Get Archive.org credentials
email, password = credential_openlibrary(config)
@@ -1708,33 +1708,33 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
# Attempt to borrow and download
try:
log(f" → Logging into Archive.org...", flush=True)
debug(f" → Logging into Archive.org...")
from helper.archive_client import login
import requests
try:
session = login(email, password)
except requests.exceptions.Timeout:
log(f" ✗ Timeout logging into Archive.org (server not responding)", file=sys.stderr)
debug(f" ✗ Timeout logging into Archive.org (server not responding)")
exit_code = 1
continue
except requests.exceptions.RequestException as e:
log(f" ✗ Error connecting to Archive.org: {e}", file=sys.stderr)
debug(f" ✗ Error connecting to Archive.org: {e}")
exit_code = 1
continue
log(f" → Borrowing book...", flush=True)
debug(f" → Borrowing book...")
try:
session = loan(session, book_id_str, verbose=True)
except requests.exceptions.Timeout:
log(f" ✗ Timeout while borrowing (server not responding)", file=sys.stderr)
debug(f" ✗ Timeout while borrowing (server not responding)")
exit_code = 1
continue
except requests.exceptions.RequestException as e:
log(f" ✗ Error while borrowing: {e}", file=sys.stderr)
debug(f" ✗ Error while borrowing: {e}")
exit_code = 1
continue
log(f" → Extracting page information...", flush=True)
debug(f" → Extracting page information...")
# Try both URL formats
book_urls = [
f"https://archive.org/borrow/{book_id_str}",
@@ -1749,24 +1749,24 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
try:
title, links, metadata = get_book_infos(session, book_url)
if title and links:
log(f" → Found {len(links)} pages", flush=True)
debug(f" → Found {len(links)} pages")
break
except requests.exceptions.Timeout:
last_error = "Timeout while extracting pages"
log(f" ⚠ Timeout while extracting from {book_url}", flush=True)
debug(f" ⚠ Timeout while extracting from {book_url}")
continue
except Exception as e:
last_error = str(e)
log(f" ⚠ Failed to extract from {book_url}: {e}", flush=True)
debug(f" ⚠ Failed to extract from {book_url}: {e}")
continue
if not links:
log(f" ✗ Could not extract book pages (Last error: {last_error})", file=sys.stderr)
debug(f" ✗ Could not extract book pages (Last error: {last_error})")
exit_code = 1
continue
# Download pages
log(f" → Downloading {len(links)} pages...", flush=True)
debug(f" → Downloading {len(links)} pages...")
with tempfile.TemporaryDirectory() as temp_dir:
# download(session, n_threads, directory, links, scale, book_id)
images = download(
@@ -1779,16 +1779,16 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
)
if not images:
log(f" ✗ No pages downloaded", file=sys.stderr)
debug(f" ✗ No pages downloaded")
exit_code = 1
continue
log(f" ✓ Downloaded {len(images)} pages", flush=True)
debug(f" ✓ Downloaded {len(images)} pages")
# Try to merge into PDF
try:
import img2pdf
log(f" → Merging pages into PDF...", flush=True)
debug(f" → Merging pages into PDF...")
filename = title if title else f"book_{book_id_str}"
filename = "".join(c for c in filename if c.isalnum() or c in (' ', '.', '-'))[:100]
@@ -1805,7 +1805,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
with open(output_path, 'wb') as f:
f.write(pdf_content)
log(f" ✓ Successfully borrowed and saved to: {output_path}", flush=True)
debug(f" ✓ Successfully borrowed and saved to: {output_path}")
downloaded_files.append(str(output_path))
# Emit result for downstream cmdlets
@@ -1836,7 +1836,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
pipeline_context.emit(pipe_obj)
exit_code = 0
except ImportError:
log(f" ⚠ img2pdf not available - saving pages as collection", file=sys.stderr)
debug(f" ⚠ img2pdf not available - saving pages as collection")
# Just copy images to output dir
filename = title if title else f"book_{book_id_str}"
filename = "".join(c for c in filename if c.isalnum() or c in (' ', '.', '-'))[:100]
@@ -1847,7 +1847,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
i += 1
shutil.copytree(temp_dir, str(output_dir))
log(f" ✓ Successfully borrowed and saved to: {output_dir}", flush=True)
debug(f" ✓ Successfully borrowed and saved to: {output_dir}")
downloaded_files.append(str(output_dir))
# Emit result for downstream cmdlets
@@ -1877,7 +1877,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
exit_code = 0
except Exception as e:
log(f" ✗ Borrow/download failed: {e}", file=sys.stderr)
debug(f" ✗ Borrow/download failed: {e}")
import traceback
traceback.print_exc()
exit_code = 1
@@ -1885,11 +1885,11 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
continue # Skip normal URL handling
except ImportError as e:
log(f" ✗ Archive.org tools not available: {e}", file=sys.stderr)
debug(f" ✗ Archive.org tools not available: {e}")
exit_code = 1
continue
except Exception as e:
log(f" ✗ Auto-borrow error: {e}", file=sys.stderr)
debug(f" ✗ Auto-borrow error: {e}")
import traceback
traceback.print_exc()
exit_code = 1
@@ -1905,7 +1905,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
book_id = url.get('book_id', '')
if not primary_url:
log(f"Skipping libgen entry: no primary URL", file=sys.stderr)
debug(f"Skipping libgen entry: no primary URL")
exit_code = 1
continue
@@ -1916,11 +1916,11 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
# Remove duplicates while preserving order
mirrors_to_try = list(dict.fromkeys(mirrors_to_try))
log(f"🔄 LibGen download with mirror fallback (book_id: {book_id})", flush=True)
log(f" Primary: {primary_url[:80]}...", flush=True)
debug(f"🔄 LibGen download with mirror fallback (book_id: {book_id})")
debug(f" Primary: {primary_url[:80]}...")
if len(mirrors_to_try) > 1:
log(f" {len(mirrors_to_try) - 1} alternative mirror(s) available", flush=True)
debug(f" {len(mirrors_to_try) - 1} alternative mirror(s) available")
# Resolve cookies path
final_cookies_path_libgen = None
@@ -1941,7 +1941,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
for mirror_idx, mirror_url in enumerate(mirrors_to_try, 1):
try:
if mirror_idx > 1:
log(f" → Trying mirror #{mirror_idx}: {mirror_url[:80]}...", flush=True)
debug(f" → Trying mirror #{mirror_idx}: {mirror_url[:80]}...")
# Use libgen_service's download_from_mirror for proper libgen handling
from helper.libgen_service import download_from_mirror
@@ -1954,12 +1954,12 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
success = download_from_mirror(
mirror_url=mirror_url,
output_path=file_path,
log_info=lambda msg: log(f" {msg}", flush=True),
log_error=lambda msg: log(f"{msg}", file=sys.stderr)
log_info=lambda msg: debug(f" {msg}"),
log_error=lambda msg: debug(f"{msg}")
)
if success and file_path.exists():
log(f" ✓ Downloaded successfully from mirror #{mirror_idx}", flush=True)
debug(f" ✓ Downloaded successfully from mirror #{mirror_idx}")
successful_mirror = mirror_url
download_succeeded = True
@@ -1984,9 +1984,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
except Exception as e:
last_error = str(e)
if mirror_idx == 1:
log(f" ⚠ Primary mirror failed: {e}", flush=True)
debug(f" ⚠ Primary mirror failed: {e}")
else:
log(f" ⚠ Mirror #{mirror_idx} failed: {e}", flush=True)
debug(f" ⚠ Mirror #{mirror_idx} failed: {e}")
if not download_succeeded:
log(f" ✗ All mirrors failed. Last error: {last_error}", file=sys.stderr)
@@ -1998,7 +1998,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
continue # Skip to next URL
except Exception as e:
log(f" ✗ LibGen mirror fallback error: {e}", file=sys.stderr)
debug(f" ✗ LibGen mirror fallback error: {e}")
import traceback
traceback.print_exc(file=sys.stderr)
exit_code = 1
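
The fallback pattern in the hunks above is simple but worth isolating: deduplicate the mirror list while preserving order, try each mirror in turn, stop at the first verified download, and remember the last error for the final failure message. A simplified, self-contained sketch (fetch stands in for helper.libgen_service.download_from_mirror, which is not shown here):

# Simplified, hypothetical version of the LibGen mirror-fallback loop above.
import sys
from pathlib import Path
from typing import Callable, Iterable, Optional

def download_with_fallback(
    mirrors: Iterable[str],
    output_path: Path,
    fetch: Callable[[str, Path], bool],
) -> Optional[str]:
    """Try each mirror in order; return the one that worked, or None."""
    last_error: Optional[str] = None
    # dict.fromkeys() drops duplicates while keeping the original order.
    for mirror in dict.fromkeys(mirrors):
        try:
            if fetch(mirror, output_path) and output_path.exists():
                return mirror
        except Exception as exc:  # keep trying the remaining mirrors
            last_error = str(exc)
    print(f"All mirrors failed. Last error: {last_error}", file=sys.stderr)
    return None
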
@@ -2010,20 +2010,20 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if isinstance(url, dict) and url.get('__playlist_url'):
playlist_url = url.get('__playlist_url')
item_num = url.get('__playlist_item', 1)
log(f"📍 Handling selected playlist item #{item_num}", flush=True)
debug(f"📍 Handling selected playlist item #{item_num}")
# Convert to actual URL and set playlist_items to download only this item
url = playlist_url
playlist_items = str(item_num)
# Fall through to normal handling below
else:
log(f"Skipping invalid URL entry: {url}", file=sys.stderr)
debug(f"Skipping invalid URL entry: {url}")
continue
log(f"Probing URL: {url}", flush=True)
debug(f"Probing URL: {url}")
# ====== TORRENT MODE - INTERCEPT BEFORE NORMAL DOWNLOAD ======
if torrent_mode or url.lower().startswith('magnet:'):
log(f"🧲 Torrent/magnet mode - spawning background worker...", flush=True)
debug(f"🧲 Torrent/magnet mode - spawning background worker...")
try:
# Get API key from config
@@ -2051,9 +2051,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
description=f"Torrent/magnet download via AllDebrid",
pipe=pipeline_context.get_current_command_text()
)
log(f"✓ Worker created (ID: {worker_id})", flush=True)
debug(f"✓ Worker created (ID: {worker_id})")
except Exception as e:
log(f"⚠ Failed to create worker: {e}", file=sys.stderr)
debug(f"⚠ Failed to create worker: {e}")
worker_manager = None
# Spawn background thread to handle the download
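
The thread construction itself sits between this hunk and the next, which only shows worker_thread.start(). A hypothetical sketch of that spawn-and-return pattern, so the command can finish while the torrent download keeps running (the handler signature is an assumption):

# Hypothetical sketch of the elided worker spawn: run the torrent handler in a
# daemon thread and return immediately; the worker ID lets the user track it.
import threading

def spawn_torrent_worker(handler, magnet: str, worker_id: str) -> threading.Thread:
    worker_thread = threading.Thread(
        target=handler,
        args=(magnet, worker_id),
        name=f"torrent-{worker_id}",
        daemon=True,  # do not block interpreter shutdown
    )
    worker_thread.start()
    return worker_thread
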
@@ -2075,7 +2075,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
)
worker_thread.start()
log(f"✓ Background worker started (ID: {worker_id})", flush=True)
debug(f"✓ Background worker started (ID: {worker_id})")
# Emit worker info so user can track it
worker_info = {
@@ -2110,7 +2110,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
is_actual_playlist = False # Track if we have a real multi-item playlist
if probe_info:
log(f"✓ Probed: {probe_info.get('title', url)} ({probe_info.get('extractor', 'unknown')})")
debug(f"✓ Probed: {probe_info.get('title', url)} ({probe_info.get('extractor', 'unknown')})")
# If it's a playlist, show the result table and skip download for now
entries = probe_info.get("entries", [])
@@ -2118,9 +2118,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
is_actual_playlist = True # We have a real playlist with multiple items
# Playlist detected but NO selection provided
# Always show table for user to select items
log(f"📋 Found playlist with {len(entries)} items")
debug(f"📋 Found playlist with {len(entries)} items")
_show_playlist_table(url, probe_info)
log(f" Playlist displayed. To select items, use @* or @1,3,5-8 syntax after piping results")
debug(f" Playlist displayed. To select items, use @* or @1,3,5-8 syntax after piping results")
playlists_displayed += 1
continue # Skip to next URL - don't download playlist without selection
elif entries and playlist_items:
@@ -2130,13 +2130,13 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
expanded_items = _expand_playlist_selection(playlist_items, len(entries))
playlist_items = expanded_items
selected_playlist_entries = _select_playlist_entries(entries, playlist_items)
log(f"📋 Found playlist with {len(entries)} items - downloading selected: {playlist_items}")
debug(f"📋 Found playlist with {len(entries)} items - downloading selected: {playlist_items}")
else:
log(f"Single item: {probe_info.get('title', 'Unknown')}")
debug(f"Single item: {probe_info.get('title', 'Unknown')}")
# ====== FORMAT LISTING MODE ======
if list_formats_mode and isinstance(url, str) and url.startswith(('http://', 'https://')):
log(f"Fetching formats for: {url}", flush=True)
debug(f"Fetching formats for: {url}")
from helper.download import list_formats
from result_table import ResultTable
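
Earlier in this hunk, playlist selections such as "1,3,5-8" are expanded through _expand_playlist_selection before the matching entries are picked. That helper is not part of this diff; the sketch below shows the kind of range expansion it presumably performs, clamped to the playlist length (exact semantics are an assumption).

# Hypothetical sketch of expanding a selection string like "1,3,5-8" or "*"
# into 1-based playlist indices; the real _expand_playlist_selection helper
# is not shown in this diff.
def expand_selection(selection: str, total: int) -> list[int]:
    indices: list[int] = []
    for part in selection.split(","):
        part = part.strip()
        if not part:
            continue
        if part == "*":
            indices.extend(range(1, total + 1))
        elif "-" in part:
            start, end = part.split("-", 1)
            indices.extend(range(int(start), int(end) + 1))
        else:
            indices.append(int(part))
    # Deduplicate, keep first-seen order, and drop out-of-range indices.
    return [i for i in dict.fromkeys(indices) if 1 <= i <= total]

# expand_selection("1,3,5-8", 10) -> [1, 3, 5, 6, 7, 8]
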
@@ -2209,7 +2209,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
"source_url": url,
"index": i,
})
log(f"Use @N syntax to select a format and download", flush=True)
debug(f"Use @N syntax to select a format and download")
else:
log(f"✗ No formats available for this URL", file=sys.stderr)
@@ -2224,7 +2224,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
from result_table import ResultTable
if is_url_supported_by_ytdlp(url):
log(f"Checking available formats for: {url}", flush=True)
debug(f"Checking available formats for: {url}")
all_formats = list_formats(url, no_playlist=is_youtube_url, playlist_items=playlist_items)
if all_formats:
@@ -2237,14 +2237,14 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if 0 < idx <= len(formats):
fmt = formats[idx-1]
current_format_selector = fmt.get("format_id")
log(f"Selected format #{idx}: {current_format_selector}")
debug(f"Selected format #{idx}: {current_format_selector}")
playlist_items = None # Clear so it doesn't affect download options
else:
log(f"Invalid format index: {idx}", file=sys.stderr)
elif len(formats) > 1:
# Multiple formats available
log(f"📊 Found {len(formats)} available formats for: {probe_info.get('title', 'Unknown')}", flush=True)
debug(f"📊 Found {len(formats)} available formats for: {probe_info.get('title', 'Unknown')}")
# Always show table for format selection via @N syntax
# Show table and wait for @N selection
@@ -2294,8 +2294,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
table.set_row_selection_args(i, ["-item", str(i + 1)])
# Display table and emit formats so they can be selected with @N
log(str(table), flush=True)
log(f"💡 Use @N syntax to select a format and download (e.g., @1)", flush=True)
debug(str(table))
debug(f"💡 Use @N syntax to select a format and download (e.g., @1)")
# Store table for @N expansion so CLI can reconstruct commands
pipeline_context.set_current_stage_table(table)
@@ -2317,7 +2317,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
formats_displayed = True # Mark that we displayed formats
continue # Skip download, user must select format via @N
log(f"Downloading: {url}", flush=True)
debug(f"Downloading: {url}")
# Resolve cookies path if specified
final_cookies_path = None
@@ -2362,19 +2362,13 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
# Check if this was a playlist download (is_actual_playlist tracks if we have a multi-item playlist)
if is_actual_playlist:
if not selected_playlist_entries:
log(
"⚠ Playlist metadata unavailable; cannot emit selected items for this stage.",
file=sys.stderr,
)
debug("⚠ Playlist metadata unavailable; cannot emit selected items for this stage.")
exit_code = 1
continue
matched_after, _ = _snapshot_playlist_paths(selected_playlist_entries, final_output_dir)
if not matched_after:
log(
"⚠ No playlist files found for the selected items after download.",
file=sys.stderr,
)
debug("⚠ No playlist files found for the selected items after download.")
exit_code = 1
continue
@@ -2389,9 +2383,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
emit_targets = new_playlist_files if new_playlist_files else matched_after
if new_playlist_files:
log(f"📋 Playlist download completed: {len(new_playlist_files)} new file(s)")
debug(f"📋 Playlist download completed: {len(new_playlist_files)} new file(s)")
else:
log(f"📁 Reusing {len(emit_targets)} cached playlist file(s)", flush=True)
debug(f"📁 Reusing {len(emit_targets)} cached playlist file(s)")
for playlist_file in emit_targets:
file_hash = _compute_file_hash(playlist_file)
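
Each emitted playlist file is identified by _compute_file_hash, whose implementation (and even its hash algorithm) is not visible in this diff. A hypothetical sketch of a typical content hash for this purpose:

# Hypothetical sketch of _compute_file_hash: stream the file through SHA-256
# so downstream pipeline stages can identify it by content.
import hashlib
from pathlib import Path

def compute_file_hash(path: Path, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as handle:
        for chunk in iter(lambda: handle.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
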
@@ -2444,7 +2438,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
downloaded_files.append(file_path)
pipeline_context.emit(pipe_obj)
log(f"✓ Downloaded: {file_path}", flush=True)
debug(f"✓ Downloaded: {file_path}")
else:
log(f"Download returned no result for {url}", file=sys.stderr)
exit_code = 1
@@ -2458,7 +2452,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
# Success if we downloaded files or displayed playlists/formats
if downloaded_files or files_downloaded_directly > 0:
total_files = len(downloaded_files) + files_downloaded_directly
log(f"✓ Successfully downloaded {total_files} file(s)", flush=True)
debug(f"✓ Successfully downloaded {total_files} file(s)")
# Create a result table for the downloaded files
# This ensures that subsequent @N commands select from these files
@@ -2496,14 +2490,14 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
return 0
if playlists_displayed:
log(f"✓ Displayed {playlists_displayed} playlist(s) for selection", flush=True)
debug(f"✓ Displayed {playlists_displayed} playlist(s) for selection")
if db:
db.update_worker_status(worker_id, 'completed')
db.close()
return 0 # Success - playlists shown
if formats_displayed:
log(f"✓ Format selection table displayed - use @N to select and download", flush=True)
debug(f"✓ Format selection table displayed - use @N to select and download")
if db:
db.update_worker_status(worker_id, 'completed')
db.close()