Add Matrix storage backend and .matrix cmdlet; LibGen fallback for waitlisted Archive.org borrows; URL queueing in .pipe

Author: nose
Date: 2025-11-30 11:39:04 -08:00
Parent: ed417c8200
Commit: 7a13af9a1f
15 changed files with 1150 additions and 363 deletions


@@ -209,7 +209,7 @@ class SharedArgs:
STORAGE = CmdletArg(
"storage",
type="enum",
choices=["hydrus", "local", "debrid", "ftp"],
choices=["hydrus", "local", "debrid", "ftp", "matrix"],
required=False,
description="Storage location or destination for saving/uploading files.",
alias="s",
@@ -268,6 +268,7 @@ class SharedArgs:
'hydrus': Path.home() / ".hydrus" / "client_files",
'debrid': Path.home() / "Debrid",
'ftp': Path.home() / "FTP",
'matrix': Path.home() / "Matrix", # Placeholder, not used for upload path
}
if storage_value is None:


@@ -541,8 +541,36 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
# Map provider 0x0 to storage 0x0 for download-data
if provider_name == "0x0":
dl_args.extend(["-storage", "0x0"])
-return dl_module._run(result, dl_args, config)
# Capture results from download-data so we can add them to the DB
captured_results = []
original_emit = ctx.emit
def capture_emit(obj):
captured_results.append(obj)
original_emit(obj)
ctx.emit = capture_emit
try:
ret_code = dl_module._run(result, dl_args, config)
finally:
ctx.emit = original_emit
if ret_code != 0:
return ret_code
# Process the downloaded files recursively to add them to the DB
if captured_results:
log(f"Processing {len(captured_results)} downloaded file(s)...", file=sys.stderr)
success_count = 0
for res in captured_results:
# Recursively call add-file with the downloaded result
if _run(res, _args, config) == 0:
success_count += 1
return 0 if success_count > 0 else 1
return 0
if media_path is None:
log("File path could not be resolved")
@@ -609,13 +637,13 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
return 0
# Handle storage-based operations (location is not None here)
-valid_locations = {'hydrus', 'local'}
+valid_locations = {'hydrus', 'local', 'matrix'}
is_valid_location = location in valid_locations
is_local_path = not is_valid_location and ('/' in location or '\\' in location or ':' in location)
if not (is_valid_location or is_local_path):
log(f"❌ Invalid location: {location}")
log(f"Valid options: 'hydrus', 'local', or a directory path (e.g., C:\\Music or /home/user/music)")
log(f"Valid options: 'hydrus', 'local', 'matrix', or a directory path")
return 1
if location == 'local':
@@ -704,6 +732,36 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
return exit_code
elif location == 'matrix':
log(f"Uploading to Matrix: {media_path.name}", file=sys.stderr)
try:
result_url = storage["matrix"].upload(media_path, config=config)
log(f"Matrix: {result_url}", file=sys.stderr)
result_dict = create_pipe_object_result(
source='matrix',
identifier=result_url,
file_path=str(media_path),
cmdlet_name='add-file',
title=media_path.name,
target=result_url
)
ctx.emit(result_dict)
except Exception as exc:
log(f"Failed: {exc}", file=sys.stderr)
return 1
if delete_after_upload:
try:
media_path.unlink()
_cleanup_sidecar_files(media_path)
log(f"✅ Deleted file and sidecar", file=sys.stderr)
except Exception as exc:
log(f"⚠️ Could not delete file: {exc}", file=sys.stderr)
return 0
# location == 'hydrus'
# Compute file hash to check if already in Hydrus
log(f"Uploading to Hydrus: {media_path.name}", file=sys.stderr)


@@ -1594,6 +1594,25 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
if not urls_to_download and files_downloaded_directly == 0:
debug(f"No downloadable URLs found")
return 1
# Deduplicate URLs while preserving order
unique_urls = []
seen_keys = set()
for u in urls_to_download:
key = None
if isinstance(u, dict):
key = u.get('url') or u.get('link') or u.get('target') or u.get('source_url')
if not key:
key = str(u)
else:
key = str(u)
if key and key not in seen_keys:
seen_keys.add(key)
unique_urls.append(u)
urls_to_download = unique_urls
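# Example of the dedup above (URLs illustrative): dict entries key on
# url/link/target/source_url, plain strings on themselves:
#   [{'url': 'https://a'}, {'link': 'https://a'}, 'https://b', 'https://b']
#   -> [{'url': 'https://a'}, 'https://b']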
debug(f"Processing {len(urls_to_download)} URL(s)")
for i, u in enumerate(urls_to_download, 1):
@@ -1749,6 +1768,108 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
debug(f" ✗ Error while borrowing: {e}")
exit_code = 1
continue
except Exception as e:
# Match BookNotAvailableError by name to avoid a hard import dependency
if type(e).__name__ == 'BookNotAvailableError':
debug(f" ⚠ Book is waitlisted/unavailable on Archive.org")
# Fallback to LibGen if ISBN is available
isbn = url.get('isbn')
if isbn:
debug(f" ▶ Falling back to LibGen search for ISBN: {isbn}")
from helper.search_provider import LibGenProvider
provider = LibGenProvider(config)
# Search specifically by ISBN
results = provider.search(f"isbn:{isbn}", limit=1)
if results:
debug(f" ✓ Found {len(results)} result(s) on LibGen")
# Use the first result
libgen_result = results[0]
# Process the LibGen result inline: a new URL entry can't be injected
# into the main loop at this point. 'target' is a mirror URL or libgen:ID.
target = libgen_result.target
debug(f" → Downloading from LibGen: {libgen_result.title}")
# Download via UnifiedBookDownloader straight into final_output_dir.
from helper.unified_book_downloader import UnifiedBookDownloader
downloader = UnifiedBookDownloader(config)
# The provider result's full_metadata is the book dict that
# UnifiedBookDownloader.download_book() expects.
book_data = libgen_result.full_metadata
# Pick a working mirror: prefer mirror_url, else the first mirrors entry.
mirrors = book_data.get('mirrors', {})
download_url = book_data.get('mirror_url')
if not download_url and mirrors:
# Pick first mirror
download_url = next(iter(mirrors.values()))
if download_url:
debug(f" → Mirror: {download_url}")
# download_book(book, output_dir) consumes the book dict
# (title, author, year, extension, mirrors, ...) as-is.
filepath = downloader.download_book(book_data, final_output_dir)
if filepath:
debug(f" ✓ Successfully downloaded from LibGen: {filepath}")
downloaded_files.append(str(filepath))
# Emit result
file_hash = _compute_file_hash(filepath)
emit_tags = ['book', 'libgen']
if isbn: emit_tags.append(f'isbn:{isbn}')
pipe_obj = create_pipe_object_result(
source='libgen',
identifier=book_data.get('md5', 'unknown'),
file_path=str(filepath),
cmdlet_name='download-data',
title=libgen_result.title,
file_hash=file_hash,
tags=emit_tags,
source_url=download_url
)
pipeline_context.emit(pipe_obj)
exit_code = 0
continue # Success!
else:
debug(f" ✗ Failed to download from LibGen")
else:
debug(f" ✗ No download URL found in LibGen result")
else:
debug(f" ✗ No results found on LibGen for ISBN: {isbn}")
else:
debug(f" ⚠ No ISBN available for LibGen fallback")
# If fallback failed or wasn't possible, abort
debug(f" ✗ Unable to borrow from Archive.org and LibGen fallback failed.")
exit_code = 1
continue
else:
# Re-raise other exceptions
raise e
debug(f" → Extracting page information...")
# Try both URL formats
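The inline fallback above is a candidate for extraction into a helper. A minimal sketch using only the calls already present in this hunk — _libgen_fallback is a hypothetical name:

from pathlib import Path
from typing import Optional

def _libgen_fallback(isbn: str, config: dict, out_dir: Path) -> Optional[Path]:
    """Search LibGen by ISBN and download the first hit, or return None."""
    from helper.search_provider import LibGenProvider
    from helper.unified_book_downloader import UnifiedBookDownloader
    results = LibGenProvider(config).search(f"isbn:{isbn}", limit=1)
    if not results:
        return None
    book = results[0].full_metadata
    # Prefer an explicit mirror_url, else fall back to the first mirror
    url = book.get('mirror_url') or next(iter(book.get('mirrors', {}).values()), None)
    if not url:
        return None
    return UnifiedBookDownloader(config).download_book(book, out_dir)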
@@ -1806,8 +1927,10 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
import img2pdf
debug(f" → Merging pages into PDF...")
-filename = title if title else f"book_{book_id_str}"
-filename = "".join(c for c in filename if c.isalnum() or c in (' ', '.', '-'))[:100]
+# Use the title from the result item if available, otherwise fall back to the extracted title
+filename_title = title_val if title_val and title_val != 'Unknown Book' else (title if title else f"book_{book_id_str}")
+# Allow underscores and spaces
+filename = "".join(c for c in filename_title if c.isalnum() or c in (' ', '.', '-', '_'))[:100]
output_path = Path(final_output_dir) / f"{filename}.pdf"
# Make unique filename if needed
@@ -1828,6 +1951,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
file_hash = _compute_file_hash(output_path)
# Build tags including ISBN if available
emit_tags = ['book', 'borrowed', 'pdf']
if title_val and title_val != 'Unknown Book':
emit_tags.append(f'title:{title_val}')
isbn_tag = url.get('isbn')
if isbn_tag:
emit_tags.append(f'isbn:{isbn_tag}')
@@ -2343,6 +2468,82 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results:
debug(f"Downloading: {url}")
# Special handling for LibGen URLs
if "libgen" in url or "library.lol" in url:
debug(f"🔄 Detected LibGen URL, using specialized downloader: {url}")
try:
from helper.libgen_service import download_from_mirror, search_libgen
# Details pages (e.g. https://libgen.li/series.php?id=577851) need their
# ID extracted and re-searched for fresh mirror links; series/edition
# pages are handled by download_from_mirror directly, so skip those.
libgen_id = ""
results = []
if "series.php" not in url and "edition.php" not in url:
match = re.search(r"id=(\d+)", url)
if match:
libgen_id = match.group(1)
debug(f" Extracted LibGen ID: {libgen_id}")
# Search by ID to get fresh mirror links
results = search_libgen(libgen_id, limit=1)
if results:
# Use the mirror URL from the result
mirror_url = results[0].get("mirror_url")
if mirror_url:
debug(f" Resolved to mirror URL: {mirror_url}")
url = mirror_url
# download_from_mirror writes to a full output path, but LibGen URLs
# rarely carry a usable filename, so build one from search metadata
# when available.
filename = "libgen_download.bin"
if libgen_id and results:
title = results[0].get("title", "book")
ext = results[0].get("extension", "pdf")
# Sanitize filename
safe_title = "".join(c for c in title if c.isalnum() or c in (' ', '-', '_')).strip()
filename = f"{safe_title}.{ext}"
elif "series.php" in url:
filename = f"series_{re.search(r'id=(\d+)', url).group(1) if re.search(r'id=(\d+)', url) else 'unknown'}.pdf"
output_path = final_output_dir / filename
if download_from_mirror(url, output_path, log_info=debug, log_error=log):
debug(f"✓ LibGen download successful: {output_path}")
# Create a result object
info = {
"id": libgen_id or "libgen",
"title": filename,
"webpage_url": url,
"ext": output_path.suffix.lstrip("."),
}
# Emit result
pipeline_context.emit(create_pipe_object_result(
source="libgen",
identifier=libgen_id or "libgen",
file_path=str(output_path),
cmdlet_name="download-data",
title=filename,
extra=info
))
downloaded_files.append(str(output_path))
continue
else:
debug("⚠ LibGen specialized download failed, falling back to generic downloader...")
except Exception as e:
debug(f"⚠ LibGen specialized download error: {e}")
# Fall through to generic downloader
# Resolve cookies path if specified
final_cookies_path = None
if cookies_path:

cmdlets/matrix.py (new file, 103 lines)

@@ -0,0 +1,103 @@
from typing import Any, Dict, Sequence, List
import sys
from ._shared import Cmdlet, CmdletArg, parse_cmdlet_args
from helper.logger import log, debug
from result_table import ResultTable
from helper.file_storage import MatrixStorageBackend
from config import save_config, load_config
import pipeline as ctx
def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
parsed = parse_cmdlet_args(args, CMDLET)
# Initialize backend
backend = MatrixStorageBackend()
# Get current default room
matrix_conf = config.get('storage', {}).get('matrix', {})
current_room_id = matrix_conf.get('room_id')
# Fetch rooms
debug("Fetching joined rooms from Matrix...")
rooms = backend.list_rooms(config)
if not rooms:
debug("No joined rooms found or Matrix not configured.")
return 1
# Handle selection if provided
selection = parsed.get("selection")
if selection:
new_room_id = None
selected_room_name = None
# Try as index (1-based)
try:
idx = int(selection) - 1
if 0 <= idx < len(rooms):
selected_room = rooms[idx]
new_room_id = selected_room['id']
selected_room_name = selected_room['name']
except ValueError:
# Try as Room ID
for room in rooms:
if room['id'] == selection:
new_room_id = selection
selected_room_name = room['name']
break
if new_room_id:
# Update config
# Load fresh config from disk to avoid saving runtime objects (like WorkerManager)
disk_config = load_config()
disk_config.setdefault('storage', {}).setdefault('matrix', {})['room_id'] = new_room_id
save_config(disk_config)
debug(f"Default Matrix room set to: {selected_room_name} ({new_room_id})")
current_room_id = new_room_id
else:
debug(f"Invalid selection: {selection}")
return 1
# Display table
table = ResultTable("Matrix Rooms")
for i, room in enumerate(rooms):
is_default = (room['id'] == current_room_id)
row = table.add_row()
row.add_column("Default", "*" if is_default else "")
row.add_column("Name", room['name'])
row.add_column("ID", room['id'])
# Set selection args so user can type @N to select
# This will run .matrix N
table.set_row_selection_args(i, [str(i + 1)])
table.set_source_command(".matrix")
# Register results
ctx.set_last_result_table_overlay(table, rooms)
ctx.set_current_stage_table(table)
print(table)
return 0
CMDLET = Cmdlet(
name=".matrix",
aliases=["matrix", "rooms"],
summary="List and select default Matrix room",
usage=".matrix [selection]",
args=[
CmdletArg(
name="selection",
type="string",
description="Index or ID of the room to set as default",
required=False
)
],
exec=_run
)
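Expected interaction with the cmdlet above (room names and IDs illustrative):

.matrix                       # list joined rooms; '*' marks the default
.matrix 2                     # set room 2 (1-based index) as default
.matrix !abc123:example.org   # or select by full room ID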


@@ -70,12 +70,15 @@ def _extract_title_from_item(item: Dict[str, Any]) -> str:
return title or filename or "Unknown"
-def _queue_items(items: List[Any], clear_first: bool = False) -> None:
+def _queue_items(items: List[Any], clear_first: bool = False) -> bool:
"""Queue items to MPV, starting it if necessary.
Args:
items: List of items to queue
clear_first: If True, the first item will replace the current playlist
Returns:
True if MPV was started, False if items were queued via IPC.
"""
for i, item in enumerate(items):
# Extract URL/Path
@@ -115,7 +118,7 @@ def _queue_items(items: List[Any], clear_first: bool = False) -> None:
# MPV not running (or died)
# Start MPV with remaining items
_start_mpv(items[i:])
-return
+return True
elif resp.get("error") == "success":
# Also set property for good measure
if title:
@@ -125,14 +128,30 @@ def _queue_items(items: List[Any], clear_first: bool = False) -> None:
else:
error_msg = str(resp.get('error'))
debug(f"Failed to queue item: {error_msg}", file=sys.stderr)
return False
def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
"""Manage and play items in the MPV playlist via IPC."""
parsed = parse_cmdlet_args(args, CMDLET)
# Initialize mpv_started flag
mpv_started = False
# Handle positional index argument if provided
index_arg = parsed.get("index")
url_arg = parsed.get("url")
# If index_arg is provided but is not an integer, treat it as a URL
# This allows .pipe "http://..." without -url flag
if index_arg is not None:
try:
int(index_arg)
except ValueError:
# Not an integer, treat as URL if url_arg is not set
if not url_arg:
url_arg = index_arg
index_arg = None
clear_mode = parsed.get("clear")
list_mode = parsed.get("list")
@@ -141,6 +160,15 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
save_mode = parsed.get("save")
load_mode = parsed.get("load")
# Handle URL queuing (mpv_started already initialized above)
if url_arg:
mpv_started = _queue_items([url_arg])
# After queuing a URL, default to listing the playlist so the user can
# see it was added, unless other mode flags are present.
if not (clear_mode or play_mode or pause_mode or save_mode or load_mode):
list_mode = True
# Handle Save Playlist
if save_mode:
from datetime import date  # local import; avoids the Windows-only 'date /t' shell-out
playlist_name = index_arg or f"Playlist {date.today().isoformat()}"
@@ -296,7 +324,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
# Handle piped input (add to playlist)
# Skip adding if -list is specified (user just wants to see current playlist)
-if result and not list_mode:
+if result and not list_mode and not url_arg:
# If result is a list of items, add them to playlist
items_to_add = []
if isinstance(result, list):
@@ -304,7 +332,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
elif isinstance(result, dict):
items_to_add = [result]
-_queue_items(items_to_add)
+if _queue_items(items_to_add):
+    mpv_started = True
if items_to_add:
# If items were added, the first one could be auto-played when nothing is playing.
@@ -315,6 +344,11 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
items = _get_playlist()
if items is None:
if mpv_started:
# MPV was just started, so we can't list items yet.
# But we know it's running (or trying to start), so don't start another instance.
return 0
debug("MPV is not running. Starting new instance...")
_start_mpv([])
return 0
@@ -369,7 +403,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
return 1
# List items (Default action or after clear)
-if list_mode or index_arg is None:
+if list_mode or (index_arg is None and not url_arg):
if not items:
debug("MPV playlist is empty.")
return 0
@@ -451,12 +485,18 @@ CMDLET = Cmdlet(
name=".pipe",
aliases=["pipe", "playlist", "queue", "ls-pipe"],
summary="Manage and play items in the MPV playlist via IPC",
usage=".pipe [index] [-clear]",
usage=".pipe [index|url] [-clear] [-url URL]",
args=[
CmdletArg(
name="index",
type="int",
description="Index of item to play or clear",
type="string", # Changed to string to allow URL detection
description="Index of item to play/clear, or URL to queue",
required=False
),
CmdletArg(
name="url",
type="string",
description="URL to queue",
required=False
),
CmdletArg(
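Resulting invocations under the new argument handling (URLs illustrative):

.pipe                                  # list the current MPV playlist
.pipe 3                                # play item 3
.pipe https://example.org/a.mp3        # positional URL: queue it, then list
.pipe -url https://example.org/a.mp3   # same, via the explicit flag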


@@ -141,8 +141,33 @@ def _ensure_storage_columns(payload: Dict[str, Any]) -> Dict[str, Any]:
return payload
title = payload.get("title") or payload.get("name") or payload.get("target") or payload.get("path") or "Result"
store_label = payload.get("origin") or payload.get("source") or origin_value
# Handle extension
extension = payload.get("ext", "")
if not extension and title:
path_obj = Path(str(title))
if path_obj.suffix:
extension = path_obj.suffix.lstrip('.')
title = path_obj.stem
# Handle size
size_val = payload.get("size") or payload.get("size_bytes")
size_str = ""
if size_val:
try:
size_bytes = int(size_val)
size_mb = size_bytes / (1024 * 1024)
size_str = f"{size_mb:.1f} MB"
except (ValueError, TypeError):
size_str = str(size_val)
normalized = dict(payload)
normalized["columns"] = [("Title", str(title)), ("Store", str(store_label))]
normalized["columns"] = [
("Title", str(title)),
("Ext", str(extension)),
("Store", str(store_label)),
("Size", str(size_str))
]
return normalized
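A worked example of the new Size column (payloads illustrative):

# {"size_bytes": 1572864} -> 1572864 / 1048576 = 1.5  -> "1.5 MB"
# {"size": "unknown"}     -> int() raises ValueError  -> "unknown"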