From 9eff65d1af2ce5051b2229aeb116242bb34202e2 Mon Sep 17 00:00:00 2001 From: nose Date: Thu, 27 Nov 2025 10:59:01 -0800 Subject: [PATCH] jjlj --- CLI.py | 99 +++++++-- LUA/main.lua | 121 +++++++++++ TUI/menu_actions.py | 31 --- TUI/modalscreen/export.py | 47 ++-- TUI/modalscreen/search.py | 319 ++++++++++----------------- TUI/pipeline_runner.py | 14 +- TUI/tui.py | 72 +++--- TUI/tui.tcss | 20 +- cmdlets/__init__.py | 4 +- cmdlets/add_file.py | 20 +- cmdlets/add_tags.py | 2 +- cmdlets/delete_file.py | 132 ++++++----- cmdlets/delete_tag.py | 238 ++++++++++++-------- cmdlets/download_data.py | 32 ++- cmdlets/get_file.py | 175 ++++----------- cmdlets/get_tag.py | 25 ++- cmdlets/output_json.py | 14 ++ cmdlets/pipe.py | 445 ++++++++++++++++++++++---------------- cmdlets/screen_shot.py | 137 ++++++------ cmdlets/search_file.py | 27 +++ helper/download.py | 6 +- helper/file_storage.py | 225 ++++++++++++++++--- helper/hydrus.py | 2 +- helper/local_library.py | 108 +++++++++ helper/mpv_ipc.py | 290 +++++++++++++++++++++++++ helper/search_provider.py | 132 ++++++++++- hydrus_health_check.py | 153 +++++++++++-- metadata.py | 92 ++++---- result_table.py | 189 ++++++---------- test_search.py | 23 ++ 30 files changed, 2099 insertions(+), 1095 deletions(-) create mode 100644 LUA/main.lua create mode 100644 cmdlets/output_json.py create mode 100644 helper/mpv_ipc.py create mode 100644 test_search.py diff --git a/CLI.py b/CLI.py index 77b52f8..4e3c09a 100644 --- a/CLI.py +++ b/CLI.py @@ -240,6 +240,7 @@ def _close_cli_worker_manager() -> None: global _CLI_WORKER_MANAGER if _CLI_WORKER_MANAGER: try: + # print("[CLI] Closing worker manager...", file=sys.stderr) _CLI_WORKER_MANAGER.close() except Exception: pass @@ -273,7 +274,7 @@ def _ensure_worker_manager(config: Dict[str, Any]) -> Optional[WorkerManagerType _CLI_WORKER_MANAGER.close() except Exception: pass - _CLI_WORKER_MANAGER = WorkerManager(resolved_root, auto_refresh_interval=0) + _CLI_WORKER_MANAGER = WorkerManager(resolved_root, auto_refresh_interval=0.5) manager = _CLI_WORKER_MANAGER config['_worker_manager'] = manager if manager and not _CLI_ORPHAN_CLEANUP_DONE: @@ -586,17 +587,72 @@ def _create_cmdlet_cli(): app = typer.Typer(help="Medeia-Macina CLI") + @app.command("pipeline") + def pipeline( + command: str = typer.Option(..., "--pipeline", "-p", help="Pipeline command string to execute"), + seeds_json: Optional[str] = typer.Option(None, "--seeds-json", "-s", help="JSON string of seed items") + ): + """Execute a pipeline command non-interactively.""" + import shlex + import json + import pipeline as ctx + + # Load config + config = _load_cli_config() + + # Initialize debug logging if enabled + if config: + from helper.logger import set_debug + set_debug(config.get("debug", False)) + + # Handle seeds if provided + if seeds_json: + try: + seeds = json.loads(seeds_json) + # If seeds is a list, use it directly. If single item, wrap in list. 
+ if not isinstance(seeds, list): + seeds = [seeds] + + # Set seeds as the result of a "virtual" previous stage + # This allows the first command in the pipeline to receive them as input + ctx.set_last_result_items_only(seeds) + except Exception as e: + print(f"Error parsing seeds JSON: {e}") + return + + try: + tokens = shlex.split(command) + except ValueError: + tokens = command.split() + + if not tokens: + return + + # Execute + _execute_pipeline(tokens) + @app.command("repl") def repl(): """Start interactive REPL for cmdlets with autocomplete.""" banner = """ -Medeia-Macina -======================================= -Commands: help | exit | --help -Example: search-file --help + Medeia-Macina +===================== +|123456789|ABCDEFGHI| +|246813579|JKLMNOPQR| +|369369369|STUVWXYZ0| +|483726159|ABCDEFGHI| +|516273849|JKLMNOPQR| +|639639639|STUVWXYZ0| +|753186429|ABCDEFGHI| +|876543219|JKLMNOPQR| +|999999999|STUVWXYZ0| +===================== """ print(banner) + # Configurable prompt + prompt_text = ">>>|" + # Pre-acquire Hydrus session key at startup (like hub-ui does) try: config = _load_cli_config() @@ -621,10 +677,12 @@ Example: search-file --help # Check MPV availability at startup try: - from hydrus_health_check import check_mpv_availability + from hydrus_health_check import check_mpv_availability, initialize_matrix_health_check, initialize_hydrus_health_check check_mpv_availability() + initialize_hydrus_health_check(config) + initialize_matrix_health_check(config) except Exception as e: - debug(f"⚠ Could not check MPV availability: {e}") + debug(f"⚠ Could not check service availability: {e}") except Exception: pass # Silently ignore if config loading fails @@ -635,8 +693,8 @@ Example: search-file --help style = Style.from_dict({ 'cmdlet': '#ffffff', # white 'argument': '#3b8eea', # blue-ish - 'value': '#ce9178', # red-ish - 'string': '#ce55ff', # purple + 'value': "#9a3209", # red-ish + 'string': "#6d0d93", # purple 'pipe': '#4caf50', # green }) @@ -646,16 +704,17 @@ Example: search-file --help style=style ) - def get_input(prompt: str = ">>>|") -> str: + def get_input(prompt: str = prompt_text) -> str: return session.prompt(prompt) else: - def get_input(prompt: str = ">>>|") -> str: + def get_input(prompt: str = prompt_text) -> str: return input(prompt) while True: + print("#-------------------------------------------------------------------------#") try: - user_input = get_input(">>>|").strip() + user_input = get_input(prompt_text).strip() except (EOFError, KeyboardInterrupt): print("\nGoodbye!") break @@ -741,6 +800,17 @@ Example: search-file --help finally: if pipeline_ctx_ref: pipeline_ctx_ref.clear_current_command_text() + + @app.callback(invoke_without_command=True) + def main_callback(ctx: typer.Context): + """ + Medeia-Macina CLI entry point. + If no command is provided, starts the interactive REPL. 
+ """ + # Check if a subcommand is invoked + # Note: ctx.invoked_subcommand is None if no command was passed + if ctx.invoked_subcommand is None: + repl() return app @@ -933,7 +1003,7 @@ def _execute_pipeline(tokens: list): stages.append(['.pipe']) # Force should_expand_to_command to False so we fall through to filtering should_expand_to_command = False - + elif isinstance(piped_result, (list, tuple)): first_item = piped_result[0] if piped_result else None if isinstance(first_item, dict) and first_item.get('format_id') is not None: @@ -1349,7 +1419,8 @@ def _execute_cmdlet(cmd_name: str, args: list): # Special case: if this was a youtube search, print a hint about auto-piping if cmd_name == 'search-file' and filtered_args and 'youtube' in filtered_args: - print("\n[Hint] Type @N to play a video in MPV (e.g. @1)") + # print("\n[Hint] Type @N to play a video in MPV (e.g. @1)") + pass else: # Fallback to raw output if ResultTable not available for emitted in pipeline_ctx.emits: diff --git a/LUA/main.lua b/LUA/main.lua new file mode 100644 index 0000000..14af2d9 --- /dev/null +++ b/LUA/main.lua @@ -0,0 +1,121 @@ +local mp = require 'mp' +local utils = require 'mp.utils' +local msg = require 'mp.msg' + +local M = {} + +-- Configuration +local opts = { + python_path = "python", + cli_path = nil -- Will be auto-detected if nil +} + +-- Detect CLI path +local script_dir = mp.get_script_directory() +if not opts.cli_path then + -- Assuming the structure is repo/LUA/script.lua and repo/CLI.py + -- We need to go up one level + local parent_dir = script_dir:match("(.*)[/\\]") + if parent_dir then + opts.cli_path = parent_dir .. "/CLI.py" + else + opts.cli_path = "CLI.py" -- Fallback + end +end + +-- Helper to run pipeline +function M.run_pipeline(pipeline_cmd, seeds) + local args = {opts.python_path, opts.cli_path, "pipeline", pipeline_cmd} + + if seeds then + local seeds_json = utils.format_json(seeds) + table.insert(args, "--seeds") + table.insert(args, seeds_json) + end + + msg.info("Running pipeline: " .. pipeline_cmd) + local res = utils.subprocess({ + args = args, + cancellable = false, + }) + + if res.status ~= 0 then + msg.error("Pipeline error: " .. (res.stderr or "unknown")) + mp.osd_message("Error: " .. (res.stderr or "unknown"), 5) + return nil + end + + return res.stdout +end + +-- Helper to run pipeline and parse JSON output +function M.run_pipeline_json(pipeline_cmd, seeds) + -- Append | output-json if not present + if not pipeline_cmd:match("output%-json$") then + pipeline_cmd = pipeline_cmd .. " | output-json" + end + + local output = M.run_pipeline(pipeline_cmd, seeds) + if output then + local ok, data = pcall(utils.parse_json, output) + if ok then + return data + else + msg.error("Failed to parse JSON: " .. output) + return nil + end + end + return nil +end + +-- Command: Get info for current file +function M.get_file_info() + local path = mp.get_property("path") + if not path then return end + + -- We can pass the path as a seed item + local seed = {{path = path}} + + -- Run pipeline: get-metadata + local data = M.run_pipeline_json("get-metadata", seed) + + if data then + -- Display metadata + msg.info("Metadata: " .. 
utils.format_json(data)) + mp.osd_message("Metadata loaded (check console)", 3) + end +end + +-- Command: Delete current file +function M.delete_current_file() + local path = mp.get_property("path") + if not path then return end + + local seed = {{path = path}} + + M.run_pipeline("delete-file", seed) + mp.osd_message("File deleted", 3) + mp.command("playlist-next") +end + +-- Menu integration with UOSC +function M.show_menu() + local menu_data = { + title = "Medios Macina", + items = { + { title = "Get Metadata", value = "script-binding medios-info", hint = "Ctrl+i" }, + { title = "Delete File", value = "script-binding medios-delete", hint = "Ctrl+Del" }, + } + } + + local json = utils.format_json(menu_data) + mp.commandv('script-message-to', 'uosc', 'open-menu', json) +end + +-- Keybindings +mp.add_key_binding("m", "medios-menu", M.show_menu) +mp.add_key_binding("mbtn_right", "medios-menu-right-click", M.show_menu) +mp.add_key_binding("ctrl+i", "medios-info", M.get_file_info) +mp.add_key_binding("ctrl+del", "medios-delete", M.delete_current_file) + +return M diff --git a/TUI/menu_actions.py b/TUI/menu_actions.py index 7c882c2..a6aff1d 100644 --- a/TUI/menu_actions.py +++ b/TUI/menu_actions.py @@ -68,37 +68,6 @@ def group_tags_by_namespace(tags: Sequence[str]) -> Dict[str, List[str]]: return grouped -def build_metadata_snapshot(file_path: Path) -> Dict[str, Any]: - """Load any available sidecar metadata for the selected file.""" - - snapshot: Dict[str, Any] = { - "file": str(file_path), - "tags": group_tags_by_namespace(load_tags(file_path)), - } - - try: - sidecar = metadata._derive_sidecar_path(file_path) - if sidecar.is_file(): - title, tags, notes = metadata._read_sidecar_metadata(sidecar) - snapshot["sidecar"] = { - "title": title, - "tags": group_tags_by_namespace(tags), - "notes": notes, - } - except Exception: - snapshot["sidecar"] = None - - return snapshot - - -def summarize_result(result: Dict[str, Any]) -> str: - """Build a one-line summary for a pipeline result row.""" - - title = result.get("title") or result.get("identifier") or result.get("file_path") - source = result.get("source") or result.get("cmdlet") or "result" - return f"{source}: {title}" if title else source - - def normalize_tags(tags: Iterable[str]) -> List[str]: """Expose metadata.normalize_tags for callers that imported the old helper.""" diff --git a/TUI/modalscreen/export.py b/TUI/modalscreen/export.py index 3ab7e2c..4a5731e 100644 --- a/TUI/modalscreen/export.py +++ b/TUI/modalscreen/export.py @@ -69,33 +69,34 @@ class ExportModal(ModalScreen): """ ext_lower = ext.lower() if ext else '' - # Audio formats - audio_exts = {'.mp3', '.flac', '.wav', '.aac', '.ogg', '.m4a', '.wma', '.opus', '.mka'} - audio_formats = [("MKA", "mka"), ("MP3", "mp3"), ("M4A", "m4a"), ("FLAC", "flac"), ("WAV", "wav"), ("AAC", "aac"), ("OGG", "ogg"), ("Opus", "opus")] + from helper.utils_constant import mime_maps - # Video formats (can have audio too) - video_exts = {'.mp4', '.mkv', '.webm', '.avi', '.mov', '.flv', '.wmv', '.m4v', '.ts', '.mpg', '.mpeg'} - video_formats = [("MP4", "mp4"), ("MKV", "mkv"), ("WebM", "webm"), ("AVI", "avi"), ("MOV", "mov")] + found_type = "unknown" - # Image formats - image_exts = {'.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.tiff', '.ico'} - image_formats = [("JPG", "jpg"), ("PNG", "png"), ("WebP", "webp"), ("GIF", "gif"), ("BMP", "bmp")] + # Find type based on extension + for category, formats in mime_maps.items(): + for fmt_key, fmt_info in formats.items(): + if fmt_info.get("ext") == ext_lower: 
+ found_type = category + break + if found_type != "unknown": + break - # Document formats - no conversion for now - document_exts = {'.pdf', '.epub', '.txt', '.docx', '.doc', '.rtf', '.md', '.html', '.mobi', '.cbz', '.cbr'} - document_formats = [] + # Build format options for the found type + format_options = [] - if ext_lower in audio_exts: - return ('audio', audio_formats) - elif ext_lower in video_exts: - return ('video', video_formats) - elif ext_lower in image_exts: - return ('image', image_formats) - elif ext_lower in document_exts: - return ('document', document_formats) - else: - # Default to audio if unknown - return ('unknown', audio_formats) + # If unknown, fallback to audio (matching legacy behavior) + target_type = found_type if found_type in mime_maps else "audio" + + if target_type in mime_maps: + # Sort formats alphabetically + sorted_formats = sorted(mime_maps[target_type].items()) + for fmt_key, fmt_info in sorted_formats: + label = fmt_key.upper() + value = fmt_key + format_options.append((label, value)) + + return (target_type, format_options) def _get_library_options(self) -> list: """Get available library options from config.json.""" diff --git a/TUI/modalscreen/search.py b/TUI/modalscreen/search.py index 7ba37bf..fcfdea1 100644 --- a/TUI/modalscreen/search.py +++ b/TUI/modalscreen/search.py @@ -15,6 +15,8 @@ import asyncio # Add parent directory to path for imports sys.path.insert(0, str(Path(__file__).parent.parent)) from config import load_config +from result_table import ResultTable +from helper.search_provider import get_provider logger = logging.getLogger(__name__) @@ -49,7 +51,8 @@ class SearchModal(ModalScreen): self.results_table: Optional[DataTable] = None self.tags_textarea: Optional[TextArea] = None self.library_source_select: Optional[Select] = None - self.current_results: List[dict] = [] + self.current_results: List[Any] = [] # List of SearchResult objects + self.current_result_table: Optional[ResultTable] = None self.is_searching = False self.current_worker = None # Track worker for search operations @@ -125,124 +128,6 @@ class SearchModal(ModalScreen): # Focus on search input self.search_input.focus() - async def _search_openlibrary(self, query: str) -> List[dict]: - """Search OpenLibrary for books.""" - try: - from helper.search_provider import get_provider - - logger.info(f"[search-modal] Searching OpenLibrary for: {query}") - - # Get the OpenLibrary provider (now has smart search built-in) - provider = get_provider("openlibrary") - if not provider: - logger.error("[search-modal] OpenLibrary provider not available") - return [] - - # Search using the provider (smart search is now default) - search_results = provider.search(query, limit=20) - - formatted_results = [] - for result in search_results: - # Extract metadata from SearchResult.full_metadata - metadata = result.full_metadata or {} - - formatted_results.append({ - "title": result.title, - "author": ", ".join(metadata.get("authors", [])) if metadata.get("authors") else "Unknown", - "year": metadata.get("year", ""), - "publisher": metadata.get("publisher", ""), - "isbn": metadata.get("isbn", ""), - "oclc": metadata.get("oclc", ""), - "lccn": metadata.get("lccn", ""), - "openlibrary_id": metadata.get("olid", ""), - "pages": metadata.get("pages", ""), - "language": metadata.get("language", ""), - "source": "openlibrary", - "columns": result.columns, - "raw_data": metadata - }) - - logger.info(f"[search-modal] Found {len(formatted_results)} OpenLibrary results") - return formatted_results - - 
except Exception as e: - logger.error(f"[search-modal] OpenLibrary search error: {e}", exc_info=True) - import traceback - traceback.print_exc() - return [] - - async def _search_soulseek(self, query: str) -> List[dict]: - """Search Soulseek for music with automatic worker tracking.""" - try: - from helper.search_provider import get_provider - - # Create worker for tracking - worker = None - if self.app_instance and hasattr(self.app_instance, 'create_worker'): - worker = self.app_instance.create_worker( - 'soulseek', - title=f"Soulseek Search: {query[:40]}", - description=f"Searching P2P network for music" - ) - self.current_worker = worker - - if worker: - worker.log_step("Connecting to Soulseek peer network...") - - logger.info(f"[search-modal] Searching Soulseek for: {query}") - provider = get_provider("soulseek") - search_results = provider.search(query, limit=20) - - if worker: - worker.log_step(f"Search returned {len(search_results)} results") - - logger.info(f"[search-modal] Found {len(search_results)} Soulseek results") - - # Format results for display - formatted_results = [] - for idx, result in enumerate(search_results): - metadata = result.full_metadata or {} - artist = metadata.get('artist', '') - album = metadata.get('album', '') - title = result.title - track_num = metadata.get('track_num', '') - size_bytes = result.size_bytes or 0 - - # Format size as human-readable - if size_bytes > 1024 * 1024: - size_str = f"{size_bytes / (1024 * 1024):.1f} MB" - elif size_bytes > 1024: - size_str = f"{size_bytes / 1024:.1f} KB" - else: - size_str = f"{size_bytes} B" - - # Build columns for display - columns = [ - ("#", str(idx + 1)), - ("Title", title[:50] if title else "Unknown"), - ("Artist", artist[:30] if artist else "(no artist)"), - ("Album", album[:30] if album else ""), - ] - - formatted_results.append({ - "title": title if title else "Unknown", - "artist": artist if artist else "(no artist)", - "album": album, - "track": track_num, - "filesize": size_str, - "bitrate": "", # Not available in Soulseek results - "source": "soulseek", - "columns": columns, - "raw_data": result.to_dict() - }) - - return formatted_results - except Exception as e: - logger.error(f"[search-modal] Soulseek search error: {e}") - import traceback - traceback.print_exc() - return [] - async def _perform_search(self) -> None: """Perform the actual search based on selected source.""" if not self.search_input or not self.source_select or not self.results_table: @@ -257,87 +142,69 @@ class SearchModal(ModalScreen): source = self.source_select.value # Clear existing results - self.results_table.clear() + self.results_table.clear(columns=True) self.current_results = [] + self.current_result_table = None self.is_searching = True + # Create worker for tracking + if self.app_instance and hasattr(self.app_instance, 'create_worker'): + self.current_worker = self.app_instance.create_worker( + source, + title=f"{source.capitalize()} Search: {query[:40]}", + description=f"Searching {source} for: {query}" + ) + self.current_worker.log_step(f"Connecting to {source}...") + try: - if source == "openlibrary": - results = await self._search_openlibrary(query) - elif source == "soulseek": - results = await self._search_soulseek(query) - else: - logger.warning(f"[search-modal] Unknown source: {source}") + provider = get_provider(source) + if not provider: + logger.error(f"[search-modal] Provider not available: {source}") if self.current_worker: - self.current_worker.finish("error", "Unknown search source") + 
self.current_worker.finish("error", f"Provider not available: {source}") return - + + logger.info(f"[search-modal] Searching {source} for: {query}") + results = provider.search(query, limit=20) self.current_results = results - # Populate table with results - if results: - # Check if first result has columns field - first_result = results[0] - if "columns" in first_result and first_result["columns"]: - # Use dynamic columns from result - # Clear existing columns and rebuild based on result columns - self.results_table.clear() - - # Extract column headers from first result's columns field - column_headers = [col[0] for col in first_result["columns"]] - - # Remove existing columns (we'll readd them with the right headers) - # Note: This is a workaround since Textual's DataTable doesn't support dynamic column management well - # For now, we just use the dynamic column headers from the result - logger.info(f"[search-modal] Using dynamic columns: {column_headers}") - - # Populate rows using the column order from results - for result in results: - if "columns" in result and result["columns"]: - # Extract values in column order - row_data = [col[1] for col in result["columns"]] - self.results_table.add_row(*row_data) - else: - # Fallback for results without columns - logger.warning(f"[search-modal] Result missing columns field: {result.get('title', 'Unknown')}") - else: - # Fallback to original hardcoded behavior if columns not available - logger.info("[search-modal] No dynamic columns found, using default formatting") - - for result in results: - if source == "openlibrary": - # Format OpenLibrary results (original hardcoded) - year = str(result.get("year", ""))[:4] if result.get("year") else "" - details = f"ISBN: {result.get('isbn', '')}" if result.get('isbn') else "" - if result.get('openlibrary_id'): - details += f" | OL: {result.get('openlibrary_id')}" - - row_data = [ - result["title"][:60], - result["author"][:35], - year, - details[:40] - ] - else: # soulseek - row_data = [ - result["title"][:50], - result["artist"][:30], - result["album"][:30], - result['filesize'] - ] - - self.results_table.add_row(*row_data) - else: - # Add a "no results" message - self.results_table.add_row("No results found", "", "", "") + if self.current_worker: + self.current_worker.log_step(f"Found {len(results)} results") - # Finish worker if tracking + # Create ResultTable + table = ResultTable(f"Search Results: {query}") + for res in results: + row = table.add_row() + # Add columns from result.columns + if res.columns: + for name, value in res.columns: + row.add_column(name, value) + else: + # Fallback if no columns defined + row.add_column("Title", res.title) + row.add_column("Target", res.target) + + self.current_result_table = table + + # Populate UI + if table.rows: + # Add headers + headers = [col.name for col in table.rows[0].columns] + self.results_table.add_columns(*headers) + # Add rows + for row_vals in table.to_datatable_rows(): + self.results_table.add_row(*row_vals) + else: + self.results_table.add_columns("Message") + self.results_table.add_row("No results found") + + # Finish worker if self.current_worker: self.current_worker.finish("completed", f"Found {len(results)} results") except Exception as e: - logger.error(f"[search-modal] Search error: {e}") + logger.error(f"[search-modal] Search error: {e}", exc_info=True) if self.current_worker: self.current_worker.finish("error", f"Search failed: {str(e)}") @@ -382,35 +249,58 @@ class SearchModal(ModalScreen): selected_row = 
self.results_table.cursor_row if 0 <= selected_row < len(self.current_results): result = self.current_results[selected_row] + + # Convert to dict if needed for submission + if hasattr(result, 'to_dict'): + result_dict = result.to_dict() + else: + result_dict = result + # Get tags from textarea tags_text = self.tags_textarea.text if self.tags_textarea else "" # Get library source (if OpenLibrary) library_source = self.library_source_select.value if self.library_source_select else "local" # Add tags and source to result - result["tags_text"] = tags_text - result["library_source"] = library_source + result_dict["tags_text"] = tags_text + result_dict["library_source"] = library_source # Post message and dismiss - self.post_message(self.SearchSelected(result)) - self.dismiss(result) + self.post_message(self.SearchSelected(result_dict)) + self.dismiss(result_dict) else: logger.warning("[search-modal] No result selected for submission") elif button_id == "cancel-button": self.dismiss(None) - def _populate_tags_from_result(self, result: dict) -> None: + def _populate_tags_from_result(self, result: Any) -> None: """Populate the tags textarea from a selected result.""" if not self.tags_textarea: return + # Handle both SearchResult objects and dicts + if hasattr(result, 'full_metadata'): + metadata = result.full_metadata or {} + source = result.origin + title = result.title + else: + # Handle dict (legacy or from to_dict) + if 'full_metadata' in result: + metadata = result['full_metadata'] or {} + elif 'raw_data' in result: + metadata = result['raw_data'] or {} + else: + metadata = result + + source = result.get('origin', result.get('source', '')) + title = result.get('title', '') + # Format tags based on result source - if result.get("source") == "openlibrary": + if source == "openlibrary": # For OpenLibrary: title, author, year - title = result.get("title", "") - author = result.get("author", "") - year = result.get("year", "") + author = ", ".join(metadata.get("authors", [])) if isinstance(metadata.get("authors"), list) else metadata.get("authors", "") + year = str(metadata.get("year", "")) tags = [] if title: tags.append(title) @@ -419,38 +309,51 @@ class SearchModal(ModalScreen): if year: tags.append(year) tags_text = "\n".join(tags) - else: # soulseek + elif source == "soulseek": # For Soulseek: artist, album, title, track tags = [] - if result.get("artist"): - tags.append(result["artist"]) - if result.get("album"): - tags.append(result["album"]) - if result.get("track"): - tags.append(f"Track {result['track']}") - if result.get("title"): - tags.append(result["title"]) + if metadata.get("artist"): + tags.append(metadata["artist"]) + if metadata.get("album"): + tags.append(metadata["album"]) + if metadata.get("track_num"): + tags.append(f"Track {metadata['track_num']}") + if title: + tags.append(title) + tags_text = "\n".join(tags) + else: + # Generic fallback + tags = [title] tags_text = "\n".join(tags) self.tags_textarea.text = tags_text logger.info(f"[search-modal] Populated tags textarea from result") - async def _download_book(self, result: dict) -> None: + async def _download_book(self, result: Any) -> None: """Download a book from OpenLibrary using unified downloader.""" try: from helper.unified_book_downloader import UnifiedBookDownloader from config import load_config - logger.info(f"[search-modal] Starting download for: {result.get('title')}") + # Convert SearchResult to dict if needed + if hasattr(result, 'to_dict'): + result_dict = result.to_dict() + # Ensure raw_data is populated 
for downloader + if 'raw_data' not in result_dict and result.full_metadata: + result_dict['raw_data'] = result.full_metadata + else: + result_dict = result + + logger.info(f"[search-modal] Starting download for: {result_dict.get('title')}") config = load_config() downloader = UnifiedBookDownloader(config=config) # Get download options for this book - options = downloader.get_download_options(result) + options = downloader.get_download_options(result_dict) if not options['methods']: - logger.warning(f"[search-modal] No download methods available for: {result.get('title')}") + logger.warning(f"[search-modal] No download methods available for: {result_dict.get('title')}") # Could show a modal dialog here return diff --git a/TUI/pipeline_runner.py b/TUI/pipeline_runner.py index fb6d264..1ebfc9b 100644 --- a/TUI/pipeline_runner.py +++ b/TUI/pipeline_runner.py @@ -40,6 +40,7 @@ class PipelineStageResult: name: str args: Sequence[str] emitted: List[Any] = field(default_factory=list) + result_table: Optional[Any] = None # ResultTable object if available status: str = "pending" error: Optional[str] = None @@ -52,6 +53,7 @@ class PipelineRunResult: success: bool stages: List[PipelineStageResult] = field(default_factory=list) emitted: List[Any] = field(default_factory=list) + result_table: Optional[Any] = None # Final ResultTable object if available stdout: str = "" stderr: str = "" error: Optional[str] = None @@ -146,6 +148,7 @@ class PipelineExecutor: if index == len(stages) - 1: result.emitted = stage.emitted + result.result_table = stage.result_table else: piped_result = stage.emitted @@ -211,6 +214,10 @@ class PipelineExecutor: emitted = list(getattr(pipeline_ctx, "emits", []) or []) stage.emitted = emitted + + # Capture the ResultTable if the cmdlet set one + # Check display table first (overlay), then last result table + stage.result_table = ctx.get_display_table() or ctx.get_last_result_table() if return_code != 0: stage.status = "failed" @@ -224,7 +231,12 @@ class PipelineExecutor: label = f"[Stage {index + 1}/{total}] {cmd_name} {stage.status}" self._worker_manager.log_step(worker_id, label) - ctx.set_last_result_table(None, emitted) + # Don't clear the table if we just captured it, but ensure items are set for next stage + # If we have a table, we should probably keep it in ctx for history if needed + # But for pipeline execution, we mainly care about passing items to next stage + # ctx.set_last_result_table(None, emitted) <-- This was clearing it + + # Ensure items are available for next stage ctx.set_last_items(emitted) return stage diff --git a/TUI/tui.py b/TUI/tui.py index 0f634bd..59fef72 100644 --- a/TUI/tui.py +++ b/TUI/tui.py @@ -3,7 +3,7 @@ from __future__ import annotations import sys from pathlib import Path -from typing import Any, Dict, List, Optional, Sequence +from typing import Any, List, Optional, Sequence from textual import work from textual.app import App, ComposeResult @@ -32,10 +32,9 @@ for path in (BASE_DIR, ROOT_DIR): from menu_actions import ( # type: ignore # noqa: E402 PIPELINE_PRESETS, PipelinePreset, - build_metadata_snapshot, - summarize_result, ) from pipeline_runner import PipelineExecutor, PipelineRunResult # type: ignore # noqa: E402 +from result_table import ResultTable # type: ignore # noqa: E402 class PresetListItem(ListItem): @@ -73,6 +72,7 @@ class PipelineHubApp(App): self.worker_table: Optional[DataTable] = None self.preset_list: Optional[ListView] = None self.status_panel: Optional[Static] = None + self.current_result_table: Optional[ResultTable] = 
None self._pipeline_running = False # ------------------------------------------------------------------ @@ -81,7 +81,7 @@ class PipelineHubApp(App): def compose(self) -> ComposeResult: # noqa: D401 - Textual compose hook yield Header(show_clock=True) with Container(id="app-shell"): - with Horizontal(id="command-row"): + with Horizontal(id="command-pane"): self.command_input = Input( placeholder='download-data "" | merge-file | add-tag | add-file -storage local', id="pipeline-input", @@ -174,7 +174,7 @@ class PipelineHubApp(App): return index = event.cursor_row if 0 <= index < len(self.result_items): - self._display_metadata(self.result_items[index]) + self._display_metadata(index) # ------------------------------------------------------------------ # Pipeline execution helpers @@ -216,6 +216,7 @@ class PipelineHubApp(App): else: self.result_items = [] + self.current_result_table = run_result.result_table self._populate_results_table() self.refresh_workers() @@ -228,40 +229,45 @@ class PipelineHubApp(App): def _populate_results_table(self) -> None: if not self.results_table: return - self.results_table.clear() - if not self.result_items: - self.results_table.add_row("—", "No results", "", "") - return - for idx, item in enumerate(self.result_items, start=1): - if isinstance(item, dict): - title = summarize_result(item) - source = item.get("source") or item.get("cmdlet_name") or item.get("cmdlet") or "—" - file_path = item.get("file_path") or item.get("path") or "—" - else: - title = str(item) - source = "—" - file_path = "—" - self.results_table.add_row(str(idx), title, source, file_path, key=str(idx - 1)) + self.results_table.clear(columns=True) - def _display_metadata(self, item: Any) -> None: + if self.current_result_table and self.current_result_table.rows: + # Use ResultTable headers from the first row + first_row = self.current_result_table.rows[0] + headers = ["#"] + [col.name for col in first_row.columns] + self.results_table.add_columns(*headers) + + rows = self.current_result_table.to_datatable_rows() + for idx, row_values in enumerate(rows, 1): + self.results_table.add_row(str(idx), *row_values, key=str(idx - 1)) + else: + # Fallback or empty state + self.results_table.add_columns("Row", "Title", "Source", "File") + if not self.result_items: + self.results_table.add_row("—", "No results", "", "") + return + + # Fallback for items without a table + for idx, item in enumerate(self.result_items, start=1): + self.results_table.add_row(str(idx), str(item), "—", "—", key=str(idx - 1)) + + def _display_metadata(self, index: int) -> None: if not self.metadata_tree: return root = self.metadata_tree.root root.label = "Metadata" root.remove_children() - payload: Dict[str, Any] - if isinstance(item, dict): - file_path = item.get("file_path") or item.get("path") - if file_path: - payload = build_metadata_snapshot(Path(file_path)) + if self.current_result_table and 0 <= index < len(self.current_result_table.rows): + row = self.current_result_table.rows[index] + for col in row.columns: + root.add(f"[b]{col.name}[/b]: {col.value}") + elif 0 <= index < len(self.result_items): + item = self.result_items[index] + if isinstance(item, dict): + self._populate_tree_node(root, item) else: - payload = item - else: - payload = {"value": str(item)} - - self._populate_tree_node(root, payload) - root.expand_all() + root.add(str(item)) def _populate_tree_node(self, node, data: Any) -> None: if isinstance(data, dict): @@ -278,14 +284,14 @@ class PipelineHubApp(App): def _clear_log(self) -> None: 
self.log_lines = [] if self.log_output: - self.log_output.value = "" + self.log_output.text = "" def _append_log_line(self, line: str) -> None: self.log_lines.append(line) if len(self.log_lines) > 500: self.log_lines = self.log_lines[-500:] if self.log_output: - self.log_output.value = "\n".join(self.log_lines) + self.log_output.text = "\n".join(self.log_lines) def _append_block(self, text: str) -> None: for line in text.strip().splitlines(): diff --git a/TUI/tui.tcss b/TUI/tui.tcss index 3f2fd19..554b69e 100644 --- a/TUI/tui.tcss +++ b/TUI/tui.tcss @@ -6,7 +6,7 @@ layout: vertical; } -#command-row { +#command-pane { width: 100%; height: auto; background: $boost; @@ -18,7 +18,6 @@ width: 1fr; min-height: 3; padding: 0 1; - margin-right: 1; background: $surface; color: $text; border: round $primary; @@ -30,7 +29,9 @@ } #status-panel { - min-width: 20; + width: auto; + max-width: 25; + height: 3; text-style: bold; content-align: center middle; padding: 0 1; @@ -52,7 +53,7 @@ } #left-pane { - max-width: 48; + max-width: 60; } .section-title { @@ -67,6 +68,11 @@ margin-bottom: 1; } +#preset-list { + height: 25; + border: solid $secondary; +} + #log-output { height: 16; } @@ -97,4 +103,10 @@ .status-error { background: $error 20%; color: $error; +} + +#run-button { + width: auto; + min-width: 10; + margin: 0 1; } \ No newline at end of file diff --git a/cmdlets/__init__.py b/cmdlets/__init__.py index 5912b6a..904de25 100644 --- a/cmdlets/__init__.py +++ b/cmdlets/__init__.py @@ -119,7 +119,9 @@ for filename in os.listdir(cmdlet_dir): for alias in cmdlet_obj.aliases: normalized_alias = alias.replace('_', '-').lower() REGISTRY[normalized_alias] = run_fn - except Exception: + except Exception as e: + import sys + print(f"Error importing cmdlet '{mod_name}': {e}", file=sys.stderr) continue # Import root-level modules that also register cmdlets diff --git a/cmdlets/add_file.py b/cmdlets/add_file.py index 235d421..61c09ea 100644 --- a/cmdlets/add_file.py +++ b/cmdlets/add_file.py @@ -371,7 +371,7 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int: # Extract tags/known URLs from pipeline objects if available pipe_object_tags = extract_tags_from_result(result) if pipe_object_tags: - log(f"Extracted {len(pipe_object_tags)} tag(s) from pipeline result: {', '.join(pipe_object_tags[:5])}", file=sys.stderr) + debug(f"Extracted {len(pipe_object_tags)} tag(s) from pipeline result: {', '.join(pipe_object_tags[:5])}", file=sys.stderr) pipe_known_urls = extract_known_urls_from_result(result) # Resolve media path: get from piped result @@ -574,11 +574,11 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int: try: file_provider = get_file_provider(provider_name, config) if file_provider is None: - log(f"❌ File provider '{provider_name}' not available", file=sys.stderr) + log(f"File provider '{provider_name}' not available", file=sys.stderr) return 1 hoster_url = file_provider.upload(media_path) - log(f"✅ File uploaded to {provider_name}: {hoster_url}", file=sys.stderr) + log(f"File uploaded to {provider_name}: {hoster_url}", file=sys.stderr) # Associate the URL with the file in Hydrus if possible current_hash = locals().get('file_hash') @@ -590,12 +590,12 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int: client = hydrus_wrapper.get_client(config) if client: client.associate_url(current_hash, hoster_url) - log(f"✅ Associated URL with file hash {current_hash}", file=sys.stderr) + debug(f"Associated URL with file hash {current_hash}", 
file=sys.stderr) except Exception as exc: - log(f"⚠️ Could not associate URL with Hydrus file: {exc}", file=sys.stderr) + log(f"Could not associate URL with Hydrus file: {exc}", file=sys.stderr) except Exception as exc: - log(f"❌ {provider_name} upload failed: {exc}", file=sys.stderr) + log(f"{provider_name} upload failed: {exc}", file=sys.stderr) return 1 if delete_after_upload: @@ -632,7 +632,7 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int: log("❌ No local storage path configured. Set 'storage.local.path' in config.json", file=sys.stderr) return 1 - log(f"Moving into configured local library: {resolved_dir}", file=sys.stderr) + debug(f"Moving into configured local library: {resolved_dir}", file=sys.stderr) exit_code, dest_path = _handle_local_transfer(media_path, Path(resolved_dir), result, config) # After successful local transfer, emit result for pipeline continuation @@ -713,7 +713,7 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int: except Exception as exc: log(f"❌ Failed to compute file hash: {exc}", file=sys.stderr) return 1 - log(f"File hash: {file_hash}", file=sys.stderr) + debug(f"File hash: {file_hash}", file=sys.stderr) # Read sidecar tags and known URLs first (for tagging) @@ -789,9 +789,9 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int: config=config, tags=tags, ) - log(f"✅ File uploaded to Hydrus: {file_hash}", file=sys.stderr) + log(f"Hydrus: {file_hash}", file=sys.stderr) except Exception as exc: - log(f"❌ Hydrus upload failed: {exc}", file=sys.stderr) + log(f"Failed: {exc}", file=sys.stderr) return 1 # Associate known URLs in Hydrus metadata diff --git a/cmdlets/add_tags.py b/cmdlets/add_tags.py index 5b5faec..0709d55 100644 --- a/cmdlets/add_tags.py +++ b/cmdlets/add_tags.py @@ -13,7 +13,7 @@ import pipeline as ctx from ._shared import normalize_result_input, filter_results_by_temp from helper import hydrus as hydrus_wrapper from helper.local_library import read_sidecar, write_sidecar, find_sidecar, has_sidecar, LocalLibraryDB -from metadata import rename_by_metadata +from metadata import rename from ._shared import Cmdlet, CmdletArg, normalize_hash, parse_tag_arguments, expand_tag_groups, parse_cmdlet_args from config import get_local_storage_path diff --git a/cmdlets/delete_file.py b/cmdlets/delete_file.py index 1140038..da6ed8a 100644 --- a/cmdlets/delete_file.py +++ b/cmdlets/delete_file.py @@ -4,7 +4,7 @@ from typing import Any, Dict, Sequence import json import sys -from helper.logger import log +from helper.logger import debug, log import sqlite3 from pathlib import Path @@ -84,64 +84,28 @@ def _delete_database_entry(db_path: Path, file_path: str) -> bool: return False -def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: - # Help - try: - if any(str(a).lower() in {"-?", "/?", "--help", "-h", "help", "--cmdlet"} for a in args): - log(json.dumps(CMDLET, ensure_ascii=False, indent=2)) - return 0 - except Exception: - pass - - # Handle @N selection which creates a list - extract the first item - if isinstance(result, list) and len(result) > 0: - result = result[0] - - # Parse overrides and options - override_hash: str | None = None - conserve: str | None = None - lib_root: str | None = None - reason_tokens: list[str] = [] - i = 0 - while i < len(args): - token = args[i] - low = str(token).lower() - if low in {"-hash", "--hash", "hash"} and i + 1 < len(args): - override_hash = str(args[i + 1]).strip() - i += 2 - continue - if low in {"-conserve", 
"--conserve"} and i + 1 < len(args): - value = str(args[i + 1]).strip().lower() - if value in {"local", "hydrus"}: - conserve = value - i += 2 - continue - if low in {"-lib-root", "--lib-root", "lib-root"} and i + 1 < len(args): - lib_root = str(args[i + 1]).strip() - i += 2 - continue - reason_tokens.append(token) - i += 1 - - # Handle result as either dict or object - if isinstance(result, dict): - hash_hex_raw = result.get("hash_hex") or result.get("hash") - target = result.get("target") - origin = result.get("origin") +def _process_single_item(item: Any, override_hash: str | None, conserve: str | None, + lib_root: str | None, reason: str, config: Dict[str, Any]) -> bool: + """Process deletion for a single item.""" + # Handle item as either dict or object + if isinstance(item, dict): + hash_hex_raw = item.get("hash_hex") or item.get("hash") + target = item.get("target") + origin = item.get("origin") else: - hash_hex_raw = getattr(result, "hash_hex", None) or getattr(result, "hash", None) - target = getattr(result, "target", None) - origin = getattr(result, "origin", None) + hash_hex_raw = getattr(item, "hash_hex", None) or getattr(item, "hash", None) + target = getattr(item, "target", None) + origin = getattr(item, "origin", None) # For Hydrus files, the target IS the hash if origin and origin.lower() == "hydrus" and not hash_hex_raw: hash_hex_raw = target hash_hex = normalize_hash(override_hash) if override_hash else normalize_hash(hash_hex_raw) - reason = " ".join(token for token in reason_tokens if str(token).strip()).strip() local_deleted = False local_target = isinstance(target, str) and target.strip() and not str(target).lower().startswith(("http://", "https://")) + if conserve != "local" and local_target: path = Path(str(target)) file_path_str = str(target) # Keep the original string for DB matching @@ -168,8 +132,6 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: if lib_root: lib_root_path = Path(lib_root) db_path = lib_root_path / ".downlow_library.db" - log(f"Attempting DB cleanup: lib_root={lib_root}, db_path={db_path}", file=sys.stderr) - log(f"Deleting DB entry for: {file_path_str}", file=sys.stderr) if _delete_database_entry(db_path, file_path_str): if ctx._PIPE_ACTIVE: ctx.emit(f"Removed database entry: {path.name}") @@ -178,7 +140,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: else: log(f"Database entry not found or cleanup failed for {file_path_str}", file=sys.stderr) else: - log(f"No lib_root provided, skipping database cleanup", file=sys.stderr) + debug(f"No lib_root provided, skipping database cleanup", file=sys.stderr) hydrus_deleted = False if conserve != "hydrus" and hash_hex: @@ -187,12 +149,12 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: except Exception as exc: if not local_deleted: log(f"Hydrus client unavailable: {exc}", file=sys.stderr) - return 1 + return False else: if client is None: if not local_deleted: log("Hydrus client unavailable", file=sys.stderr) - return 1 + return False else: payload: Dict[str, Any] = {"hashes": [hash_hex]} if reason: @@ -201,11 +163,11 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: client._post("/add_files/delete_files", data=payload) # type: ignore[attr-defined] hydrus_deleted = True preview = hash_hex[:12] + ('…' if len(hash_hex) > 12 else '') - log(f"Deleted from Hydrus: {preview}…", file=sys.stderr) + debug(f"Deleted from Hydrus: {preview}…", file=sys.stderr) except Exception as exc: log(f"Hydrus 
delete failed: {exc}", file=sys.stderr) if not local_deleted: - return 1 + return False if hydrus_deleted and hash_hex: preview = hash_hex[:12] + ('…' if len(hash_hex) > 12 else '') @@ -216,10 +178,64 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: ctx.emit(f"Deleted {preview}.") if hydrus_deleted or local_deleted: - return 0 + return True log("Selected result has neither Hydrus hash nor local file target") - return 1 + return False + + +def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: + # Help + try: + if any(str(a).lower() in {"-?", "/?", "--help", "-h", "help", "--cmdlet"} for a in args): + log(json.dumps(CMDLET, ensure_ascii=False, indent=2)) + return 0 + except Exception: + pass + + override_hash: str | None = None + conserve: str | None = None + lib_root: str | None = None + reason_tokens: list[str] = [] + i = 0 + while i < len(args): + token = args[i] + low = str(token).lower() + if low in {"-hash", "--hash", "hash"} and i + 1 < len(args): + override_hash = str(args[i + 1]).strip() + i += 2 + continue + if low in {"-conserve", "--conserve"} and i + 1 < len(args): + value = str(args[i + 1]).strip().lower() + if value in {"local", "hydrus"}: + conserve = value + i += 2 + continue + if low in {"-lib-root", "--lib-root", "lib-root"} and i + 1 < len(args): + lib_root = str(args[i + 1]).strip() + i += 2 + continue + reason_tokens.append(token) + i += 1 + + reason = " ".join(token for token in reason_tokens if str(token).strip()).strip() + + items = [] + if isinstance(result, list): + items = result + elif result: + items = [result] + + if not items: + log("No items to delete", file=sys.stderr) + return 1 + + success_count = 0 + for item in items: + if _process_single_item(item, override_hash, conserve, lib_root, reason, config): + success_count += 1 + + return 0 if success_count > 0 else 1 CMDLET = Cmdlet( name="delete-file", diff --git a/cmdlets/delete_tag.py b/cmdlets/delete_tag.py index c131ec8..7a19fcd 100644 --- a/cmdlets/delete_tag.py +++ b/cmdlets/delete_tag.py @@ -8,7 +8,7 @@ import models import pipeline as ctx from helper import hydrus as hydrus_wrapper from ._shared import Cmdlet, CmdletArg, normalize_hash, parse_tag_arguments -from helper.logger import log +from helper.logger import debug, log CMDLET = Cmdlet( @@ -68,6 +68,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: # @5 or @{2,5,8} to delete tags from ResultTable by index tags_from_at_syntax = [] hash_from_at_syntax = None + file_path_from_at_syntax = None if rest and str(rest[0]).startswith("@"): selector_arg = str(rest[0]) @@ -100,6 +101,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: # Also get hash from first item for consistency if not hash_from_at_syntax: hash_from_at_syntax = getattr(item, 'hash_hex', None) + if not file_path_from_at_syntax: + file_path_from_at_syntax = getattr(item, 'file_path', None) if not tags_from_at_syntax: log(f"No tags found at indices: {indices}") @@ -112,108 +115,165 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: return 1 # Handle @N selection which creates a list - extract the first item - if isinstance(result, list) and len(result) > 0: - # If we have a list of TagItems, we want to process ALL of them if no args provided - # This handles: delete-tag @1 (where @1 expands to a list containing one TagItem) - if not args and hasattr(result[0], '__class__') and result[0].__class__.__name__ == 'TagItem': - # We will extract tags from the list 
later - pass - else: - result = result[0] + # If we have a list of TagItems, we want to process ALL of them if no args provided + # This handles: delete-tag @1 (where @1 expands to a list containing one TagItem) + # Also handles: delete-tag @1,2 (where we want to delete tags from multiple files) - # Determine tags and hash to use - tags: list[str] = [] - hash_hex = None + # Normalize result to a list for processing + items_to_process = [] + if isinstance(result, list): + items_to_process = result + elif result: + items_to_process = [result] + + # If we have TagItems and no args, we are deleting the tags themselves + # If we have Files (or other objects) and args, we are deleting tags FROM those files + + # Check if we are in "delete selected tags" mode (TagItems) + is_tag_item_mode = (items_to_process and hasattr(items_to_process[0], '__class__') and + items_to_process[0].__class__.__name__ == 'TagItem') + + if is_tag_item_mode: + # Collect all tags to delete from the TagItems + # Group by hash/file_path to batch operations if needed, or just process one by one + # For simplicity, we'll process one by one or group by file + pass + else: + # "Delete tags from files" mode + # We need args (tags to delete) + if not args and not tags_from_at_syntax: + log("Requires at least one tag argument when deleting from files") + return 1 + + # Process each item + success_count = 0 + + # If we have tags from @ syntax (e.g. delete-tag @{1,2}), we ignore the piped result for tag selection + # but we might need the piped result for the file context if @ selection was from a Tag table + # Actually, the @ selection logic above already extracted tags. if tags_from_at_syntax: - # Use tags extracted from @ syntax + # Special case: @ selection of tags. + # We already extracted tags and hash/path. + # Just run the deletion once using the extracted info. + # This preserves the existing logic for @ selection. + tags = tags_from_at_syntax hash_hex = normalize_hash(override_hash) if override_hash else normalize_hash(hash_from_at_syntax) - log(f"[delete_tag] Using @ syntax extraction: {len(tags)} tag(s) to delete: {tags}") - elif isinstance(result, list) and result and hasattr(result[0], '__class__') and result[0].__class__.__name__ == 'TagItem': - # Got a list of TagItems (e.g. 
from delete-tag @1) - tags = [getattr(item, 'tag_name') for item in result if getattr(item, 'tag_name', None)] - # Use hash from first item - hash_hex = normalize_hash(override_hash) if override_hash else normalize_hash(getattr(result[0], "hash_hex", None)) - elif result and hasattr(result, '__class__') and result.__class__.__name__ == 'TagItem': - # Got a piped TagItem - delete this specific tag - tag_name = getattr(result, 'tag_name', None) - if tag_name: - tags = [tag_name] - hash_hex = normalize_hash(override_hash) if override_hash else normalize_hash(getattr(result, "hash_hex", None)) + file_path = file_path_from_at_syntax + + if _process_deletion(tags, hash_hex, file_path, config): + success_count += 1 + else: - # Traditional mode - parse tag arguments - tags = parse_tag_arguments(rest) - hash_hex = normalize_hash(override_hash) if override_hash else normalize_hash(getattr(result, "hash_hex", None)) + # Process items from pipe (or single result) + # If args are provided, they are the tags to delete from EACH item + # If items are TagItems and no args, the tag to delete is the item itself + + tags_arg = parse_tag_arguments(rest) + + for item in items_to_process: + tags_to_delete = [] + item_hash = normalize_hash(override_hash) if override_hash else normalize_hash(getattr(item, "hash_hex", None)) + item_path = getattr(item, "path", None) or getattr(item, "file_path", None) or getattr(item, "target", None) + # If result is a dict (e.g. from search-file), try getting path from keys + if not item_path and isinstance(item, dict): + item_path = item.get("path") or item.get("file_path") or item.get("target") + + item_source = getattr(item, "source", None) + + if hasattr(item, '__class__') and item.__class__.__name__ == 'TagItem': + # It's a TagItem + if tags_arg: + # User provided tags to delete FROM this file (ignoring the tag name in the item?) + # Or maybe they want to delete the tag in the item AND the args? + # Usually if piping TagItems, we delete THOSE tags. + # If args are present, maybe we should warn? + # For now, if args are present, assume they override or add to the tag item? + # Let's assume if args are present, we use args. If not, we use the tag name. + tags_to_delete = tags_arg + else: + tag_name = getattr(item, 'tag_name', None) + if tag_name: + tags_to_delete = [tag_name] + else: + # It's a File or other object + if tags_arg: + tags_to_delete = tags_arg + else: + # No tags provided for a file object - skip or error? + # We already logged an error if no args and not TagItem mode globally, + # but inside the loop we might have mixed items? Unlikely. 
+ continue + + if tags_to_delete and (item_hash or item_path): + if _process_deletion(tags_to_delete, item_hash, item_path, config, source=item_source): + success_count += 1 + + if success_count > 0: + return 0 + return 1 + +def _process_deletion(tags: list[str], hash_hex: str | None, file_path: str | None, config: Dict[str, Any], source: str | None = None) -> bool: + """Helper to execute the deletion logic for a single target.""" if not tags: - log("No valid tags were provided") - return 1 - + return False + + if not hash_hex and not file_path: + log("Item does not include a hash or file path") + return False + + # Handle local file tag deletion + if file_path and (source == "local" or (not hash_hex and source != "hydrus")): + try: + from helper.local_library import LocalLibraryDB + from pathlib import Path + + path_obj = Path(file_path) + if not path_obj.exists(): + log(f"File not found: {file_path}") + return False + + # Try to get local storage path from config + from config import get_local_storage_path + local_root = get_local_storage_path(config) + + if not local_root: + # Fallback: assume file is in a library root or use its parent + local_root = path_obj.parent + + db = LocalLibraryDB(local_root) + db.remove_tags(path_obj, tags) + debug(f"Removed {len(tags)} tag(s) from {path_obj.name} (local)") + return True + + except Exception as exc: + log(f"Failed to remove local tags: {exc}") + return False + + # Hydrus deletion logic if not hash_hex: - log("Selected result does not include a hash") - return 1 + return False try: service_name = hydrus_wrapper.get_tag_service_name(config) - except Exception as exc: - log(f"Failed to resolve tag service: {exc}") - return 1 - - try: client = hydrus_wrapper.get_client(config) - except Exception as exc: - log(f"Hydrus client unavailable: {exc}") - return 1 - - if client is None: - log("Hydrus client unavailable") - return 1 - - log(f"[delete_tag] Sending deletion request: hash={hash_hex}, tags={tags}, service={service_name}") - try: - result = client.delete_tags(hash_hex, tags, service_name) - log(f"[delete_tag] Hydrus response: {result}") + + if client is None: + log("Hydrus client unavailable") + return False + + debug(f"Sending deletion request: hash={hash_hex}, tags={tags}, service={service_name}") + client.delete_tags(hash_hex, tags, service_name) + + preview = hash_hex[:12] + ('…' if len(hash_hex) > 12 else '') + debug(f"Removed {len(tags)} tag(s) from {preview} via '{service_name}'.") + return True + except Exception as exc: log(f"Hydrus del-tag failed: {exc}") - return 1 - - preview = hash_hex[:12] + ('…' if len(hash_hex) > 12 else '') - log(f"Removed {len(tags)} tag(s) from {preview} via '{service_name}'.") - - # Re-fetch and emit updated tags after deletion - try: - payload = client.fetch_file_metadata(hashes=[str(hash_hex)], include_service_keys_to_tags=True, include_file_urls=False) - items = payload.get("metadata") if isinstance(payload, dict) else None - if isinstance(items, list) and items: - meta = items[0] if isinstance(items[0], dict) else None - if isinstance(meta, dict): - # Extract tags from updated metadata - from cmdlets.get_tag import _extract_my_tags_from_hydrus_meta, TagItem - service_key = hydrus_wrapper.get_tag_service_key(client, service_name) - updated_tags = _extract_my_tags_from_hydrus_meta(meta, service_key, service_name) - - # Emit updated tags as TagItem objects - from result_table import ResultTable - table = ResultTable("Tags", max_columns=2) - tag_items = [] - for idx, tag_name in enumerate(updated_tags, 
start=1): - tag_item = TagItem( - tag_name=tag_name, - tag_index=idx, - hash_hex=hash_hex, - source="hydrus", - service_name=service_name, - ) - tag_items.append(tag_item) - table.add_result(tag_item) - ctx.emit(tag_item) - - # Store items for @ selection in next command (CLI will handle table management) - # Don't call set_last_result_table so we don't pollute history or table context - except Exception as exc: - log(f"Warning: Could not fetch updated tags after deletion: {exc}", file=__import__('sys').stderr) - - return 0 + return False + diff --git a/cmdlets/download_data.py b/cmdlets/download_data.py index 301948e..d5773e7 100644 --- a/cmdlets/download_data.py +++ b/cmdlets/download_data.py @@ -1611,8 +1611,24 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results: # Priority 1: --storage flag if storage_location: try: - final_output_dir = SharedArgs.resolve_storage(storage_location) - debug(f"Using storage location: {storage_location} → {final_output_dir}") + # For 'local' storage, check config first before using default + if storage_location.lower() == 'local': + from config import get_local_storage_path + try: + configured_path = get_local_storage_path(config) + if configured_path: + final_output_dir = configured_path + debug(f"Using configured local storage path: {final_output_dir}") + else: + final_output_dir = SharedArgs.resolve_storage(storage_location) + debug(f"Using default storage location: {storage_location} → {final_output_dir}") + except Exception as exc: + log(f"⚠️ Error reading local storage config: {exc}", file=sys.stderr) + final_output_dir = SharedArgs.resolve_storage(storage_location) + debug(f"Falling back to default storage location: {storage_location} → {final_output_dir}") + else: + final_output_dir = SharedArgs.resolve_storage(storage_location) + debug(f"Using storage location: {storage_location} → {final_output_dir}") except ValueError as e: log(str(e), file=sys.stderr) return 1 @@ -2237,6 +2253,14 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results: if 0 < idx <= len(formats): fmt = formats[idx-1] current_format_selector = fmt.get("format_id") + + # If video-only format is selected, append +bestaudio to merge with best audio + vcodec = fmt.get("vcodec") + acodec = fmt.get("acodec") + if vcodec and vcodec != "none" and (not acodec or acodec == "none"): + current_format_selector = f"{current_format_selector}+bestaudio" + debug(f"Video-only format selected, appending bestaudio: {current_format_selector}") + debug(f"Selected format #{idx}: {current_format_selector}") playlist_items = None # Clear so it doesn't affect download options else: @@ -2461,6 +2485,10 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any], emit_results: from result_table import ResultTable table = ResultTable("Downloaded Files") for i, file_path in enumerate(downloaded_files): + # Ensure file_path is a Path object + if isinstance(file_path, str): + file_path = Path(file_path) + row = table.add_row() row.add_column("#", str(i + 1)) row.add_column("File", file_path.name) diff --git a/cmdlets/get_file.py b/cmdlets/get_file.py index d0b8fcc..2ef47ae 100644 --- a/cmdlets/get_file.py +++ b/cmdlets/get_file.py @@ -6,13 +6,15 @@ import shutil as _shutil import subprocess as _subprocess import json import sys +import platform from helper.logger import log, debug import uuid as _uuid import time as _time -from downlow_helpers.progress import print_progress, print_final_progress, format_size -from downlow_helpers.http_client 
import HTTPClient +from helper.progress import print_progress, print_final_progress +from helper.http_client import HTTPClient +from helper.mpv_ipc import get_ipc_pipe_path, send_to_mpv import fnmatch as _fnmatch from . import register @@ -21,7 +23,7 @@ import pipeline as ctx from helper import hydrus as hydrus_wrapper from ._shared import Cmdlet, CmdletArg, normalize_hash, looks_like_hash, create_pipe_object_result from config import resolve_output_dir, get_hydrus_url, get_hydrus_access_key -from downlow_helpers.alldebrid import AllDebridClient +from helper.alldebrid import AllDebridClient @@ -248,158 +250,63 @@ def _is_playable_in_mpv(file_path_or_ext: str, mime_type: Optional[str] = None) return False -def _get_fixed_ipc_pipe() -> str: - """Get the fixed IPC pipe name for persistent MPV connection. - - Uses a fixed name 'mpv-medeia-macina' so all playback sessions - connect to the same MPV window/process instead of creating new instances. - """ - import platform - if platform.system() == 'Windows': - return "\\\\.\\pipe\\mpv-medeia-macina" - else: - return "/tmp/mpv-medeia-macina.sock" - - -def _send_to_mpv_pipe(file_url: str, ipc_pipe: str, title: str, headers: Optional[Dict[str, str]] = None) -> bool: - """Send loadfile command to existing MPV via IPC pipe. - - Returns True if successfully sent to existing MPV, False if pipe unavailable. - """ - import json - import socket - import platform - - try: - # Prepare commands - # Use set_property for headers as loadfile options can be unreliable via IPC - header_str = "" - if headers: - header_str = ",".join([f"{k}: {v}" for k, v in headers.items()]) - - # Command 1: Set headers (or clear them) - cmd_headers = { - "command": ["set_property", "http-header-fields", header_str], - "request_id": 0 - } - - # Command 2: Load file using memory:// M3U to preserve title - # Sanitize title to avoid breaking M3U format - safe_title = title.replace("\n", " ").replace("\r", "") - m3u_content = f"#EXTM3U\n#EXTINF:-1,{safe_title}\n{file_url}\n" - - cmd_load = { - "command": ["loadfile", f"memory://{m3u_content}", "append-play"], - "request_id": 1 - } - - if platform.system() == 'Windows': - # Windows named pipes require special handling - try: - # Open in r+b to read response - with open(ipc_pipe, 'r+b', buffering=0) as pipe: - # Send headers - pipe.write((json.dumps(cmd_headers) + "\n").encode('utf-8')) - pipe.flush() - pipe.readline() # Consume response for headers - - # Send loadfile - pipe.write((json.dumps(cmd_load) + "\n").encode('utf-8')) - pipe.flush() - - # Read response - response_line = pipe.readline() - if response_line: - resp = json.loads(response_line.decode('utf-8')) - if resp.get('error') != 'success': - debug(f"[get-file] MPV error: {resp.get('error')}", file=sys.stderr) - return False - - debug(f"[get-file] Sent to existing MPV: {title}", file=sys.stderr) - return True - except (OSError, IOError): - # Pipe not available - return False - else: - # Unix socket for Linux/macOS - if not hasattr(socket, 'AF_UNIX'): - return False - - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - sock.connect(ipc_pipe) - - # Send headers - sock.sendall((json.dumps(cmd_headers) + "\n").encode('utf-8')) - sock.recv(4096) # Consume response - - # Send loadfile - sock.sendall((json.dumps(cmd_load) + "\n").encode('utf-8')) - - # Read response - try: - response_data = sock.recv(4096) - if response_data: - resp = json.loads(response_data.decode('utf-8')) - if resp.get('error') != 'success': - debug(f"[get-file] MPV error: {resp.get('error')}", 
file=sys.stderr) - sock.close() - return False - except: - pass - sock.close() - - debug(f"[get-file] Sent to existing MPV: {title}", file=sys.stderr) - return True - except (OSError, socket.error, ConnectionRefusedError): - # Pipe doesn't exist or MPV not listening - will need to start new instance - return False - except Exception as e: - debug(f"[get-file] IPC error: {e}", file=sys.stderr) - return False - - def _play_in_mpv(file_url: str, file_title: str, is_stream: bool = False, headers: Optional[Dict[str, str]] = None) -> bool: - """Play file in MPV using IPC pipe, creating new instance if needed. + """Play file in MPV using centralized IPC pipe, creating new instance if needed. Returns True on success, False on error. """ - ipc_pipe = _get_fixed_ipc_pipe() - import json - import socket - import platform - try: # First try to send to existing MPV instance - if _send_to_mpv_pipe(file_url, ipc_pipe, file_title, headers): + if send_to_mpv(file_url, file_title, headers): debug(f"Added to MPV: {file_title}") return True # No existing MPV or pipe unavailable - start new instance + ipc_pipe = get_ipc_pipe_path() debug(f"[get-file] Starting new MPV instance (pipe: {ipc_pipe})", file=sys.stderr) - cmd = ['mpv', file_url, f'--input-ipc-server={ipc_pipe}'] - # Set title for new instance - cmd.append(f'--force-media-title={file_title}') + # Build command - start MPV without a file initially, just with IPC server + cmd = ['mpv', f'--input-ipc-server={ipc_pipe}'] if headers: # Format headers for command line # --http-header-fields="Header1: Val1,Header2: Val2" header_str = ",".join([f"{k}: {v}" for k, v in headers.items()]) cmd.append(f'--http-header-fields={header_str}') - + + # Add --idle flag so MPV stays running and waits for playlist commands + cmd.append('--idle') + # Detach process to prevent freezing parent CLI kwargs = {} if platform.system() == 'Windows': - # CREATE_NEW_CONSOLE might be better than CREATE_NO_WINDOW if MPV needs a window - # But usually MPV creates its own window. - # DETACHED_PROCESS (0x00000008) is also an option. - kwargs['creationflags'] = 0x00000008 # DETACHED_PROCESS + kwargs['creationflags'] = 0x00000008 # DETACHED_PROCESS _subprocess.Popen(cmd, stdin=_subprocess.DEVNULL, stdout=_subprocess.DEVNULL, stderr=_subprocess.DEVNULL, **kwargs) - debug(f"{'Streaming' if is_stream else 'Playing'} in MPV: {file_title}") - debug(f"[get-file] Started MPV with {file_title} (IPC: {ipc_pipe})", file=sys.stderr) - return True + debug(f"[get-file] Started MPV instance (IPC: {ipc_pipe})", file=sys.stderr) + + # Give MPV time to start and open IPC pipe + # Windows needs more time than Unix + wait_time = 1.0 if platform.system() == 'Windows' else 0.5 + debug(f"[get-file] Waiting {wait_time}s for MPV to initialize IPC...", file=sys.stderr) + _time.sleep(wait_time) + + # Try up to 3 times to send the file via IPC + for attempt in range(3): + debug(f"[get-file] Sending file via IPC (attempt {attempt + 1}/3)", file=sys.stderr) + if send_to_mpv(file_url, file_title, headers): + debug(f"{'Streaming' if is_stream else 'Playing'} in MPV: {file_title}") + debug(f"[get-file] Added to new MPV instance (IPC: {ipc_pipe})", file=sys.stderr) + return True + + if attempt < 2: + # Wait before retrying + _time.sleep(0.3) + + # IPC send failed after all retries + log("Error: Could not send file to MPV via IPC after startup", file=sys.stderr) + return False except FileNotFoundError: log("Error: MPV not found. 
Install mpv to play media files", file=sys.stderr) @@ -516,7 +423,7 @@ def _handle_hydrus_file(file_hash: Optional[str], file_title: str, config: Dict[ if force_browser: # User explicitly wants browser - ipc_pipe = _get_fixed_ipc_pipe() + ipc_pipe = get_ipc_pipe_path() result_dict = create_pipe_object_result( source='hydrus', identifier=file_hash, @@ -559,7 +466,7 @@ def _handle_hydrus_file(file_hash: Optional[str], file_title: str, config: Dict[ return 0 else: # Not media, open in browser - ipc_pipe = _get_fixed_ipc_pipe() + ipc_pipe = get_ipc_pipe_path() result_dict = create_pipe_object_result( source='hydrus', identifier=file_hash, @@ -1193,7 +1100,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: # Normal file export (happens regardless of -metadata flag) try: - from downlow_helpers.hydrus import hydrus_export as _hydrus_export + from helper.hydrus import hydrus_export as _hydrus_export except Exception: _hydrus_export = None # type: ignore if _hydrus_export is None: diff --git a/cmdlets/get_tag.py b/cmdlets/get_tag.py index b0c6b27..c43219e 100644 --- a/cmdlets/get_tag.py +++ b/cmdlets/get_tag.py @@ -49,6 +49,7 @@ class TagItem: hash_hex: Optional[str] = None source: str = "hydrus" service_name: Optional[str] = None + file_path: Optional[str] = None def __post_init__(self): # Make ResultTable happy by adding standard fields @@ -101,7 +102,9 @@ def _emit_tags_as_table( hash_hex: Optional[str], source: str = "hydrus", service_name: Optional[str] = None, - config: Dict[str, Any] = None + config: Dict[str, Any] = None, + item_title: Optional[str] = None, + file_path: Optional[str] = None ) -> None: """Emit tags as TagItem objects and display via ResultTable. @@ -111,7 +114,13 @@ def _emit_tags_as_table( from result_table import ResultTable # Create ResultTable with just tag column (no title) - table = ResultTable("Tags", max_columns=1) + table_title = "Tags" + if item_title: + table_title = f"Tags: {item_title}" + if hash_hex: + table_title += f" [{hash_hex[:8]}]" + + table = ResultTable(table_title, max_columns=1) table.set_source_command("get-tag", []) # Create TagItem for each tag @@ -123,6 +132,7 @@ def _emit_tags_as_table( hash_hex=hash_hex, source=source, service_name=service_name, + file_path=file_path, ) tag_items.append(tag_item) table.add_result(tag_item) @@ -1069,6 +1079,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: # Try Hydrus first (always prioritize if available and has hash) use_hydrus = False hydrus_meta = None # Cache the metadata from first fetch + client = None if hash_hex and hydrus_available: try: client = hydrus.get_client(config) @@ -1093,7 +1104,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: try: # Use cached metadata from above, don't fetch again service_name = hydrus.get_tag_service_name(config) - client = hydrus.get_client(config) + if client is None: + client = hydrus.get_client(config) service_key = hydrus.get_tag_service_key(client, service_name) current = _extract_my_tags_from_hydrus_meta(hydrus_meta, service_key, service_name) source = "hydrus" @@ -1148,10 +1160,13 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: return 1 # Always output to ResultTable (pipeline mode only) + # Extract title for table header + item_title = get_field(result, "title", None) or get_field(result, "name", None) or get_field(result, "filename", None) + if source == "hydrus": - _emit_tags_as_table(current, hash_hex=hash_hex, source="hydrus", 
service_name=service_name, config=config) + _emit_tags_as_table(current, hash_hex=hash_hex, source="hydrus", service_name=service_name, config=config, item_title=item_title) else: - _emit_tags_as_table(current, hash_hex=hash_hex, source="local", service_name=None, config=config) + _emit_tags_as_table(current, hash_hex=hash_hex, source="local", service_name=None, config=config, item_title=item_title, file_path=str(local_path) if local_path else None) # If emit requested or store key provided, emit payload if emit_mode: diff --git a/cmdlets/output_json.py b/cmdlets/output_json.py new file mode 100644 index 0000000..32b85ab --- /dev/null +++ b/cmdlets/output_json.py @@ -0,0 +1,14 @@ +from typing import Any, Dict, Sequence +import json +from ._shared import Cmdlet + +def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: + """Output the current pipeline result as JSON.""" + print(json.dumps(result, indent=2, default=str)) + return 0 + +CMDLET = Cmdlet( + name="output-json", + summary="Output the current pipeline result as JSON.", + usage="... | output-json", +) diff --git a/cmdlets/pipe.py b/cmdlets/pipe.py index 1f50439..4804456 100644 --- a/cmdlets/pipe.py +++ b/cmdlets/pipe.py @@ -8,92 +8,124 @@ import subprocess from ._shared import Cmdlet, CmdletArg, parse_cmdlet_args from helper.logger import log, debug from result_table import ResultTable -from .get_file import _get_fixed_ipc_pipe +from helper.mpv_ipc import get_ipc_pipe_path, MPVIPCClient import pipeline as ctx -def _send_ipc_command(command: Dict[str, Any]) -> Optional[Any]: - """Send a command to the MPV IPC pipe and return the response.""" - ipc_pipe = _get_fixed_ipc_pipe() - request = json.dumps(command) + "\n" - - try: - if platform.system() == 'Windows': - # Windows named pipe - # Opening in r+b mode to read response - try: - with open(ipc_pipe, 'r+b', buffering=0) as pipe: - pipe.write(request.encode('utf-8')) - pipe.flush() - - # Read response - # We'll try to read a line. This might block if MPV is unresponsive. 
- response_line = pipe.readline() - if response_line: - return json.loads(response_line.decode('utf-8')) - except FileNotFoundError: - return None # MPV not running - except Exception as e: - debug(f"Windows IPC Error: {e}", file=sys.stderr) - return None - else: - # Unix socket - af_unix = getattr(socket, 'AF_UNIX', None) - if af_unix is None: - debug("Unix sockets not supported on this platform", file=sys.stderr) - return None - - try: - sock = socket.socket(af_unix, socket.SOCK_STREAM) - sock.settimeout(2.0) - sock.connect(ipc_pipe) - sock.sendall(request.encode('utf-8')) - - # Read response - response_data = b"" - while True: - try: - chunk = sock.recv(4096) - if not chunk: - break - response_data += chunk - if b"\n" in chunk: - break - except socket.timeout: - break - - sock.close() - - if response_data: - # Parse lines, look for response to our request - lines = response_data.decode('utf-8').strip().split('\n') - for line in lines: - try: - resp = json.loads(line) - # If it has 'error' field, it's a response - if 'error' in resp: - return resp - except: - pass - except (FileNotFoundError, ConnectionRefusedError): - return None # MPV not running - except Exception as e: - debug(f"Unix IPC Error: {e}", file=sys.stderr) - return None - - except Exception as e: - debug(f"IPC Error: {e}", file=sys.stderr) - return None - - return None +from helper.local_library import LocalLibrarySearchOptimizer +from config import get_local_storage_path -def _get_playlist() -> List[Dict[str, Any]]: - """Get the current playlist from MPV.""" +def _send_ipc_command(command: Dict[str, Any], silent: bool = False) -> Optional[Any]: + """Send a command to the MPV IPC pipe and return the response.""" + try: + ipc_pipe = get_ipc_pipe_path() + client = MPVIPCClient(socket_path=ipc_pipe) + + if not client.connect(): + return None # MPV not running + + response = client.send_command(command) + client.disconnect() + return response + except Exception as e: + if not silent: + debug(f"IPC Error: {e}", file=sys.stderr) + return None + +def _get_playlist(silent: bool = False) -> Optional[List[Dict[str, Any]]]: + """Get the current playlist from MPV. 
Returns None if MPV is not running.""" cmd = {"command": ["get_property", "playlist"], "request_id": 100} - resp = _send_ipc_command(cmd) - if resp and resp.get("error") == "success": + resp = _send_ipc_command(cmd, silent=silent) + if resp is None: + return None + if resp.get("error") == "success": return resp.get("data", []) return [] +def _extract_title_from_item(item: Dict[str, Any]) -> str: + """Extract a clean title from an MPV playlist item, handling memory:// M3U hacks.""" + title = item.get("title") + filename = item.get("filename") or "" + + # Special handling for memory:// M3U playlists (used to pass titles via IPC) + if "memory://" in filename and "#EXTINF:" in filename: + try: + # Extract title from #EXTINF:-1,Title + # Use regex to find title between #EXTINF:-1, and newline + match = re.search(r"#EXTINF:-1,(.*?)(?:\n|\r|$)", filename) + if match: + extracted_title = match.group(1).strip() + if not title or title == "memory://": + title = extracted_title + + # If we still don't have a title, try to find the URL in the M3U content + if not title: + lines = filename.splitlines() + for line in lines: + line = line.strip() + if line and not line.startswith('#') and not line.startswith('memory://'): + # Found the URL, use it as title + return line + except Exception: + pass + + return title or filename or "Unknown" + +def _queue_items(items: List[Any], clear_first: bool = False) -> None: + """Queue items to MPV, starting it if necessary. + + Args: + items: List of items to queue + clear_first: If True, the first item will replace the current playlist + """ + for i, item in enumerate(items): + # Extract URL/Path + target = None + title = None + + if isinstance(item, dict): + target = item.get("target") or item.get("url") or item.get("path") or item.get("filename") + title = item.get("title") or item.get("name") + elif hasattr(item, "target"): + target = item.target + title = getattr(item, "title", None) + elif isinstance(item, str): + target = item + + if target: + # Add to MPV playlist + # We use loadfile with append flag (or replace if clear_first is set) + + # Use memory:// M3U hack to pass title to MPV + if title: + # Sanitize title for M3U (remove newlines) + safe_title = title.replace('\n', ' ').replace('\r', '') + m3u_content = f"#EXTM3U\n#EXTINF:-1,{safe_title}\n{target}" + target_to_send = f"memory://{m3u_content}" + else: + target_to_send = target + + mode = "append" + if clear_first and i == 0: + mode = "replace" + + cmd = {"command": ["loadfile", target_to_send, mode], "request_id": 200} + resp = _send_ipc_command(cmd) + + if resp is None: + # MPV not running (or died) + # Start MPV with remaining items + _start_mpv(items[i:]) + return + elif resp.get("error") == "success": + # Also set property for good measure + if title: + title_cmd = {"command": ["set_property", "force-media-title", title], "request_id": 201} + _send_ipc_command(title_cmd) + debug(f"Queued: {title or target}") + else: + error_msg = str(resp.get('error')) + debug(f"Failed to queue item: {error_msg}", file=sys.stderr) + def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: """Manage and play items in the MPV playlist via IPC.""" @@ -106,7 +138,115 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: list_mode = parsed.get("list") play_mode = parsed.get("play") pause_mode = parsed.get("pause") + save_mode = parsed.get("save") + load_mode = parsed.get("load") + # Handle Save Playlist + if save_mode: + playlist_name = index_arg or f"Playlist 
{subprocess.check_output(['date', '/t'], shell=True).decode().strip()}" + # If index_arg was used for name, clear it so it doesn't trigger index logic + if index_arg: + index_arg = None + + items = _get_playlist() + if not items: + debug("Cannot save: MPV playlist is empty or MPV is not running.") + return 1 + + # Clean up items for saving (remove current flag, etc) + clean_items = [] + for item in items: + # If title was extracted from memory://, we should probably save the original filename + # if it's a URL, or reconstruct a clean object. + # Actually, _extract_title_from_item handles the display title. + # But for playback, we need the 'filename' (which might be memory://...) + # If we save 'memory://...', it will work when loaded back. + clean_items.append(item) + + # Use config from context or load it + config_data = config if config else {} + + storage_path = get_local_storage_path(config_data) + if not storage_path: + debug("Local storage path not configured.") + return 1 + + with LocalLibrarySearchOptimizer(storage_path) as db: + if db.save_playlist(playlist_name, clean_items): + debug(f"Playlist saved as '{playlist_name}'") + return 0 + else: + debug(f"Failed to save playlist '{playlist_name}'") + return 1 + + # Handle Load Playlist + current_playlist_name = None + if load_mode: + # Use config from context or load it + config_data = config if config else {} + + storage_path = get_local_storage_path(config_data) + if not storage_path: + debug("Local storage path not configured.") + return 1 + + with LocalLibrarySearchOptimizer(storage_path) as db: + if index_arg: + try: + pl_id = int(index_arg) + result = db.get_playlist_by_id(pl_id) + if result is None: + debug(f"Playlist ID {pl_id} not found.") + return 1 + + name, items = result + current_playlist_name = name + + # Queue items (replacing current playlist) + if items: + _queue_items(items, clear_first=True) + else: + # Empty playlist, just clear + _send_ipc_command({"command": ["playlist-clear"]}, silent=True) + + # Switch to list mode to show the result + list_mode = True + index_arg = None + # Fall through to list logic + + except ValueError: + debug(f"Invalid playlist ID: {index_arg}") + return 1 + else: + playlists = db.get_playlists() + + if not playlists: + debug("No saved playlists found.") + return 0 + + table = ResultTable("Saved Playlists") + for i, pl in enumerate(playlists): + item_count = len(pl.get('items', [])) + row = table.add_row() + # row.add_column("ID", str(pl['id'])) # Hidden as per user request + row.add_column("Name", pl['name']) + row.add_column("Items", str(item_count)) + row.add_column("Updated", pl['updated_at']) + + # Set the playlist items as the result object for this row + # When user selects @N, they get the list of items + # We also set the source command to .pipe -load so it loads it + table.set_row_selection_args(i, ["-load", str(pl['id'])]) + + table.set_source_command(".pipe") + + # Register results + ctx.set_last_result_table_overlay(table, [p['items'] for p in playlists]) + ctx.set_current_stage_table(table) + + print(table) + return 0 + # Handle Play/Pause commands if play_mode: cmd = {"command": ["set_property", "pause", False], "request_id": 103} @@ -148,64 +288,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: elif isinstance(result, dict): items_to_add = [result] - added_count = 0 - for i, item in enumerate(items_to_add): - # Extract URL/Path - target = None - title = None - - if isinstance(item, dict): - target = item.get("target") or item.get("url") or 
item.get("path") - title = item.get("title") or item.get("name") - elif hasattr(item, "target"): - target = item.target - title = getattr(item, "title", None) - elif isinstance(item, str): - target = item - - if target: - # Add to MPV playlist - # We use loadfile with append flag - - # Use memory:// M3U hack to pass title to MPV - # This avoids "invalid parameter" errors with loadfile options - # and ensures the title is displayed in the playlist/window - if title: - # Sanitize title for M3U (remove newlines) - safe_title = title.replace('\n', ' ').replace('\r', '') - m3u_content = f"#EXTM3U\n#EXTINF:-1,{safe_title}\n{target}" - target_to_send = f"memory://{m3u_content}" - else: - target_to_send = target - - cmd = {"command": ["loadfile", target_to_send, "append"], "request_id": 200} - resp = _send_ipc_command(cmd) - - if resp is None: - # MPV not running (or died) - # Start MPV with remaining items - _start_mpv(items_to_add[i:]) - return 0 - elif resp.get("error") == "success": - added_count += 1 - if title: - debug(f"Queued: {title}") - else: - debug(f"Queued: {target}") - else: - error_msg = str(resp.get('error')) - debug(f"Failed to queue item: {error_msg}", file=sys.stderr) - - # If error indicates parameter issues, try without options - # (Though memory:// should avoid this, we keep fallback just in case) - if "option" in error_msg or "parameter" in error_msg: - cmd = {"command": ["loadfile", target, "append"], "request_id": 201} - resp = _send_ipc_command(cmd) - if resp and resp.get("error") == "success": - added_count += 1 - debug(f"Queued (fallback): {title or target}") + _queue_items(items_to_add) - if added_count > 0: + if items_to_add: # If we added items, we might want to play the first one if nothing is playing? # For now, just list the playlist pass @@ -213,8 +298,13 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: # Get playlist from MPV items = _get_playlist() + if items is None: + debug("MPV is not running. 
Starting new instance...") + _start_mpv([]) + return 0 + if not items: - debug("MPV playlist is empty or MPV is not running.") + debug("MPV playlist is empty.") return 0 # If index is provided, perform action (Play or Clear) @@ -228,7 +318,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: return 1 item = items[idx] - title = item.get("title") or item.get("filename") or "Unknown" + title = _extract_title_from_item(item) if clear_mode: # Remove item @@ -237,7 +327,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: if resp and resp.get("error") == "success": debug(f"Removed: {title}") # Refresh items for listing - items = _get_playlist() + items = _get_playlist() or [] list_mode = True index_arg = None else: @@ -268,46 +358,26 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: debug("MPV playlist is empty.") return 0 - table = ResultTable("MPV Playlist") + # Use the loaded playlist name if available, otherwise default + # Note: current_playlist_name is defined in the load_mode block if a playlist was loaded + try: + table_title = current_playlist_name or "MPV Playlist" + except NameError: + table_title = "MPV Playlist" + + table = ResultTable(table_title) for i, item in enumerate(items): is_current = item.get("current", False) - title = item.get("title") or "" - filename = item.get("filename") or "" - - # Special handling for memory:// M3U playlists (used to pass titles via IPC) - if "memory://" in filename and "#EXTINF:" in filename: - try: - # Extract title from #EXTINF:-1,Title - # Use regex to find title between #EXTINF:-1, and newline - match = re.search(r"#EXTINF:-1,(.*?)(?:\n|\r|$)", filename) - if match: - extracted_title = match.group(1).strip() - if not title or title == "memory://": - title = extracted_title - - # Extract actual URL - # Find the first line that looks like a URL and not a directive - lines = filename.splitlines() - for line in lines: - line = line.strip() - if line and not line.startswith('#') and not line.startswith('memory://'): - filename = line - break - except Exception: - pass + title = _extract_title_from_item(item) # Truncate if too long - if len(title) > 57: - title = title[:57] + "..." - if len(filename) > 27: - filename = filename[:27] + "..." + if len(title) > 80: + title = title[:77] + "..." 
row = table.add_row() - row.add_column("#", str(i + 1)) row.add_column("Current", "*" if is_current else "") row.add_column("Title", title) - row.add_column("Filename", filename) table.set_row_selection_args(i, [str(i + 1)]) @@ -323,9 +393,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: def _start_mpv(items: List[Any]) -> None: """Start MPV with a list of items.""" - ipc_pipe = _get_fixed_ipc_pipe() + ipc_pipe = get_ipc_pipe_path() - cmd = ['mpv', f'--input-ipc-server={ipc_pipe}'] + cmd = ['mpv', f'--input-ipc-server={ipc_pipe}', '--idle', '--force-window'] cmd.append('--ytdl-format=bestvideo[height<=?1080]+bestaudio/best[height<=?1080]') # Add items @@ -334,7 +404,7 @@ def _start_mpv(items: List[Any]) -> None: title = None if isinstance(item, dict): - target = item.get("target") or item.get("url") or item.get("path") + target = item.get("target") or item.get("url") or item.get("path") or item.get("filename") title = item.get("title") or item.get("name") elif hasattr(item, "target"): target = item.target @@ -351,16 +421,15 @@ def _start_mpv(items: List[Any]) -> None: else: cmd.append(target) - if len(cmd) > 3: # mpv + ipc + format + at least one file - try: - kwargs = {} - if platform.system() == 'Windows': - kwargs['creationflags'] = 0x00000008 # DETACHED_PROCESS - - subprocess.Popen(cmd, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, **kwargs) - debug(f"Started MPV with {len(cmd)-3} items") - except Exception as e: - debug(f"Error starting MPV: {e}", file=sys.stderr) + try: + kwargs = {} + if platform.system() == 'Windows': + kwargs['creationflags'] = 0x00000008 # DETACHED_PROCESS + + subprocess.Popen(cmd, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL, **kwargs) + debug(f"Started MPV with {len(items)} items") + except Exception as e: + debug(f"Error starting MPV: {e}", file=sys.stderr) CMDLET = Cmdlet( name=".pipe", @@ -394,6 +463,16 @@ CMDLET = Cmdlet( type="flag", description="Pause playback" ), + CmdletArg( + name="save", + type="flag", + description="Save current playlist to database" + ), + CmdletArg( + name="load", + type="flag", + description="List saved playlists" + ), ], exec=_run ) diff --git a/cmdlets/screen_shot.py b/cmdlets/screen_shot.py index 856de49..6534fe2 100644 --- a/cmdlets/screen_shot.py +++ b/cmdlets/screen_shot.py @@ -9,6 +9,7 @@ from __future__ import annotations import contextlib import hashlib import importlib +import json import sys import time import httpx @@ -17,8 +18,9 @@ from pathlib import Path from typing import Any, Dict, List, Optional, Sequence, Tuple from urllib.parse import urlsplit, quote, urljoin -from helper.logger import log +from helper.logger import log, debug from helper.http_client import HTTPClient +from helper.utils import ensure_directory, unique_path, unique_preserve_order from . 
import register from ._shared import Cmdlet, CmdletArg, SharedArgs, create_pipe_object_result, normalize_result_input @@ -70,6 +72,38 @@ USER_AGENT = ( DEFAULT_VIEWPORT: ViewportSize = {"width": 1280, "height": 1200} ARCHIVE_TIMEOUT = 30.0 +# Configurable selectors for specific websites +SITE_SELECTORS: Dict[str, List[str]] = { + "twitter.com": [ + "article[role='article']", + "div[data-testid='tweet']", + "div[data-testid='cellInnerDiv'] article", + ], + "x.com": [ + "article[role='article']", + "div[data-testid='tweet']", + "div[data-testid='cellInnerDiv'] article", + ], + "instagram.com": [ + "article[role='presentation']", + "article[role='article']", + "div[role='dialog'] article", + "section main article", + ], + "reddit.com": [ + "shreddit-post", + "div[data-testid='post-container']", + "div[data-click-id='background']", + "article", + ], + "rumble.com": [ + "rumble-player, iframe.rumble", + "div.video-item--main", + "main article", + ], +} + + class ScreenshotError(RuntimeError): """Raised when screenshot capture or upload fails.""" @@ -113,39 +147,6 @@ class ScreenshotResult: # Helper Functions # ============================================================================ -def _ensure_directory(path: Path) -> None: - """Ensure directory exists.""" - if not isinstance(path, Path): - path = Path(path) - path.mkdir(parents=True, exist_ok=True) - - -def _unique_path(path: Path) -> Path: - """Get unique path by appending numbers if file exists.""" - if not path.exists(): - return path - stem = path.stem - suffix = path.suffix - parent = path.parent - counter = 1 - while True: - new_path = parent / f"{stem}_{counter}{suffix}" - if not new_path.exists(): - return new_path - counter += 1 - - -def _unique_preserve_order(items: Sequence[str]) -> List[str]: - """Remove duplicates while preserving order.""" - seen = set() - result = [] - for item in items: - if item not in seen: - seen.add(item) - result.append(item) - return result - - def _slugify_url(url: str) -> str: """Convert URL to filesystem-safe slug.""" parsed = urlsplit(url) @@ -180,36 +181,11 @@ def _selectors_for_url(url: str) -> List[str]: """Return a list of likely content selectors for known platforms.""" u = url.lower() sels: List[str] = [] - # Twitter/X - if "twitter.com" in u or "x.com" in u: - sels.extend([ - "article[role='article']", - "div[data-testid='tweet']", - "div[data-testid='cellInnerDiv'] article", - ]) - # Instagram - if "instagram.com" in u: - sels.extend([ - "article[role='presentation']", - "article[role='article']", - "div[role='dialog'] article", - "section main article", - ]) - # Reddit - if "reddit.com" in u: - sels.extend([ - "shreddit-post", - "div[data-testid='post-container']", - "div[data-click-id='background']", - "article", - ]) - # Rumble (video post) - if "rumble.com" in u: - sels.extend([ - "rumble-player, iframe.rumble", - "div.video-item--main", - "main article", - ]) + + for domain, selectors in SITE_SELECTORS.items(): + if domain in u: + sels.extend(selectors) + return sels or ["article"] @@ -321,7 +297,7 @@ def _archive_url(url: str, timeout: float) -> Tuple[List[str], List[str]]: def _prepare_output_path(options: ScreenshotOptions) -> Path: """Prepare and validate output path for screenshot.""" - _ensure_directory(options.output_dir) + ensure_directory(options.output_dir) explicit_format = _normalise_format(options.output_format) if options.output_format else None inferred_format: Optional[str] = None if options.output_path is not None: @@ -344,20 +320,23 @@ def 
_prepare_output_path(options: ScreenshotOptions) -> Path: if current_suffix != expected: path = path.with_suffix(expected) options.output_format = final_format - return _unique_path(path) + return unique_path(path) -def _capture_with_playwright(options: ScreenshotOptions, destination: Path, warnings: List[str]) -> None: +def _capture(options: ScreenshotOptions, destination: Path, warnings: List[str]) -> None: """Capture screenshot using Playwright.""" + debug(f"[_capture] Starting capture for {options.url} -> {destination}") playwright = None browser = None context = None try: - log("Starting Playwright...", flush=True) + debug("Starting Playwright...", flush=True) playwright = sync_playwright().start() log("Launching Chromium browser...", flush=True) format_name = _normalise_format(options.output_format) headless = options.headless or format_name == "pdf" + debug(f"[_capture] Format: {format_name}, Headless: {headless}") + if format_name == "pdf" and not options.headless: warnings.append("pdf output requires headless Chromium; overriding headless mode") browser = playwright.chromium.launch( @@ -413,11 +392,14 @@ def _capture_with_playwright(options: ScreenshotOptions, destination: Path, warn log("Attempting platform-specific content capture...", flush=True) try: _platform_preprocess(options.url, page, warnings) - except Exception: + except Exception as e: + debug(f"[_capture] Platform preprocess failed: {e}") pass selectors = list(options.target_selectors or []) if not selectors: selectors = _selectors_for_url(options.url) + + debug(f"[_capture] Trying selectors: {selectors}") for sel in selectors: try: log(f"Trying selector: {sel}", flush=True) @@ -466,6 +448,7 @@ def _capture_with_playwright(options: ScreenshotOptions, destination: Path, warn page.screenshot(**screenshot_kwargs) log(f"Screenshot saved to {destination}", flush=True) except Exception as exc: + debug(f"[_capture] Exception: {exc}") raise ScreenshotError(f"Failed to capture screenshot: {exc}") from exc finally: log("Cleaning up browser resources...", flush=True) @@ -483,20 +466,22 @@ def _capture_with_playwright(options: ScreenshotOptions, destination: Path, warn def _capture_screenshot(options: ScreenshotOptions) -> ScreenshotResult: """Capture a screenshot for the given options.""" + debug(f"[_capture_screenshot] Preparing capture for {options.url}") destination = _prepare_output_path(options) warnings: List[str] = [] - _capture_with_playwright(options, destination, warnings) + _capture(options, destination, warnings) - known_urls = _unique_preserve_order([options.url, *options.known_urls]) + known_urls = unique_preserve_order([options.url, *options.known_urls]) archive_urls: List[str] = [] if options.archive: + debug(f"[_capture_screenshot] Archiving enabled for {options.url}") archives, archive_warnings = _archive_url(options.url, options.archive_timeout) archive_urls.extend(archives) warnings.extend(archive_warnings) if archives: - known_urls = _unique_preserve_order([*known_urls, *archives]) + known_urls = unique_preserve_order([*known_urls, *archives]) - applied_tags = _unique_preserve_order(list(tag for tag in options.tags if tag.strip())) + applied_tags = unique_preserve_order(list(tag for tag in options.tags if tag.strip())) return ScreenshotResult( path=destination, @@ -530,6 +515,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: """ from ._shared import parse_cmdlet_args + debug(f"[_run] screen-shot invoked with args: {args}") + # Help check try: if any(str(a).lower() in {"-?", 
"/?", "--help", "-h", "help", "--cmdlet"} for a in args): @@ -581,6 +568,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: log(f"No URLs to process for screen-shot cmdlet", file=sys.stderr) return 1 + debug(f"[_run] URLs to process: {urls_to_process}") + # ======================================================================== # OUTPUT DIRECTORY RESOLUTION - Priority chain # ======================================================================== @@ -617,7 +606,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: screenshot_dir = Path.home() / "Videos" log(f"[screen_shot] Using default directory: {screenshot_dir}", flush=True) - _ensure_directory(screenshot_dir) + ensure_directory(screenshot_dir) # ======================================================================== # PREPARE SCREENSHOT OPTIONS diff --git a/cmdlets/search_file.py b/cmdlets/search_file.py index 33accf7..58f4e01 100644 --- a/cmdlets/search_file.py +++ b/cmdlets/search_file.py @@ -249,6 +249,20 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: try: results_list = [] + import result_table + import importlib + importlib.reload(result_table) + from result_table import ResultTable + + # Create ResultTable for display + table_title = f"Search: {query}" + if provider_name: + table_title += f" [{provider_name}]" + elif storage_backend: + table_title += f" [{storage_backend}]" + + table = ResultTable(table_title) + table.set_source_command("search-file", args_list) # Try to search using provider (libgen, soulseek, debrid, openlibrary) if provider_name: @@ -264,10 +278,17 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: debug(f"[search_file] Provider search returned {len(search_result)} results") for item in search_result: + # Add to table + table.add_result(item) + + # Emit to pipeline item_dict = item.to_dict() results_list.append(item_dict) ctx.emit(item_dict) + # Set the result table in context for TUI/CLI display + ctx.set_last_result_table(table, results_list) + debug(f"[search_file] Emitted {len(results_list)} results") # Write results to worker stdout @@ -316,6 +337,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: # Emit results and collect for workers table if results: for item in results: + # Add to table + table.add_result(item) + if isinstance(item, dict): normalized = _ensure_storage_columns(item) results_list.append(normalized) @@ -329,6 +353,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int: results_list.append(item_dict) ctx.emit(item_dict) + # Set the result table in context for TUI/CLI display + ctx.set_last_result_table(table, results_list) + # Write results to worker stdout db.append_worker_stdout(worker_id, json.dumps(results_list, indent=2)) else: diff --git a/helper/download.py b/helper/download.py index dd6473e..fc43ee4 100644 --- a/helper/download.py +++ b/helper/download.py @@ -63,7 +63,7 @@ def _progress_callback(status: Dict[str, Any]) -> None: sys.stdout.write("\r" + " " * 70 + "\r") sys.stdout.flush() # Log finished message (visible) - log(f"✓ Download finished: {status.get('filename')}") + debug(f"✓ Download finished: {status.get('filename')}") elif event in ("postprocessing", "processing"): debug(f"Post-processing: {status.get('postprocessor')}") @@ -629,7 +629,7 @@ def download_media( _ensure_yt_dlp_ready() ytdl_options = _build_ytdlp_options(opts) - log(f"Starting yt-dlp download: {opts.url}") + debug(f"Starting yt-dlp 
download: {opts.url}") if debug_logger is not None: debug_logger.write_record("ytdlp-start", {"url": opts.url}) @@ -707,7 +707,7 @@ def download_media( or entry.get("url") ) - log(f"✓ Downloaded: {media_path.name} ({len(tags)} tags)") + debug(f"✓ Downloaded: {media_path.name} ({len(tags)} tags)") if debug_logger is not None: debug_logger.write_record( "downloaded", diff --git a/helper/file_storage.py b/helper/file_storage.py index 12696ef..3f8f795 100644 --- a/helper/file_storage.py +++ b/helper/file_storage.py @@ -50,6 +50,10 @@ class StorageBackend(ABC): Exception: If upload fails """ + @abstractmethod + def get_name(self) -> str: + """Get the unique name of this backend.""" + def search(self, query: str, **kwargs: Any) -> list[Dict[str, Any]]: """Search for files in backends that support it. @@ -125,7 +129,7 @@ class LocalStorageBackend(StorageBackend): try: # Compute file hash file_hash = sha256_file(file_path) - log(f"File hash: {file_hash}", file=sys.stderr) + debug(f"File hash: {file_hash}", file=sys.stderr) dest_dir = Path(location).expanduser() dest_dir.mkdir(parents=True, exist_ok=True) @@ -148,13 +152,13 @@ class LocalStorageBackend(StorageBackend): if move_file: shutil.move(str(file_path), dest_file) - log(f"✅ Local move: {dest_file}", file=sys.stderr) + debug(f"Local move: {dest_file}", file=sys.stderr) else: shutil.copy2(file_path, dest_file) - log(f"✅ Local copy: {dest_file}", file=sys.stderr) + debug(f"Local copy: {dest_file}", file=sys.stderr) return str(dest_file) except Exception as exc: - log(f"❌ Local copy failed: {exc}", file=sys.stderr) + debug(f"Local copy failed: {exc}", file=sys.stderr) raise def search(self, query: str, **kwargs: Any) -> list[Dict[str, Any]]: @@ -200,7 +204,6 @@ class LocalStorageBackend(StorageBackend): # Try database search first (much faster than filesystem scan) try: - debug(f"Connecting to local library DB at {search_dir}") db = LocalLibraryDB(search_dir) cursor = db.connection.cursor() @@ -261,8 +264,9 @@ class LocalStorageBackend(StorageBackend): all_tags = [row[0] for row in cursor.fetchall()] results.append({ - "name": file_path.name, - "title": file_path.name, + "name": file_path.stem, + "title": file_path.stem, + "ext": file_path.suffix.lstrip('.'), "path": path_str, "target": path_str, "origin": "local", @@ -284,35 +288,60 @@ class LocalStorageBackend(StorageBackend): # 2. Simple tags (without namespace) containing the query # NOTE: Does NOT match namespaced tags (e.g., "joe" won't match "channel:Joe Mullan") # Use explicit namespace search for that (e.g., "channel:joe*") - query_pattern = f"%{query_lower}%" - debug(f"Performing filename/tag search: {query_pattern}") + + # Split query into terms for AND logic + terms = [t.strip() for t in query_lower.replace(',', ' ').split() if t.strip()] + if not terms: + terms = [query_lower] + + debug(f"Performing filename/tag search for terms: {terms}") # Fetch more results than requested to allow for filtering fetch_limit = (limit or 45) * 50 - cursor.execute(""" + # 1. Filename search (AND logic) + conditions = ["LOWER(f.file_path) LIKE ?" for _ in terms] + params = [f"%{t}%" for t in terms] + where_clause = " AND ".join(conditions) + + cursor.execute(f""" SELECT DISTINCT f.id, f.file_path, f.file_size FROM files f - WHERE LOWER(f.file_path) LIKE ? + WHERE {where_clause} ORDER BY f.file_path LIMIT ? 
- """, (query_pattern, fetch_limit)) + """, (*params, fetch_limit)) rows = cursor.fetchall() debug(f"Found {len(rows)} filename matches in DB (before whole-word filter)") - # Compile regex for whole word matching - try: - word_regex = re.compile(r'\b' + re.escape(query_lower) + r'\b', re.IGNORECASE) - except Exception: - word_regex = None + # Compile regex for whole word matching (only if single term, otherwise skip) + word_regex = None + if len(terms) == 1: + term = terms[0] + # Check if term contains wildcard characters + has_wildcard = '*' in term or '?' in term + + if has_wildcard: + # Use fnmatch for wildcard patterns (e.g., "sie*" matches "SiebeliebenWohl...") + try: + from fnmatch import translate + word_regex = re.compile(translate(term), re.IGNORECASE) + except Exception: + word_regex = None + else: + # Use word boundary for exact terms (backwards compatibility) + try: + word_regex = re.compile(r'\b' + re.escape(term) + r'\b', re.IGNORECASE) + except Exception: + word_regex = None seen_files = set() for file_id, file_path_str, size_bytes in rows: if not file_path_str or file_path_str in seen_files: continue - # Apply whole word filter on filename + # Apply whole word filter on filename if single term if word_regex: p = Path(file_path_str) if not word_regex.search(p.name): @@ -332,8 +361,9 @@ class LocalStorageBackend(StorageBackend): tags = [row[0] for row in cursor.fetchall()] results.append({ - "name": file_path.name, - "title": file_path.name, + "name": file_path.stem, + "title": file_path.stem, + "ext": file_path.suffix.lstrip('.'), "path": path_str, "target": path_str, "origin": "local", @@ -343,6 +373,12 @@ class LocalStorageBackend(StorageBackend): }) # Also search for simple tags (without namespace) containing the query + # Only perform tag search if single term, or if we want to support multi-term tag search + # For now, fallback to single pattern search for tags if multiple terms + # (searching for a tag that contains "term1 term2" or "term1,term2") + # This is less useful for AND logic across multiple tags, but consistent with previous behavior + query_pattern = f"%{query_lower}%" + cursor.execute(""" SELECT DISTINCT f.id, f.file_path, f.file_size FROM files f @@ -371,8 +407,9 @@ class LocalStorageBackend(StorageBackend): tags = [row[0] for row in cursor.fetchall()] results.append({ - "name": file_path.name, - "title": file_path.name, + "name": file_path.stem, + "title": file_path.stem, + "ext": file_path.suffix.lstrip('.'), "path": path_str, "target": path_str, "origin": "local", @@ -409,8 +446,9 @@ class LocalStorageBackend(StorageBackend): tags = [row[0] for row in cursor.fetchall()] results.append({ - "name": file_path.name, - "title": file_path.name, + "name": file_path.stem, + "title": file_path.stem, + "ext": file_path.suffix.lstrip('.'), "path": path_str, "target": path_str, "origin": "local", @@ -434,6 +472,11 @@ class LocalStorageBackend(StorageBackend): recursive = kwargs.get("recursive", True) pattern = "**/*" if recursive else "*" + # Split query into terms for AND logic + terms = [t.strip() for t in query_lower.replace(',', ' ').split() if t.strip()] + if not terms: + terms = [query_lower] + count = 0 for file_path in search_dir.glob(pattern): if not file_path.is_file(): @@ -442,14 +485,26 @@ class LocalStorageBackend(StorageBackend): if lower_name.endswith('.tags') or lower_name.endswith('.metadata') \ or lower_name.endswith('.notes') or lower_name.endswith('.tags.txt'): continue - if not (match_all or query_lower in lower_name): - continue + + if not 
match_all: + # Check if ALL terms are present in the filename + # For single terms with wildcards, use fnmatch; otherwise use substring matching + if len(terms) == 1 and ('*' in terms[0] or '?' in terms[0]): + # Wildcard pattern matching for single term + from fnmatch import fnmatch + if not fnmatch(lower_name, terms[0]): + continue + else: + # Substring matching for all terms (AND logic) + if not all(term in lower_name for term in terms): + continue size_bytes = file_path.stat().st_size path_str = str(file_path) results.append({ - "name": file_path.name, - "title": file_path.name, + "name": file_path.stem, + "title": file_path.stem, + "ext": file_path.suffix.lstrip('.'), "path": path_str, "target": path_str, "origin": "local", @@ -562,7 +617,7 @@ class HydrusStorageBackend(StorageBackend): raise Exception(f"Hydrus response missing file hash: {response}") file_hash = hydrus_hash - log(f"✅ File uploaded to Hydrus: {file_hash}", file=sys.stderr) + log(f"Hydrus: {file_hash}", file=sys.stderr) # Add tags if provided if tags: @@ -654,7 +709,8 @@ class HydrusStorageBackend(StorageBackend): # Fetch metadata for the found files results = [] query_lower = query.lower().strip() - search_terms = set(query_lower.split()) # For substring matching + # Split by comma or space for AND logic + search_terms = set(query_lower.replace(',', ' ').split()) # For substring matching if file_ids: metadata = client.fetch_file_metadata(file_ids=file_ids) @@ -852,6 +908,11 @@ class DebridStorageBackend(StorageBackend): # "*" means "match all" - include all magnets match_all = query_lower == "*" + # Split query into terms for AND logic + terms = [t.strip() for t in query_lower.replace(',', ' ').split() if t.strip()] + if not terms: + terms = [query_lower] + for magnet in magnets: filename = magnet.get('filename', '').lower() status_code = magnet.get('statusCode', 0) @@ -862,8 +923,9 @@ class DebridStorageBackend(StorageBackend): continue # Match query against filename (or match all if query is "*") - if not match_all and query_lower not in filename: - continue + if not match_all: + if not all(term in filename for term in terms): + continue matching_magnet_ids.append(magnet_id) magnet_info_map[magnet_id] = magnet @@ -952,6 +1014,102 @@ class DebridStorageBackend(StorageBackend): return result +class MatrixStorageBackend(StorageBackend): + """File storage backend for Matrix (Element) chat rooms.""" + + def get_name(self) -> str: + return "matrix" + + def upload(self, file_path: Path, **kwargs: Any) -> str: + """Upload file to Matrix room. + + Requires 'config' in kwargs with 'storage.matrix' settings: + - homeserver: URL of homeserver (e.g. https://matrix.org) + - user_id: User ID (e.g. @user:matrix.org) + - access_token: Access token (preferred) OR password + - room_id: Room ID to upload to (e.g. 
!roomid:matrix.org)
+        """
+        config = kwargs.get('config', {})
+        if not config:
+            raise ValueError("Config required for Matrix upload")
+
+        matrix_conf = config.get('storage', {}).get('matrix', {})
+        if not matrix_conf:
+            raise ValueError("Matrix storage not configured in config.json")
+
+        homeserver = matrix_conf.get('homeserver')
+        # user_id = matrix_conf.get('user_id') # Not strictly needed if we have token
+        access_token = matrix_conf.get('access_token')
+        room_id = matrix_conf.get('room_id')
+
+        if not homeserver or not room_id:
+            raise ValueError("Matrix homeserver and room_id required")
+
+        # Ensure homeserver has protocol
+        if not homeserver.startswith('http'):
+            homeserver = f"https://{homeserver}"
+
+        # Login if no access token (optional implementation, for now assume token)
+        if not access_token:
+            raise ValueError("Matrix access_token required (login not yet implemented)")
+
+        # 1. Upload Media
+        upload_url = f"{homeserver}/_matrix/media/r0/upload"
+        headers = {
+            "Authorization": f"Bearer {access_token}",
+            "Content-Type": "application/octet-stream" # Or guess mime type
+        }
+
+        import mimetypes
+        mime_type, _ = mimetypes.guess_type(file_path)
+        if mime_type:
+            headers["Content-Type"] = mime_type
+
+        filename = file_path.name
+
+        try:
+            with open(file_path, 'rb') as f:
+                resp = requests.post(upload_url, headers=headers, data=f, params={"filename": filename})
+
+            if resp.status_code != 200:
+                raise Exception(f"Matrix upload failed: {resp.text}")
+
+            content_uri = resp.json().get('content_uri')
+            if not content_uri:
+                raise Exception("No content_uri returned from Matrix upload")
+
+            # 2. Send Message
+            send_url = f"{homeserver}/_matrix/client/r0/rooms/{room_id}/send/m.room.message"
+
+            # Determine msgtype
+            msgtype = "m.file"
+            if mime_type:
+                if mime_type.startswith("image/"): msgtype = "m.image"
+                elif mime_type.startswith("video/"): msgtype = "m.video"
+                elif mime_type.startswith("audio/"): msgtype = "m.audio"
+
+            payload = {
+                "msgtype": msgtype,
+                "body": filename,
+                "url": content_uri,
+                "info": {
+                    "mimetype": mime_type,
+                    "size": file_path.stat().st_size
+                }
+            }
+
+            resp = requests.post(send_url, headers=headers, json=payload)
+            if resp.status_code != 200:
+                raise Exception(f"Matrix send message failed: {resp.text}")
+
+            event_id = resp.json().get('event_id')
+            return f"matrix://{room_id}/{event_id}"
+
+        except Exception as e:
+            log(f"❌ Matrix upload error: {e}", file=sys.stderr)
+            raise
+
+
+
 class FileStorage:
     """Unified file storage interface supporting multiple backend services.
 
@@ -997,6 +1155,9 @@ class FileStorage:
         # Include Debrid backend (API key optional - will raise on use if not provided)
         if debrid_api_key:
             self._backends["debrid"] = DebridStorageBackend(api_key=debrid_api_key)
+
+        # Include Matrix backend
+        self._backends["matrix"] = MatrixStorageBackend()
 
     def __getitem__(self, backend_name: str) -> StorageBackend:
        """Get a storage backend by name. 
diff --git a/helper/hydrus.py b/helper/hydrus.py index ff59d9e..e5d93fd 100644 --- a/helper/hydrus.py +++ b/helper/hydrus.py @@ -1411,7 +1411,7 @@ def get_client(config: dict[str, Any]) -> HydrusClient: cached_client = _hydrus_client_cache[cache_key] # If cached client has a session key, reuse it (don't re-acquire) if hasattr(cached_client, '_session_key') and cached_client._session_key: - debug(f"Reusing cached session key for {hydrus_url}") + # debug(f"Reusing cached session key for {hydrus_url}") return cached_client # If no session key in cache, try to get one try: diff --git a/helper/local_library.py b/helper/local_library.py index 1d4f2d9..d012428 100644 --- a/helper/local_library.py +++ b/helper/local_library.py @@ -230,6 +230,16 @@ class LocalLibraryDB: FOREIGN KEY (file_id) REFERENCES files(id) ON DELETE CASCADE ) """) + + cursor.execute(""" + CREATE TABLE IF NOT EXISTS playlists ( + id INTEGER PRIMARY KEY AUTOINCREMENT, + name TEXT UNIQUE NOT NULL, + items TEXT NOT NULL, + created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """) # Worker tracking tables (drop legacy workers table if still present) self._ensure_worker_tables(cursor) @@ -1386,6 +1396,104 @@ class LocalLibrarySearchOptimizer: """Fast tag-based search using database.""" if not self.db: return [] + + try: + cursor = self.db.connection.cursor() + cursor.execute(""" + SELECT f.file_path + FROM files f + JOIN tags t ON f.id = t.file_id + WHERE t.tag LIKE ? + LIMIT ? + """, (f"%{tag}%", limit)) + + return [Path(row[0]) for row in cursor.fetchall()] + except Exception as e: + logger.error(f"Tag search failed: {e}") + return [] + + def save_playlist(self, name: str, items: List[Dict[str, Any]]) -> bool: + """Save a playlist to the database.""" + if not self.db: + return False + try: + cursor = self.db.connection.cursor() + items_json = json.dumps(items) + cursor.execute(""" + INSERT INTO playlists (name, items, updated_at) + VALUES (?, ?, CURRENT_TIMESTAMP) + ON CONFLICT(name) DO UPDATE SET + items = excluded.items, + updated_at = CURRENT_TIMESTAMP + """, (name, items_json)) + self.db.connection.commit() + return True + except Exception as e: + logger.error(f"Failed to save playlist {name}: {e}") + return False + + def get_playlists(self) -> List[Dict[str, Any]]: + """Get all saved playlists.""" + if not self.db: + return [] + try: + cursor = self.db.connection.cursor() + cursor.execute("SELECT id, name, items, updated_at FROM playlists ORDER BY updated_at DESC") + results = [] + for row in cursor.fetchall(): + try: + items = json.loads(row['items']) + except json.JSONDecodeError: + items = [] + results.append({ + 'id': row['id'], + 'name': row['name'], + 'items': items, + 'updated_at': row['updated_at'] + }) + return results + except Exception as e: + logger.error(f"Failed to get playlists: {e}") + return [] + + def get_playlist(self, name: str) -> Optional[List[Dict[str, Any]]]: + """Get a specific playlist by name.""" + if not self.db: + return None + try: + cursor = self.db.connection.cursor() + cursor.execute("SELECT items FROM playlists WHERE name = ?", (name,)) + row = cursor.fetchone() + if row: + try: + return json.loads(row['items']) + except json.JSONDecodeError: + return [] + return None + except Exception as e: + logger.error(f"Failed to get playlist {name}: {e}") + return None + + def get_playlist_by_id(self, playlist_id: int) -> Optional[Tuple[str, List[Dict[str, Any]]]]: + """Get a specific playlist by ID. 
Returns (name, items).""" + if not self.db: + return None + try: + cursor = self.db.connection.cursor() + cursor.execute("SELECT name, items FROM playlists WHERE id = ?", (playlist_id,)) + row = cursor.fetchone() + if row: + try: + items = json.loads(row['items']) + return (row['name'], items) + except json.JSONDecodeError: + return (row['name'], []) + return None + except Exception as e: + logger.error(f"Failed to get playlist ID {playlist_id}: {e}") + return None + if not self.db: + return [] return self.db.search_by_tag(tag, limit) def search_by_hash(self, file_hash: str) -> Optional[Path]: diff --git a/helper/mpv_ipc.py b/helper/mpv_ipc.py new file mode 100644 index 0000000..ea12de5 --- /dev/null +++ b/helper/mpv_ipc.py @@ -0,0 +1,290 @@ +"""MPV IPC client for cross-platform communication. + +This module provides a cross-platform interface to communicate with mpv +using either named pipes (Windows) or Unix domain sockets (Linux/macOS). + +This is the central hub for all Python-mpv IPC communication. The Lua script +should use the Python CLI, which uses this module to manage mpv connections. +""" + +import json +import os +import platform +import socket +import time as _time +from typing import Any, Dict, Optional, List + +from helper.logger import debug + + +# Fixed pipe name for persistent MPV connection across all Python sessions +FIXED_IPC_PIPE_NAME = "mpv-medeia-macina" + + +class MPVIPCError(Exception): + """Raised when MPV IPC communication fails.""" + pass + + +def get_ipc_pipe_path() -> str: + """Get the fixed IPC pipe/socket path for persistent MPV connection. + + Uses a fixed name so all playback sessions connect to the same MPV + window/process instead of creating new instances. + + Returns: + Path to IPC pipe (Windows) or socket (Linux/macOS) + """ + system = platform.system() + + if system == "Windows": + return f"\\\\.\\pipe\\{FIXED_IPC_PIPE_NAME}" + elif system == "Darwin": # macOS + return f"/tmp/{FIXED_IPC_PIPE_NAME}.sock" + else: # Linux and others + return f"/tmp/{FIXED_IPC_PIPE_NAME}.sock" + + +class MPVIPCClient: + """Client for communicating with mpv via IPC socket/pipe. + + This is the unified interface for all Python code to communicate with mpv. + It handles platform-specific differences (Windows named pipes vs Unix sockets). + """ + + def __init__(self, socket_path: Optional[str] = None, timeout: float = 5.0): + """Initialize MPV IPC client. + + Args: + socket_path: Path to IPC socket/pipe. If None, uses the fixed persistent path. + timeout: Socket timeout in seconds. + """ + self.timeout = timeout + self.socket_path = socket_path or get_ipc_pipe_path() + self.sock = None + self.is_windows = platform.system() == "Windows" + + def connect(self) -> bool: + """Connect to mpv IPC socket. + + Returns: + True if connection successful, False otherwise. 
+ """ + try: + if self.is_windows: + # Windows named pipes + try: + # Try to open the named pipe + self.sock = open(self.socket_path, 'r+b', buffering=0) + return True + except (OSError, IOError) as exc: + debug(f"Failed to connect to MPV named pipe: {exc}") + return False + else: + # Unix domain socket (Linux, macOS) + if not os.path.exists(self.socket_path): + debug(f"IPC socket not found: {self.socket_path}") + return False + + self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self.sock.settimeout(self.timeout) + self.sock.connect(self.socket_path) + return True + except Exception as exc: + debug(f"Failed to connect to MPV IPC: {exc}") + self.sock = None + return False + + def send_command(self, command_data: Dict[str, Any] | List[Any]) -> Optional[Dict[str, Any]]: + """Send a command to mpv and get response. + + Args: + command_data: Command dict (e.g. {"command": [...]}) or list (e.g. ["loadfile", ...]) + + Returns: + Response dict with 'error' key (value 'success' on success), or None on error. + """ + if not self.sock: + if not self.connect(): + return None + + try: + # Format command as JSON (mpv IPC protocol) + if isinstance(command_data, list): + request = {"command": command_data} + else: + request = command_data + + # Add request_id if not present to match response + if "request_id" not in request: + request["request_id"] = int(_time.time() * 1000) % 100000 + + payload = json.dumps(request) + "\n" + + # Send command + if self.is_windows: + self.sock.write(payload.encode('utf-8')) + self.sock.flush() + else: + self.sock.sendall(payload.encode('utf-8')) + + # Receive response + # We need to read lines until we find the one with matching request_id + # or until timeout/error. MPV might send events in between. + start_time = _time.time() + while _time.time() - start_time < self.timeout: + response_data = b"" + if self.is_windows: + try: + response_data = self.sock.readline() + except (OSError, IOError): + return None + else: + try: + # This is simplistic for Unix socket (might not get full line) + # But for now assuming MPV sends line-buffered JSON + chunk = self.sock.recv(4096) + if not chunk: + break + response_data = chunk + # TODO: Handle partial lines if needed + except socket.timeout: + return None + + if not response_data: + break + + try: + lines = response_data.decode('utf-8').strip().split('\n') + for line in lines: + if not line: continue + resp = json.loads(line) + + # Check if this is the response to our request + if resp.get("request_id") == request.get("request_id"): + return resp + + # If it's an error without request_id (shouldn't happen for commands) + if "error" in resp and "request_id" not in resp: + # Might be an event or async error + pass + except json.JSONDecodeError: + pass + + return None + except Exception as exc: + debug(f"Error sending command to MPV: {exc}") + self.disconnect() + return None + + def disconnect(self) -> None: + """Disconnect from mpv IPC socket.""" + if self.sock: + try: + self.sock.close() + except Exception: + pass + self.sock = None + + def __del__(self) -> None: + """Cleanup on object destruction.""" + self.disconnect() + + def __enter__(self): + """Context manager entry.""" + self.connect() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Context manager exit.""" + self.disconnect() + + +def send_to_mpv(file_url: str, title: str, headers: Optional[Dict[str, str]] = None, + append: bool = True) -> bool: + """Send a file to be played in the existing MPV instance via IPC. 
+ + This attempts to send to an existing MPV instance. If it fails, the calling + code should start a new MPV instance with the IPC pipe. + + Args: + file_url: URL or path to file to play + title: Display title for the file + headers: Optional HTTP headers (dict) + append: If True, append to playlist; if False, replace + + Returns: + True if successfully sent to existing MPV, False if pipe unavailable. + """ + # Try to connect using the robust client + client = get_mpv_client() + if not client: + return False + + try: + # Command 1: Set headers if provided + if headers: + header_str = ",".join([f"{k}: {v}" for k, v in headers.items()]) + cmd_headers = { + "command": ["set_property", "http-header-fields", header_str], + "request_id": 0 + } + client.send_command(cmd_headers) + + # Command 2: Load file + # Use memory:// M3U to preserve title in playlist if provided + # This is required for YouTube URLs and proper playlist display + if title: + # Sanitize title for M3U (remove newlines) + safe_title = title.replace("\n", " ").replace("\r", "") + # M3U format: #EXTM3U\n#EXTINF:-1,Title\nURL + m3u_content = f"#EXTM3U\n#EXTINF:-1,{safe_title}\n{file_url}\n" + target = f"memory://{m3u_content}" + else: + target = file_url + + load_mode = "append-play" if append else "replace" + cmd_load = { + "command": ["loadfile", target, load_mode], + "request_id": 1 + } + + resp = client.send_command(cmd_load) + if not resp or resp.get('error') != 'success': + debug(f"MPV loadfile failed: {resp}") + return False + + # Command 3: Set title (metadata for display) - still useful for window title + if title: + safe_title_prop = title.replace('"', '\\"') + cmd_title = { + "command": ["set_property", "force-media-title", safe_title_prop], + "request_id": 2 + } + client.send_command(cmd_title) + + debug(f"Sent to existing MPV: {title}") + return True + + except Exception as e: + debug(f"Error in send_to_mpv: {e}") + return False + finally: + client.disconnect() + + + +def get_mpv_client(socket_path: Optional[str] = None) -> Optional[MPVIPCClient]: + """Get an MPV IPC client, attempting to connect. + + Args: + socket_path: Custom socket path (uses default if None) + + Returns: + Connected MPVIPCClient or None if connection fails. 
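# Example (sketch): queueing a file and falling back to a fresh mpv instance.
# send_to_mpv() returns False when nothing is listening on the fixed pipe; the
# fallback launch below is one plausible way to honour the "calling code should
# start a new MPV instance" note above, not the project's actual startup code.
import subprocess
from helper.mpv_ipc import send_to_mpv, get_ipc_pipe_path

url = "https://example.com/video.mp4"
if not send_to_mpv(url, title="Example video", append=True):
    subprocess.Popen(["mpv", f"--input-ipc-server={get_ipc_pipe_path()}", url])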
+ """ + client = MPVIPCClient(socket_path=socket_path) + if client.connect(): + return client + return None + diff --git a/helper/search_provider.py b/helper/search_provider.py index 93443c5..0a4a2ec 100644 --- a/helper/search_provider.py +++ b/helper/search_provider.py @@ -1660,7 +1660,7 @@ class FileProvider(ABC): self.name = self.__class__.__name__.replace("FileProvider", "").lower() @abstractmethod - def upload(self, file_path: str) -> str: + def upload(self, file_path: str, **kwargs: Any) -> str: """Upload a file and return the URL.""" pass @@ -1677,7 +1677,7 @@ class ZeroXZeroFileProvider(FileProvider): self.name = "0x0" self.base_url = "https://0x0.st" - def upload(self, file_path: str) -> str: + def upload(self, file_path: str, **kwargs: Any) -> str: """Upload file to 0x0.st.""" from helper.http_client import HTTPClient import os @@ -1707,9 +1707,137 @@ class ZeroXZeroFileProvider(FileProvider): return True +class MatrixFileProvider(FileProvider): + """File provider for Matrix (Element) chat rooms.""" + + def __init__(self, config: Optional[Dict[str, Any]] = None): + super().__init__(config) + self.name = "matrix" + + def validate(self) -> bool: + """Check if Matrix is configured.""" + if not self.config: return False + matrix_conf = self.config.get('storage', {}).get('matrix', {}) + return bool(matrix_conf.get('homeserver') and matrix_conf.get('room_id') and (matrix_conf.get('access_token') or matrix_conf.get('password'))) + + def upload(self, file_path: str, **kwargs: Any) -> str: + """Upload file to Matrix room.""" + import requests + import mimetypes + from pathlib import Path + import json + + debug(f"[Matrix] Starting upload for: {file_path}") + debug(f"[Matrix] kwargs: {kwargs}") + + path = Path(file_path) + if not path.exists(): + raise FileNotFoundError(f"File not found: {file_path}") + + matrix_conf = self.config.get('storage', {}).get('matrix', {}) + homeserver = matrix_conf.get('homeserver') + access_token = matrix_conf.get('access_token') + room_id = matrix_conf.get('room_id') + + if not homeserver.startswith('http'): + homeserver = f"https://{homeserver}" + + # 1. Upload Media + # Use v3 API + upload_url = f"{homeserver}/_matrix/media/v3/upload" + headers = { + "Authorization": f"Bearer {access_token}", + "Content-Type": "application/octet-stream" + } + + mime_type, _ = mimetypes.guess_type(path) + if mime_type: + headers["Content-Type"] = mime_type + + filename = path.name + + debug(f"[Matrix] Uploading media to {upload_url} with mime_type: {mime_type}") + + with open(path, 'rb') as f: + resp = requests.post(upload_url, headers=headers, data=f, params={"filename": filename}) + + if resp.status_code != 200: + raise Exception(f"Matrix upload failed: {resp.text}") + + content_uri = resp.json().get('content_uri') + if not content_uri: + raise Exception("No content_uri returned from Matrix upload") + + debug(f"[Matrix] Media uploaded, content_uri: {content_uri}") + + # 2. 
Send Message + # Use v3 API + send_url = f"{homeserver}/_matrix/client/v3/rooms/{room_id}/send/m.room.message" + + # Determine msgtype with better fallback for audio + msgtype = "m.file" + ext = path.suffix.lower() + + # Explicit check for common audio extensions to force m.audio + # This prevents audio files being treated as generic files or video + AUDIO_EXTS = {'.mp3', '.flac', '.wav', '.m4a', '.aac', '.ogg', '.opus', '.wma', '.mka', '.alac'} + VIDEO_EXTS = {'.mp4', '.mkv', '.webm', '.mov', '.avi', '.flv', '.mpg', '.mpeg', '.ts', '.m4v', '.wmv'} + IMAGE_EXTS = {'.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.tiff'} + + if ext in AUDIO_EXTS: + msgtype = "m.audio" + elif ext in VIDEO_EXTS: + msgtype = "m.video" + elif ext in IMAGE_EXTS: + msgtype = "m.image" + elif mime_type: + if mime_type.startswith("audio/"): msgtype = "m.audio" + elif mime_type.startswith("video/"): msgtype = "m.video" + elif mime_type.startswith("image/"): msgtype = "m.image" + + debug(f"[Matrix] Determined msgtype: {msgtype} (ext: {ext}, mime: {mime_type})") + + info = { + "mimetype": mime_type, + "size": path.stat().st_size + } + + # Try to get duration for audio/video + if msgtype in ("m.audio", "m.video"): + try: + # Try mutagen first (lightweight) + # Use dynamic import to avoid top-level dependency if not installed + # Note: mutagen.File is available at package level at runtime but type checkers might miss it + import mutagen # type: ignore + m = mutagen.File(str(path)) # type: ignore + if m and m.info and hasattr(m.info, 'length'): + duration_ms = int(m.info.length * 1000) + info['duration'] = duration_ms + debug(f"[Matrix] Extracted duration: {duration_ms}ms") + except Exception as e: + debug(f"[Matrix] Failed to extract duration: {e}") + + payload = { + "msgtype": msgtype, + "body": filename, + "url": content_uri, + "info": info + } + + debug(f"[Matrix] Sending message payload: {json.dumps(payload, indent=2)}") + + resp = requests.post(send_url, headers=headers, json=payload) + if resp.status_code != 200: + raise Exception(f"Matrix send message failed: {resp.text}") + + event_id = resp.json().get('event_id') + return f"https://matrix.to/#/{room_id}/{event_id}" + + # File provider registry _FILE_PROVIDERS = { "0x0": ZeroXZeroFileProvider, + "matrix": MatrixFileProvider, } diff --git a/hydrus_health_check.py b/hydrus_health_check.py index c8c2ddd..6e69bef 100644 --- a/hydrus_health_check.py +++ b/hydrus_health_check.py @@ -7,7 +7,7 @@ disables Hydrus features if the API is unavailable. import logging import sys -from helper.logger import log +from helper.logger import log, debug from typing import Tuple, Optional, Dict, Any from pathlib import Path @@ -28,6 +28,11 @@ _MPV_AVAILABLE: Optional[bool] = None _MPV_UNAVAILABLE_REASON: Optional[str] = None _MPV_CHECK_COMPLETE = False +# Global state for Matrix availability +_MATRIX_AVAILABLE: Optional[bool] = None +_MATRIX_UNAVAILABLE_REASON: Optional[str] = None +_MATRIX_CHECK_COMPLETE = False + def check_hydrus_availability(config: Dict[str, Any]) -> Tuple[bool, Optional[str]]: """Check if Hydrus API is available by pinging it. 
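# Example (sketch): uploading through the registry entry added below.
# _FILE_PROVIDERS is module-internal and is normally reached through whatever
# helper wraps it (not shown in this hunk, so this direct lookup is an
# assumption). The return value is the matrix.to permalink built from the
# room_id and the event_id of the sent message.
provider = _FILE_PROVIDERS["matrix"](config)
if provider.validate():
    link = provider.upload("/music/track1.flac")
    print(link)   # e.g. https://matrix.to/#/!room:server/$event_id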
@@ -80,20 +85,16 @@ def initialize_hydrus_health_check(config: Dict[str, Any]) -> None: _HYDRUS_CHECK_COMPLETE = True if is_available: - log("✅ Hydrus: ENABLED - All Hydrus features available", file=sys.stderr) + debug("✅ Hydrus: ENABLED - All Hydrus features available", file=sys.stderr) else: - log(f"⚠️ Hydrus: DISABLED - {reason or 'Connection failed'}", file=sys.stderr) - log("- Export functionality disabled", file=sys.stderr) - log("- Hydrus library features disabled", file=sys.stderr) - log("- Hydrus tag operations disabled", file=sys.stderr) - log("→ Local storage and All-Debrid features still available", file=sys.stderr) + debug(f"⚠️ Hydrus: DISABLED - {reason or 'Connection failed'}", file=sys.stderr) except Exception as e: logger.error(f"[Startup] Failed to initialize Hydrus health check: {e}", exc_info=True) _HYDRUS_AVAILABLE = False _HYDRUS_UNAVAILABLE_REASON = str(e) _HYDRUS_CHECK_COMPLETE = True - log(f"⚠️ Hydrus: DISABLED - Error during health check: {e}", file=sys.stderr) + debug(f"⚠️ Hydrus: DISABLED - Error during health check: {e}", file=sys.stderr) def check_debrid_availability(config: Dict[str, Any]) -> Tuple[bool, Optional[str]]: @@ -176,13 +177,10 @@ def initialize_debrid_health_check(config: Dict[str, Any]) -> None: _DEBRID_CHECK_COMPLETE = True if is_available: - log("✅ Debrid: ENABLED - All Debrid features available", file=sys.stderr) + debug("✅ Debrid: ENABLED - All Debrid features available", file=sys.stderr) logger.info("[Startup] Debrid health check PASSED") else: - log(f"⚠️ Debrid: DISABLED - {reason or 'Connection failed'}", file=sys.stderr) - log("- Debrid export disabled", file=sys.stderr) - log("- Debrid library features disabled", file=sys.stderr) - log("→ Local storage and Hydrus features still available", file=sys.stderr) + debug(f"⚠️ Debrid: DISABLED - {reason or 'Connection failed'}", file=sys.stderr) logger.warning(f"[Startup] Debrid health check FAILED: {reason}") except Exception as e: @@ -190,7 +188,7 @@ def initialize_debrid_health_check(config: Dict[str, Any]) -> None: _DEBRID_AVAILABLE = False _DEBRID_UNAVAILABLE_REASON = str(e) _DEBRID_CHECK_COMPLETE = True - log(f"⚠️ Debrid: DISABLED - Error during health check: {e}", file=sys.stderr) + debug(f"⚠️ Debrid: DISABLED - Error during health check: {e}", file=sys.stderr) def check_mpv_availability() -> Tuple[bool, Optional[str]]: @@ -263,11 +261,11 @@ def initialize_mpv_health_check() -> None: _MPV_CHECK_COMPLETE = True if is_available: - log("✅ MPV: ENABLED - All MPV features available", file=sys.stderr) + debug("✅ MPV: ENABLED - All MPV features available", file=sys.stderr) logger.info("[Startup] MPV health check PASSED") else: - log(f"⚠️ MPV: DISABLED - {reason or 'Connection failed'}", file=sys.stderr) - log("→ Hydrus features still available", file=sys.stderr) + debug(f"⚠️ MPV: DISABLED - {reason or 'Connection failed'}", file=sys.stderr) + debug("→ Hydrus features still available", file=sys.stderr) logger.warning(f"[Startup] MPV health check FAILED: {reason}") except Exception as e: @@ -275,7 +273,77 @@ def initialize_mpv_health_check() -> None: _MPV_AVAILABLE = False _MPV_UNAVAILABLE_REASON = str(e) _MPV_CHECK_COMPLETE = True - log(f"⚠️ MPV: DISABLED - Error during health check: {e}", file=sys.stderr) + debug(f"⚠️ MPV: DISABLED - Error during health check: {e}", file=sys.stderr) + + +def check_matrix_availability(config: Dict[str, Any]) -> Tuple[bool, Optional[str]]: + """Check if Matrix homeserver is reachable and credentials are valid. 
+ + Args: + config: Application configuration dictionary + + Returns: + Tuple of (is_available: bool, reason: Optional[str]) + """ + try: + import requests + matrix_conf = config.get('storage', {}).get('matrix', {}) + homeserver = matrix_conf.get('homeserver') + access_token = matrix_conf.get('access_token') + + if not homeserver: + return False, "Not configured" + + if not homeserver.startswith('http'): + homeserver = f"https://{homeserver}" + + # Check versions endpoint (no auth required) + try: + resp = requests.get(f"{homeserver}/_matrix/client/versions", timeout=5) + if resp.status_code != 200: + return False, f"Homeserver returned {resp.status_code}" + except Exception as e: + return False, f"Homeserver unreachable: {e}" + + # Check auth if token provided (whoami) + if access_token: + try: + headers = {"Authorization": f"Bearer {access_token}"} + resp = requests.get(f"{homeserver}/_matrix/client/v3/account/whoami", headers=headers, timeout=5) + if resp.status_code != 200: + return False, f"Authentication failed: {resp.status_code}" + except Exception as e: + return False, f"Auth check failed: {e}" + + return True, None + + except Exception as e: + return False, str(e) + + +def initialize_matrix_health_check(config: Dict[str, Any]) -> None: + """Initialize Matrix health check at startup.""" + global _MATRIX_AVAILABLE, _MATRIX_UNAVAILABLE_REASON, _MATRIX_CHECK_COMPLETE + + logger.info("[Startup] Starting Matrix health check...") + + try: + is_available, reason = check_matrix_availability(config) + _MATRIX_AVAILABLE = is_available + _MATRIX_UNAVAILABLE_REASON = reason + _MATRIX_CHECK_COMPLETE = True + + if is_available: + debug("Matrix: ENABLED - Homeserver reachable", file=sys.stderr) + else: + if reason != "Not configured": + debug(f"Matrix: DISABLED - {reason}", file=sys.stderr) + + except Exception as e: + logger.error(f"[Startup] Failed to initialize Matrix health check: {e}", exc_info=True) + _MATRIX_AVAILABLE = False + _MATRIX_UNAVAILABLE_REASON = str(e) + _MATRIX_CHECK_COMPLETE = True def is_hydrus_available() -> bool: @@ -423,3 +491,52 @@ def enable_mpv_features() -> None: _MPV_AVAILABLE = True _MPV_UNAVAILABLE_REASON = None logger.info("[MPV] Features manually enabled") + + +def is_matrix_available() -> bool: + """Check if Matrix is available (from cached health check). + + Returns: + True if Matrix is available, False otherwise + """ + return _MATRIX_AVAILABLE is True + + +def get_matrix_unavailable_reason() -> Optional[str]: + """Get the reason why Matrix is unavailable. + + Returns: + String explaining why Matrix is unavailable, or None if available + """ + return _MATRIX_UNAVAILABLE_REASON if not is_matrix_available() else None + + +def is_matrix_check_complete() -> bool: + """Check if the Matrix health check has been completed. + + Returns: + True if health check has run, False if still pending + """ + return _MATRIX_CHECK_COMPLETE + + +def disable_matrix_features() -> None: + """Manually disable all Matrix features (for testing/fallback). + + This can be called if Matrix connectivity is lost after startup. + """ + global _MATRIX_AVAILABLE, _MATRIX_UNAVAILABLE_REASON + _MATRIX_AVAILABLE = False + _MATRIX_UNAVAILABLE_REASON = "Manually disabled or lost connection" + logger.warning("[Matrix] Features manually disabled") + + +def enable_matrix_features() -> None: + """Manually enable Matrix features (for testing/fallback). + + This can be called if Matrix connectivity is restored after startup. 
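# Example (sketch): gating Matrix features on the cached health check.
# Assumes initialize_matrix_health_check(config) has already run at startup;
# upload_to_matrix() is a hypothetical caller-side helper, named here only to
# show where the guard belongs.
from hydrus_health_check import is_matrix_available, get_matrix_unavailable_reason

if is_matrix_available():
    upload_to_matrix(file_path)   # hypothetical helper
else:
    print(f"Matrix disabled: {get_matrix_unavailable_reason()}")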
+ """ + global _MATRIX_AVAILABLE, _MATRIX_UNAVAILABLE_REASON + _MATRIX_AVAILABLE = True + _MATRIX_UNAVAILABLE_REASON = None + logger.info("[Matrix] Features manually enabled") diff --git a/metadata.py b/metadata.py index 591d4d9..b906c16 100644 --- a/metadata.py +++ b/metadata.py @@ -5,7 +5,7 @@ import sys import shutil import sqlite3 import requests -from helper.logger import log +from helper.logger import log, debug from urllib.parse import urlsplit, urlunsplit, unquote from collections import deque from pathlib import Path @@ -1312,7 +1312,7 @@ def _read_sidecar_metadata(sidecar_path: Path) -> tuple[Optional[str], List[str] -def rename_by_metadata(file_path: Path, tags: Iterable[str]) -> Optional[Path]: +def rename(file_path: Path, tags: Iterable[str]) -> Optional[Path]: """Rename a file based on title: tag in the tags list. If a title: tag is present, renames the file and any .tags/.metadata sidecars. @@ -1350,13 +1350,13 @@ def rename_by_metadata(file_path: Path, tags: Iterable[str]) -> Optional[Path]: if new_path.exists(): try: new_path.unlink() - log(f"[rename_by_metadata] Replaced existing file: {new_name}", file=sys.stderr) + debug(f"Replaced existing file: {new_name}", file=sys.stderr) except Exception as e: - log(f"[rename_by_metadata] Warning: Could not replace target file {new_name}: {e}", file=sys.stderr) + debug(f"Warning: Could not replace target file {new_name}: {e}", file=sys.stderr) return None file_path.rename(new_path) - log(f"[rename_by_metadata] Renamed file: {old_name} → {new_name}", file=sys.stderr) + debug(f"Renamed file: {old_name} → {new_name}", file=sys.stderr) # Rename the .tags sidecar if it exists old_tags_path = file_path.parent / (old_name + '.tags') @@ -1369,21 +1369,21 @@ def rename_by_metadata(file_path: Path, tags: Iterable[str]) -> Optional[Path]: pass else: old_tags_path.rename(new_tags_path) - log(f"[rename_by_metadata] Renamed sidecar: {old_tags_path.name} → {new_tags_path.name}", file=sys.stderr) + debug(f"Renamed sidecar: {old_tags_path.name} → {new_tags_path.name}", file=sys.stderr) # Rename the .metadata sidecar if it exists old_metadata_path = file_path.parent / (old_name + '.metadata') if old_metadata_path.exists(): new_metadata_path = file_path.parent / (new_name + '.metadata') if new_metadata_path.exists(): - log(f"[rename_by_metadata] Warning: Target metadata already exists: {new_metadata_path.name}", file=sys.stderr) + debug(f"Warning: Target metadata already exists: {new_metadata_path.name}", file=sys.stderr) else: old_metadata_path.rename(new_metadata_path) - log(f"[rename_by_metadata] Renamed metadata: {old_metadata_path.name} → {new_metadata_path.name}", file=sys.stderr) + debug(f"Renamed metadata: {old_metadata_path.name} → {new_metadata_path.name}", file=sys.stderr) return new_path except Exception as exc: - log(f"[rename_by_metadata] Warning: Failed to rename file: {exc}", file=sys.stderr) + debug(f"Warning: Failed to rename file: {exc}", file=sys.stderr) return None @@ -1419,10 +1419,10 @@ def write_tags(media_path: Path, tags: Iterable[str], known_urls: Iterable[str], if db_tags: db.add_tags(media_path, db_tags) - log(f"Added tags to database for {media_path.name}") + debug(f"Added tags to database for {media_path.name}") return except Exception as e: - log(f"Failed to add tags to database: {e}", file=sys.stderr) + debug(f"Failed to add tags to database: {e}", file=sys.stderr) # Fall through to sidecar creation as fallback # Create sidecar path @@ -1449,7 +1449,7 @@ def write_tags(media_path: Path, tags: Iterable[str], 
known_urls: Iterable[str], if lines: sidecar.write_text("\n".join(lines) + "\n", encoding="utf-8") - log(f"Wrote tags to {sidecar}") + debug(f"Tags: {sidecar}") # Clean up legacy files for legacy_path in [media_path.with_name(media_path.name + '.tags'), media_path.with_name(media_path.name + '.tags.txt')]: @@ -1464,7 +1464,7 @@ def write_tags(media_path: Path, tags: Iterable[str], known_urls: Iterable[str], except FileNotFoundError: pass except OSError as exc: - log(f"Failed to write tag sidecar {sidecar}: {exc}", file=sys.stderr) + debug(f"Failed to write tag sidecar {sidecar}: {exc}", file=sys.stderr) def write_metadata(media_path: Path, hash_value: Optional[str] = None, known_urls: Optional[Iterable[str]] = None, relationships: Optional[Iterable[str]] = None, db=None) -> None: @@ -1503,10 +1503,10 @@ def write_metadata(media_path: Path, hash_value: Optional[str] = None, known_url if db_tags: db.add_tags(media_path, db_tags) - log(f"Added metadata to database for {media_path.name}") + debug(f"Added metadata to database for {media_path.name}") return except Exception as e: - log(f"Failed to add metadata to database: {e}", file=sys.stderr) + debug(f"Failed to add metadata to database: {e}", file=sys.stderr) # Fall through to sidecar creation as fallback # Create sidecar path @@ -1535,7 +1535,7 @@ def write_metadata(media_path: Path, hash_value: Optional[str] = None, known_url # Write metadata file if lines: sidecar.write_text("\n".join(lines) + "\n", encoding="utf-8") - log(f"Wrote metadata to {sidecar}") + debug(f"Wrote metadata to {sidecar}") else: # Remove if no content try: @@ -1543,7 +1543,7 @@ def write_metadata(media_path: Path, hash_value: Optional[str] = None, known_url except FileNotFoundError: pass except OSError as exc: - log(f"Failed to write metadata sidecar {sidecar}: {exc}", file=sys.stderr) + debug(f"Failed to write metadata sidecar {sidecar}: {exc}", file=sys.stderr) def extract_title(tags: Iterable[str]) -> Optional[str]: @@ -1892,7 +1892,7 @@ def extract_ytdlp_tags(entry: Dict[str, Any]) -> List[str]: Example: >>> entry = {'artist': 'The Beatles', 'album': 'Abbey Road', 'duration': 5247} >>> tags = extract_ytdlp_tags(entry) - >>> log(tags) + >>> debug(tags) ['artist:The Beatles', 'album:Abbey Road'] """ tags: List[str] = [] @@ -1986,7 +1986,7 @@ def dedup_tags_by_namespace(tags: List[str], keep_first: bool = True) -> List[st ... 'album:Abbey Road', 'artist:Beatles' ... 
] >>> dedup = dedup_tags_by_namespace(tags) - >>> log(dedup) + >>> debug(dedup) ['artist:Beatles', 'album:Abbey Road', 'tag:rock'] """ if not tags: @@ -2053,7 +2053,7 @@ def merge_multiple_tag_lists( >>> list1 = ['artist:Beatles', 'album:Abbey Road'] >>> list2 = ['artist:Beatles', 'album:Abbey Road', 'tag:rock'] >>> merged = merge_multiple_tag_lists([list1, list2]) - >>> log(merged) + >>> debug(merged) ['artist:Beatles', 'album:Abbey Road', 'tag:rock'] """ if not sources: @@ -2137,7 +2137,7 @@ def read_tags_from_file(file_path: Path) -> List[str]: Example: >>> tags = read_tags_from_file(Path('file.txt.tags')) - >>> log(tags) + >>> debug(tags) ['artist:Beatles', 'album:Abbey Road'] """ file_path = Path(file_path) @@ -2271,7 +2271,7 @@ def embed_metadata_in_file( # Check if FFmpeg is available ffmpeg_path = shutil.which('ffmpeg') if not ffmpeg_path: - log(f"⚠️ FFmpeg not found; cannot embed metadata in {file_path.name}", file=sys.stderr) + debug(f"⚠️ FFmpeg not found; cannot embed metadata in {file_path.name}", file=sys.stderr) return False # Create temporary file for output @@ -2294,18 +2294,18 @@ def embed_metadata_in_file( # Replace original with temp file file_path.unlink() temp_file.rename(file_path) - log(f"✅ Embedded metadata in file: {file_path.name}", file=sys.stderr) + debug(f"✅ Embedded metadata in file: {file_path.name}", file=sys.stderr) return True else: # Clean up temp file if it exists if temp_file.exists(): temp_file.unlink() - log(f"❌ FFmpeg metadata embedding failed for {file_path.name}", file=sys.stderr) + debug(f"❌ FFmpeg metadata embedding failed for {file_path.name}", file=sys.stderr) if result.stderr: # Safely decode stderr, ignoring invalid UTF-8 bytes try: stderr_text = result.stderr.decode('utf-8', errors='replace')[:200] - log(f"FFmpeg stderr: {stderr_text}", file=sys.stderr) + debug(f"FFmpeg stderr: {stderr_text}", file=sys.stderr) except Exception: pass return False @@ -2315,7 +2315,7 @@ def embed_metadata_in_file( temp_file.unlink() except Exception: pass - log(f"❌ Error embedding metadata: {exc}", file=sys.stderr) + debug(f"❌ Error embedding metadata: {exc}", file=sys.stderr) return False @@ -2402,7 +2402,7 @@ def normalize_tags_from_source( Example: >>> entry = {'artist': 'Beatles', 'album': 'Abbey Road'} >>> tags = normalize_tags_from_source(entry, 'ytdlp') - >>> log(tags) + >>> debug(tags) ['artist:Beatles', 'album:Abbey Road'] """ if source_type == 'auto': @@ -2600,10 +2600,10 @@ def imdb(imdb_id: str = typer.Argument(..., help="IMDb identifier (ttXXXXXXX)")) """Lookup an IMDb title.""" try: result = imdb_tag(imdb_id) - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, ensure_ascii=False), flush=True) raise typer.Exit(code=1) @app.command(help="Lookup a MusicBrainz entity") @@ -2614,10 +2614,10 @@ def musicbrainz( """Lookup a MusicBrainz entity.""" try: result = fetch_musicbrainz_tags(mbid, entity) - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, ensure_ascii=False), flush=True) raise typer.Exit(code=1) @app.command(name="remote-tags", help="Normalize a remote metadata payload") @@ -2633,10 +2633,10 @@ def 
remote_tags(payload: Optional[str] = typer.Option(None, "--payload", help="J if context and not isinstance(context, dict): raise ValueError("context must be an object") result = build_remote_bundle(metadata, existing, context) - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, ensure_ascii=False), flush=True) raise typer.Exit(code=1) @app.command(name="remote-fetch", help="Resolve remote metadata bundle") @@ -2645,10 +2645,10 @@ def remote_fetch(payload: Optional[str] = typer.Option(None, "--payload", help=" try: payload_data = _load_payload(payload) result = resolve_remote_metadata(payload_data) - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, ensure_ascii=False), flush=True) raise typer.Exit(code=1) @app.command(name="expand-tag", help="Expand metadata references into tags") @@ -2657,10 +2657,10 @@ def expand_tag(payload: Optional[str] = typer.Option(None, "--payload", help="JS try: payload_data = _load_payload(payload) result = expand_metadata_tag(payload_data) - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, ensure_ascii=False), flush=True) raise typer.Exit(code=1) @app.command(name="hydrus-fetch", help="Fetch Hydrus metadata for a file") @@ -2669,10 +2669,10 @@ def hydrus_fetch(payload: Optional[str] = typer.Option(None, "--payload", help=" try: payload_data = _load_payload(payload) result = fetch_hydrus_metadata(payload_data) - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, ensure_ascii=False), flush=True) raise typer.Exit(code=1) @app.command(name="hydrus-fetch-url", help="Fetch Hydrus metadata using a source URL") @@ -2681,10 +2681,10 @@ def hydrus_fetch_url(payload: Optional[str] = typer.Option(None, "--payload", he try: payload_data = _load_payload(payload) result = fetch_hydrus_metadata_by_url(payload_data) - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, ensure_ascii=False), flush=True) raise typer.Exit(code=1) @app.command(name="sync-sidecar", help="Synchronise .tags sidecar with supplied data") @@ -2693,10 +2693,10 @@ def sync_sidecar_cmd(payload: Optional[str] = typer.Option(None, "--payload", he try: payload_data = _load_payload(payload) result = sync_sidecar(payload_data) - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, 
ensure_ascii=False), flush=True) raise typer.Exit(code=1) @app.command(name="update-tag", help="Update or rename a tag") @@ -2705,10 +2705,10 @@ def update_tag_cmd(payload: Optional[str] = typer.Option(None, "--payload", help try: payload_data = _load_payload(payload) result = apply_tag_mutation(payload_data, 'update') - log(json.dumps(result, ensure_ascii=False), flush=True) + debug(json.dumps(result, ensure_ascii=False), flush=True) except Exception as exc: error_payload = {"error": str(exc)} - log(json.dumps(error_payload, ensure_ascii=False), flush=True) + debug(json.dumps(error_payload, ensure_ascii=False), flush=True) raise typer.Exit(code=1) def main(argv: Optional[List[str]] = None) -> int: @@ -3102,7 +3102,7 @@ def fetch_openlibrary_metadata_tags(isbn: Optional[str] = None, olid: Optional[s metadata_tags.append(subject_clean) except Exception as e: - log(f"⚠ Failed to fetch OpenLibrary metadata: {e}") + debug(f"⚠ Failed to fetch OpenLibrary metadata: {e}") return metadata_tags diff --git a/result_table.py b/result_table.py index 90d1467..95f75ea 100644 --- a/result_table.py +++ b/result_table.py @@ -251,8 +251,22 @@ class ResultTable: def _add_search_result(self, row: ResultRow, result: Any) -> None: """Extract and add SearchResult fields to row.""" # Core fields - if hasattr(result, 'title') and result.title: - row.add_column("Title", result.title) + title = getattr(result, 'title', '') + origin = getattr(result, 'origin', '').lower() + + # Handle extension separation for local files + extension = "" + if title and origin == 'local': + path_obj = Path(title) + if path_obj.suffix: + extension = path_obj.suffix.lstrip('.') + title = path_obj.stem + + if title: + row.add_column("Title", title) + + # Extension column + row.add_column("Ext", extension) if hasattr(result, 'origin') and result.origin: row.add_column("Source", result.origin) @@ -263,18 +277,6 @@ class ResultTable: if hasattr(result, 'media_kind') and result.media_kind: row.add_column("Type", result.media_kind) - # Target (file path or URL) - if hasattr(result, 'target') and result.target: - # Truncate long paths for display - target_str = str(result.target) - if len(target_str) > 60: - target_str = "..." + target_str[-57:] - row.add_column("Target", target_str) - - # Hash - if hasattr(result, 'hash_hex') and result.hash_hex: - row.add_column("Hash", result.hash_hex[:16] + "...") # First 16 chars - # Tags summary if hasattr(result, 'tag_summary') and result.tag_summary: tags_str = str(result.tag_summary) @@ -305,6 +307,7 @@ class ResultTable: Shows only essential columns: - Title (required) + - Ext (extension) - Origin (source backend) - Size (formatted MB, integer only) @@ -313,9 +316,23 @@ class ResultTable: """ # Title (required - use origin as fallback) title = getattr(item, 'title', None) or getattr(item, 'origin', 'Unknown') + origin = getattr(item, 'origin', '').lower() + + # Handle extension separation for local files + extension = "" + if title and origin == 'local': + # Try to split extension + path_obj = Path(title) + if path_obj.suffix: + extension = path_obj.suffix.lstrip('.') + title = path_obj.stem + if title: row.add_column("Title", title[:90] + ("..." if len(title) > 90 else "")) + # Extension column - always add to maintain column order + row.add_column("Ext", extension) + # Storage (source backend - hydrus, local, debrid, etc) if hasattr(item, 'origin') and item.origin: row.add_column("Storage", item.origin) @@ -364,9 +381,6 @@ class ResultTable: file_str = "..." 
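# Example (sketch): how the Title/Ext split above behaves for a local result.
# Only rows whose origin is "local" are split; other origins keep the full
# title and get an empty Ext column so the column order stays stable.
from pathlib import Path

title = "Abbey Road (1969).flac"
p = Path(title)
title, ext = (p.stem, p.suffix.lstrip(".")) if p.suffix else (title, "")
# title -> "Abbey Road (1969)", ext -> "flac"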
+ file_str[-57:] row.add_column("Path", file_str) - if hasattr(obj, 'file_hash') and obj.file_hash: - row.add_column("Hash", obj.file_hash[:16] + "...") - # Tags if hasattr(obj, 'tags') and obj.tags: tags_str = ", ".join(obj.tags[:3]) # First 3 tags @@ -406,7 +420,10 @@ class ResultTable: # Helper to determine if a field should be hidden from display def is_hidden_field(field_name: Any) -> bool: # Hide internal/metadata fields - hidden_fields = {'__', 'id', 'action', 'parent_id', 'is_temp', 'file_path', 'extra'} + hidden_fields = { + '__', 'id', 'action', 'parent_id', 'is_temp', 'file_path', 'extra', + 'target', 'hash', 'hash_hex', 'file_hash' + } if isinstance(field_name, str): if field_name.startswith('__'): return True @@ -417,6 +434,30 @@ class ResultTable: # Strip out hidden metadata fields (prefixed with __) visible_data = {k: v for k, v in data.items() if not is_hidden_field(k)} + # Handle extension separation for local files + origin = str(visible_data.get('origin', '') or visible_data.get('source', '')).lower() + + # Debug logging + # print(f"DEBUG: Processing dict result. Origin: {origin}, Keys: {list(visible_data.keys())}") + + if origin == 'local': + # Find title field + title_field = next((f for f in ['title', 'name', 'filename'] if f in visible_data), None) + if title_field: + title_val = str(visible_data[title_field]) + path_obj = Path(title_val) + if path_obj.suffix: + extension = path_obj.suffix.lstrip('.') + visible_data[title_field] = path_obj.stem + visible_data['ext'] = extension + # print(f"DEBUG: Split extension. Title: {visible_data[title_field]}, Ext: {extension}") + else: + visible_data['ext'] = "" + + # Ensure 'ext' is present so it gets picked up by priority_groups in correct order + if 'ext' not in visible_data: + visible_data['ext'] = "" + # Track which fields we've already added to avoid duplicates added_fields = set() column_count = 0 # Track total columns added @@ -467,10 +508,9 @@ class ResultTable: # Priority field groups - uses first matching field in each group priority_groups = [ ('title | name | filename', ['title', 'name', 'filename']), - ('origin | source', ['origin', 'source']), + ('ext', ['ext']), + ('origin | source | store', ['origin', 'source', 'store']), ('type | media_kind | kind', ['type', 'media_kind', 'kind']), - ('target | path | url', ['target', 'path', 'url']), - ('hash | hash_hex | file_hash', ['hash', 'hash_hex', 'file_hash']), ('tags | tag_summary', ['tags', 'tag_summary']), ('detail | description', ['detail', 'description']), ] @@ -485,7 +525,12 @@ class ResultTable: if len(value_str) > 60: value_str = value_str[:57] + "..." 
- row.add_column(field.replace('_', ' ').title(), value_str) + # Special case for Origin/Source -> Store to match user preference + col_name = field.replace('_', ' ').title() + if field in ['origin', 'source']: + col_name = "Store" + + row.add_column(col_name, value_str) added_fields.add(field) column_count += 1 break # Use first match in this group, skip rest @@ -509,106 +554,6 @@ class ResultTable: # Don't display it added_fields.add('_selection_args') - # Helper to determine if a field should be hidden from display - def is_hidden_field(field_name: Any) -> bool: - # Hide internal/metadata fields - hidden_fields = {'__', 'id', 'action', 'parent_id', 'is_temp', 'file_path', 'extra'} - if isinstance(field_name, str): - if field_name.startswith('__'): - return True - if field_name in hidden_fields: - return True - return False - - # Strip out hidden metadata fields (prefixed with __) - visible_data = {k: v for k, v in data.items() if not is_hidden_field(k)} - - # Track which fields we've already added to avoid duplicates - added_fields = set() - column_count = 0 # Track total columns added - - # Helper function to format values - def format_value(value: Any) -> str: - if isinstance(value, list): - formatted = ", ".join(str(v) for v in value[:3]) - if len(value) > 3: - formatted += f", +{len(value) - 3} more" - return formatted - return str(value) - - # Special handling for 'columns' field from search providers - # If present, use it to populate row columns dynamically - if 'columns' in visible_data and isinstance(visible_data['columns'], list) and visible_data['columns']: - try: - for col_name, col_value in visible_data['columns']: - # Skip the "#" column as ResultTable already adds row numbers - if col_name == '#': - continue - if column_count >= self.max_columns: - break - col_value_str = format_value(col_value) - if len(col_value_str) > 60: - col_value_str = col_value_str[:57] + "..." 
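# Example (sketch): column order produced by the priority groups above for a
# local dict result. "Store" is the display name now used for origin/source;
# the values below are illustrative only.
item = {
    "title": "Abbey Road (1969).flac",
    "origin": "local",
    "type": "audio",
    "tags": ["artist:The Beatles", "album:Abbey Road"],
}
# Rendered columns, in order:
#   Title="Abbey Road (1969)", Ext="flac", Store="local", Type="audio",
#   Tags="artist:The Beatles, album:Abbey Road"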
- row.add_column(col_name, col_value_str) - added_fields.add(col_name.lower()) - column_count += 1 - # Mark 'columns' as handled so we don't add it as a field - added_fields.add('columns') - # Also mark common fields that shouldn't be re-displayed if they're in columns - # This prevents showing both "Store" (from columns) and "Origin" (from data fields) - added_fields.add('origin') - added_fields.add('source') - added_fields.add('target') - added_fields.add('path') - added_fields.add('media_kind') - added_fields.add('detail') - added_fields.add('annotations') - added_fields.add('full_metadata') # Don't display full metadata as column - except Exception: - # Fall back to regular field handling if columns format is unexpected - pass - - # Only add priority groups if we haven't already filled columns from 'columns' field - if column_count == 0: - # Priority field groups - uses first matching field in each group - priority_groups = [ - ('title | name | filename', ['title', 'name', 'filename']), - ('origin | source', ['origin', 'source']), - ('type | media_kind | kind', ['type', 'media_kind', 'kind']), - ('target | path | url', ['target', 'path', 'url']), - ('hash | hash_hex | file_hash', ['hash', 'hash_hex', 'file_hash']), - ('tags | tag_summary', ['tags', 'tag_summary']), - ('detail | description', ['detail', 'description']), - ] - - # Add priority field groups first - use first match in each group - for _group_label, field_options in priority_groups: - if column_count >= self.max_columns: - break - for field in field_options: - if field in visible_data and field not in added_fields: - value_str = format_value(visible_data[field]) - if len(value_str) > 60: - value_str = value_str[:57] + "..." - - row.add_column(field.replace('_', ' ').title(), value_str) - added_fields.add(field) - column_count += 1 - break # Use first match in this group, skip rest - - # Add remaining fields only if we haven't hit max_columns (and no explicit columns were set) - if column_count < self.max_columns: - for key, value in visible_data.items(): - if column_count >= self.max_columns: - break - if key not in added_fields: # Only add if not already added - value_str = format_value(value) - if len(value_str) > 40: - value_str = value_str[:37] + "..." - row.add_column(key.replace('_', ' ').title(), value_str) - added_fields.add(key) # Track in added_fields to prevent re-adding - column_count += 1 - def _add_generic_object(self, row: ResultRow, obj: Any) -> None: """Extract and add fields from generic objects.""" if hasattr(obj, '__dict__'): diff --git a/test_search.py b/test_search.py new file mode 100644 index 0000000..5b5d30a --- /dev/null +++ b/test_search.py @@ -0,0 +1,23 @@ +#!/usr/bin/env python +import sys +sys.path.insert(0, '.') +from helper.file_storage import LocalStorageBackend +from config import get_local_storage_path +import json + +config = json.load(open('config.json')) +# Get the location string properly +location = get_local_storage_path(config) +if isinstance(location, dict): + location = location.get('path') or str(location) + +backend = LocalStorageBackend(config) + +# Test searches +for query in ['sie*', 'sie', '*']: + print(f"\n=== Searching for: {query} ===") + results = backend.search(query, location=str(location), limit=5) + print(f"Found {len(results)} results") + for r in results: + print(f" - {r.get('title')} ({r.get('ext')}) @ {r.get('path')}") +