commit f29709d951
parent 5482ee5586
Author: nose
Date: 2025-12-06 00:10:19 -08:00
20 changed files with 1353 additions and 419 deletions

CLI.py (55 changed lines)

@@ -1112,6 +1112,28 @@ def _execute_pipeline(tokens: list):
cmd_name = stage_tokens[0].replace("_", "-").lower()
stage_args = stage_tokens[1:]
# Bare '@' means "use the subject for the current result table" (e.g., the file whose tags/URLs are shown)
if cmd_name == "@":
subject = ctx.get_last_result_subject()
if subject is None:
print("No current result context available for '@'\n")
pipeline_status = "failed"
pipeline_error = "No result subject for @"
return
# Normalize to list for downstream expectations
piped_result = subject
try:
subject_items = subject if isinstance(subject, list) else [subject]
ctx.set_last_items(subject_items)
except Exception:
pass
if pipeline_session and worker_manager:
try:
worker_manager.log_step(pipeline_session.worker_id, "@ used current table subject")
except Exception:
pass
continue
# Check if this is a selection syntax (@N, @N-M, @{N,M,K}, @*, @3,5,7, @3-6,8) instead of a command
if cmd_name.startswith('@'):
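The branch above treats a bare '@' as "use the current result table's subject", while '@N', '@N-M', '@{N,M,K}', '@3,5,7' and '@*' select rows. A minimal sketch of that token grammar, with hypothetical names (parse_selection_token is not the project's actual parser):

    def parse_selection_token(token: str, count: int) -> list[int] | None:
        """Return 1-based indices for '@3', '@2-5', '@{1,3,7}', '@3,5,7' or '@*'.
        A bare '@' returns None so the caller can fall back to the table subject."""
        if token == "@":
            return None
        body = token[1:].strip("{}")
        if body == "*":
            return list(range(1, count + 1))
        indices: list[int] = []
        for part in body.split(","):
            part = part.strip()
            if "-" in part:
                start, end = part.split("-", 1)
                indices.extend(range(int(start), int(end) + 1))
            elif part:
                indices.append(int(part))
        return [i for i in indices if 1 <= i <= count]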
@@ -1280,8 +1302,11 @@ def _execute_pipeline(tokens: list):
}
# Commands that manage their own table/history state (e.g. get-tag)
self_managing_commands = {
'get-tag', 'get_tag', 'tags',
'search-file', 'search_file'
}
overlay_table = ctx.get_display_table() if hasattr(ctx, 'get_display_table') else None
if cmd_name in self_managing_commands:
# Command has already set the table and history
@@ -1302,22 +1327,28 @@ def _execute_pipeline(tokens: list):
for emitted in pipeline_ctx.emits:
table.add_result(emitted)
else:
table = ResultTable(table_title)
for emitted in pipeline_ctx.emits:
table.add_result(emitted)
if cmd_name in selectable_commands:
table = ResultTable(table_title)
for emitted in pipeline_ctx.emits:
table.add_result(emitted)
table.set_source_command(cmd_name, stage_args)
ctx.set_last_result_table(table, pipeline_ctx.emits)
elif cmd_name in display_only_commands:
table = ResultTable(table_title)
for emitted in pipeline_ctx.emits:
table.add_result(emitted)
# Display-only: show table but preserve search context
ctx.set_last_result_items_only(pipeline_ctx.emits)
else:
# Action commands: avoid overwriting search history/table unless a display overlay exists
if overlay_table is not None:
table = overlay_table
else:
table = None
if table is not None:
print()
print(table.format_plain())
else:
for emitted in pipeline_ctx.emits:
if isinstance(emitted, dict):
@@ -1518,7 +1549,8 @@ def _execute_cmdlet(cmd_name: str, args: list):
}
# Commands that manage their own table/history state (e.g. get-tag)
self_managing_commands = {
'get-tag', 'get_tag', 'tags',
'search-file', 'search_file'
}
if cmd_name in self_managing_commands:
@@ -1574,7 +1606,8 @@ def _execute_cmdlet(cmd_name: str, args: list):
'check-file-status', 'check_file_status'
}
self_managing_commands = {
'get-tag', 'get_tag', 'tags',
'search-file', 'search_file'
}
if cmd_name in self_managing_commands:


@@ -248,6 +248,20 @@ class PipelineExecutor:
piped_input: Any,
on_log: Optional[Callable[[str], None]],
) -> PipelineStageResult:
# Bare '@' means use the subject associated with the current result table (e.g., the file shown in a tag/URL view)
if token == "@":
subject = ctx.get_last_result_subject()
if subject is None:
stage.status = "failed"
stage.error = "Selection requested (@) but there is no current result context"
return stage
stage.emitted = subject if isinstance(subject, list) else [subject]
ctx.set_last_items(stage.emitted)
stage.status = "completed"
if on_log:
on_log("Selected current table subject via @")
return stage
selection = self._parse_selection(token)
items = piped_input or []
if not isinstance(items, list):
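For context, the stage above short-circuits a bare '@' to the current subject and otherwise falls through to _parse_selection. A hedged sketch of how a parsed selection could then be applied to the piped items (apply_selection is an assumed helper, not part of the diff):

    from typing import Any, List, Optional

    def apply_selection(items: List[Any], indices: Optional[List[int]], subject: Any) -> List[Any]:
        """indices=None models a bare '@' (use the current table subject);
        otherwise keep the 1-based positions that exist in items."""
        if indices is None:
            return subject if isinstance(subject, list) else [subject]
        return [items[i - 1] for i in indices if 0 < i <= len(items)]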


@@ -177,7 +177,7 @@ class SharedArgs:
LIBRARY = CmdletArg(
"library",
type="string",
choices=["hydrus", "local", "soulseek", "libgen", "ftp"],
description="Search library or source location."
)
@@ -209,7 +209,7 @@ class SharedArgs:
STORAGE = CmdletArg(
"storage",
type="enum",
choices=["hydrus", "local", "ftp", "matrix"],
required=False,
description="Storage location or destination for saving/uploading files.",
alias="s",
@@ -240,12 +240,12 @@ class SharedArgs:
def resolve_storage(storage_value: Optional[str], default: Optional[Path] = None) -> Path:
"""Resolve a storage location name to a filesystem Path.
Maps storage identifiers (hydrus, local, ftp) to their actual
filesystem paths. This is the single source of truth for storage location resolution.
Note: 0x0.st is now accessed via file providers (-provider 0x0), not storage.
Args:
storage_value: One of 'hydrus', 'local', 'ftp', or None
default: Path to return if storage_value is None (defaults to Videos)
Returns:
@@ -266,7 +266,6 @@ class SharedArgs:
storage_map = {
'local': Path.home() / "Videos",
'hydrus': Path.home() / ".hydrus" / "client_files",
'debrid': Path.home() / "Debrid",
'ftp': Path.home() / "FTP",
'matrix': Path.home() / "Matrix", # Placeholder, not used for upload path
}
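With 'debrid' removed, the resolver reduces to a small name-to-path map. A minimal sketch consistent with the entries shown above (resolve_storage_sketch is an illustrative name; the ~/Videos fallback follows the documented default):

    from pathlib import Path
    from typing import Optional

    def resolve_storage_sketch(storage_value: Optional[str], default: Optional[Path] = None) -> Path:
        # Mirrors the mapping shown in the diff after 'debrid' was removed.
        storage_map = {
            "local": Path.home() / "Videos",
            "hydrus": Path.home() / ".hydrus" / "client_files",
            "ftp": Path.home() / "FTP",
            "matrix": Path.home() / "Matrix",  # placeholder, not used for upload path
        }
        if not storage_value:
            return default or (Path.home() / "Videos")
        return storage_map.get(storage_value.lower(), default or (Path.home() / "Videos"))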


@@ -185,7 +185,13 @@ def _persist_local_metadata(
log(traceback.format_exc(), file=sys.stderr)
def _handle_local_transfer(
media_path: Path,
destination_root: Path,
result: Any,
config: Optional[Dict[str, Any]] = None,
export_mode: bool = False,
) -> Tuple[int, Optional[Path]]:
"""Transfer a file to local storage and return (exit_code, destination_path).
Args:
@@ -246,34 +252,60 @@ def _handle_local_transfer(media_path: Path, destination_root: Path, result: Any
relationships = extract_relationships(result)
duration = extract_duration(result)
# Skip title-based renaming for library mode (hash-based) but allow for export mode below
title_tag = next((t for t in merged_tags if str(t).strip().lower().startswith("title:")), None)
if title_tag:
try:
from helper.utils import unique_path
title_val = title_tag.split(":", 1)[1].strip()
# Sanitize filename (keep spaces, but remove illegal chars)
safe_title = "".join(c for c in title_val if c.isalnum() or c in " ._-()[]").strip()
if safe_title:
new_name = safe_title + media_path.suffix
new_path = media_path.parent / new_name
if new_path != media_path:
# Ensure we don't overwrite existing files
new_path = unique_path(new_path)
media_path.rename(new_path)
media_path = new_path
debug(f"Renamed source file to match title: {media_path.name}")
except Exception as e:
log(f"Warning: Failed to rename file to match title: {e}", file=sys.stderr)
try:
if export_mode:
title_tag = next((t for t in merged_tags if str(t).strip().lower().startswith("title:")), None)
title_value = ""
if title_tag:
title_value = title_tag.split(":", 1)[1].strip()
if not title_value:
title_value = media_path.stem.replace("_", " ").strip()
# Sanitize filename
safe_title = "".join(c for c in title_value if c.isalnum() or c in " ._-()[]{}'`").strip()
base_name = safe_title or media_path.stem
new_name = base_name + media_path.suffix
target_path = destination_root / new_name
destination_root.mkdir(parents=True, exist_ok=True)
if target_path.exists():
from helper.utils import unique_path
target_path = unique_path(target_path)
shutil.move(str(media_path), target_path)
# Move/copy sidecar files alongside
possible_sidecars = [
media_path.with_suffix(media_path.suffix + ".json"),
media_path.with_name(media_path.name + ".tags"),
media_path.with_name(media_path.name + ".tags.txt"),
media_path.with_name(media_path.name + ".metadata"),
media_path.with_name(media_path.name + ".notes"),
]
for sc in possible_sidecars:
try:
if sc.exists():
suffix_part = sc.name.replace(media_path.name, "", 1)
dest_sidecar = target_path.parent / f"{target_path.name}{suffix_part}"
dest_sidecar.parent.mkdir(parents=True, exist_ok=True)
shutil.move(str(sc), dest_sidecar)
except Exception:
pass
media_path = target_path
dest_file = str(target_path)
else:
# Ensure filename is the hash when adding to local storage
resolved_hash = _resolve_file_hash(result, sidecar_hash, media_path)
if resolved_hash:
hashed_name = resolved_hash + media_path.suffix
target_path = destination_root / hashed_name
try:
if target_path.exists():
target_path.unlink()
except Exception:
pass
if media_path != target_path:
media_path = media_path.rename(target_path)
dest_file = storage["local"].upload(media_path, location=str(destination_root), move=True)
except Exception as exc:
log(f"❌ Failed to move file into {destination_root}: {exc}", file=sys.stderr)
return 1, None
@@ -291,9 +323,12 @@ def _handle_local_transfer(media_path: Path, destination_root: Path, result: Any
if filename_title:
final_tags.insert(0, f"title:{filename_title}")
if not export_mode:
_persist_local_metadata(destination_root, dest_path, final_tags, merged_urls, file_hash, relationships, duration, media_kind)
_cleanup_sidecar_files(media_path, sidecar_path)
debug(f"✅ Moved to local library: {dest_path}")
else:
debug(f"✅ Exported to destination: {dest_path}")
return 0, dest_path
@@ -333,17 +368,26 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
provider_name: Optional[str] = None
delete_after_upload = False
# Check if -path argument was provided
path_arg = parsed.get("path")
if path_arg:
path_value = Path(str(path_arg).strip())
# If there is no piped result, treat -path as the source file (existing behavior)
if result is None:
if not path_value.exists():
log(f"❌ File not found: {path_value}")
return 1
result = {"target": str(path_value), "origin": "wild"}
log(f"Using direct file path: {path_value}")
else:
# Piped result present: treat -path as destination (export)
if not path_value.exists():
try:
path_value.mkdir(parents=True, exist_ok=True)
except Exception as exc:
log(f"❌ Cannot create destination directory {path_value}: {exc}", file=sys.stderr)
return 1
location = str(path_value)
# Get location from parsed args - now uses SharedArgs.STORAGE so key is "storage"
location = parsed.get("storage")
@@ -714,7 +758,7 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
return 1
log(f"Moving to local path: {destination_root}", file=sys.stderr)
exit_code, dest_path = _handle_local_transfer(media_path, destination_root, result, config, export_mode=True)
# After successful local transfer, emit result for pipeline continuation
if exit_code == 0 and dest_path:
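This change overloads -path: with no piped result it names the source file, with a piped result it becomes an export destination (created on demand) and the transfer runs with export_mode=True. A sketch of that decision under assumed names (classify_path_arg is not in the diff):

    from pathlib import Path
    from typing import Any, Optional, Tuple

    def classify_path_arg(path_arg: str, piped_result: Optional[Any]) -> Tuple[str, Path]:
        """Return ('source', path) when nothing is piped, else ('destination', path)."""
        path_value = Path(path_arg.strip())
        if piped_result is None:
            return "source", path_value  # caller verifies the file exists
        path_value.mkdir(parents=True, exist_ok=True)  # export destination directory
        return "destination", path_value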


@@ -79,6 +79,31 @@ def add(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
except Exception as exc:
log(f"Hydrus add-note failed: {exc}")
return 1
# Refresh notes view if we're operating on the currently selected subject
try:
from cmdlets import get_note as get_note_cmd # type: ignore
except Exception:
get_note_cmd = None
if get_note_cmd:
try:
subject = ctx.get_last_result_subject()
if subject is not None:
def norm(val: Any) -> str:
return str(val).lower()
target_hash = norm(hash_hex) if hash_hex else None
subj_hashes = []
if isinstance(subject, dict):
subj_hashes = [norm(v) for v in [subject.get("hydrus_hash"), subject.get("hash"), subject.get("hash_hex"), subject.get("file_hash")] if v]
else:
subj_hashes = [norm(getattr(subject, f, None)) for f in ("hydrus_hash", "hash", "hash_hex", "file_hash") if getattr(subject, f, None)]
if target_hash and target_hash in subj_hashes:
get_note_cmd.get_notes(subject, ["-hash", hash_hex], config)
return 0
except Exception:
pass
ctx.emit(f"Added note '{name}' ({len(text)} chars)")
return 0
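add-note, add-url, delete-note and the relationship cmdlets all repeat the same check: normalize the current subject's hash/path fields and compare them with the target before re-running the matching get-* view. A generic helper capturing that pattern might look like this (subject_matches is hypothetical, not part of the commit):

    from typing import Any, Optional

    HASH_FIELDS = ("hydrus_hash", "hash", "hash_hex", "file_hash")
    PATH_FIELDS = ("file_path", "path", "target")

    def subject_matches(subject: Any, hash_hex: Optional[str] = None, file_path: Optional[str] = None) -> bool:
        """Case-insensitive comparison of a subject's hash/path fields against a target."""
        def values(fields):
            if isinstance(subject, dict):
                return [str(subject.get(f)).lower() for f in fields if subject.get(f)]
            return [str(getattr(subject, f)).lower() for f in fields if getattr(subject, f, None)]
        if hash_hex and hash_hex.lower() in values(HASH_FIELDS):
            return True
        return bool(file_path) and file_path.lower() in values(PATH_FIELDS)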


@@ -145,6 +145,49 @@ def _resolve_king_reference(king_arg: str) -> Optional[str]:
return None
def _refresh_relationship_view_if_current(target_hash: Optional[str], target_path: Optional[str], other: Optional[str], config: Dict[str, Any]) -> None:
"""If the current subject matches the target, refresh relationships via get-relationship."""
try:
from cmdlets import get_relationship as get_rel_cmd # type: ignore
except Exception:
return
try:
subject = ctx.get_last_result_subject()
if subject is None:
return
def norm(val: Any) -> str:
return str(val).lower()
target_hashes = [norm(v) for v in [target_hash, other] if v]
target_paths = [norm(v) for v in [target_path, other] if v]
subj_hashes: list[str] = []
subj_paths: list[str] = []
if isinstance(subject, dict):
subj_hashes = [norm(v) for v in [subject.get("hydrus_hash"), subject.get("hash"), subject.get("hash_hex"), subject.get("file_hash")] if v]
subj_paths = [norm(v) for v in [subject.get("file_path"), subject.get("path"), subject.get("target")] if v]
else:
subj_hashes = [norm(getattr(subject, f, None)) for f in ("hydrus_hash", "hash", "hash_hex", "file_hash") if getattr(subject, f, None)]
subj_paths = [norm(getattr(subject, f, None)) for f in ("file_path", "path", "target") if getattr(subject, f, None)]
is_match = False
if target_hashes and any(h in subj_hashes for h in target_hashes):
is_match = True
if target_paths and any(p in subj_paths for p in target_paths):
is_match = True
if not is_match:
return
refresh_args: list[str] = []
if target_hash:
refresh_args.extend(["-hash", target_hash])
get_rel_cmd._run(subject, refresh_args, config)
except Exception:
pass
@register(["add-relationship", "add-rel"]) # primary name and alias
def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
"""Associate file relationships in Hydrus.
@@ -253,6 +296,7 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
f"[add-relationship] Set {rel_type} relationship: {file_hash} <-> {king_hash}",
file=sys.stderr
)
_refresh_relationship_view_if_current(file_hash, file_path_from_result, king_hash, config)
except Exception as exc:
log(f"Failed to set relationship: {exc}", file=sys.stderr)
return 1
@@ -280,6 +324,7 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
f"[add-relationship] Set {rel_type} relationship: {file_hash} <-> {existing_king}",
file=sys.stderr
)
_refresh_relationship_view_if_current(file_hash, file_path_from_result, existing_king, config)
except Exception as exc:
log(f"Failed to set relationship: {exc}", file=sys.stderr)
return 1
@@ -300,6 +345,7 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
with LocalLibrarySearchOptimizer(local_storage_path) as db:
db.set_relationship(file_path_obj, king_file_path, rel_type)
log(f"Set {rel_type} relationship: {file_path_obj.name} -> {king_file_path.name}", file=sys.stderr)
_refresh_relationship_view_if_current(None, str(file_path_obj), str(king_file_path), config)
else:
log(f"King file not found or invalid: {king_hash}", file=sys.stderr)
return 1
@@ -323,6 +369,7 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
with LocalLibrarySearchOptimizer(local_storage_path) as db:
db.set_relationship(file_path_obj, Path(king_path), rel_type)
log(f"Set {rel_type} relationship: {file_path_obj.name} -> {Path(king_path).name}", file=sys.stderr)
_refresh_relationship_view_if_current(None, str(file_path_obj), str(king_path), config)
except Exception as exc:
log(f"Failed to set relationship: {exc}", file=sys.stderr)
return 1


@@ -28,6 +28,171 @@ def _extract_title_tag(tags: List[str]) -> Optional[str]:
return None
def _apply_title_to_result(res: Any, title_value: Optional[str]) -> None:
"""Update result object/dict title fields and columns in-place."""
if not title_value:
return
if isinstance(res, models.PipeObject):
res.title = title_value
# Update columns if present (Title column assumed index 0)
if hasattr(res, "columns") and isinstance(res.columns, list) and res.columns:
label, *_ = res.columns[0]
if str(label).lower() == "title":
res.columns[0] = (res.columns[0][0], title_value)
elif isinstance(res, dict):
res["title"] = title_value
cols = res.get("columns")
if isinstance(cols, list):
updated = []
changed = False
for col in cols:
if isinstance(col, tuple) and len(col) == 2:
label, val = col
if str(label).lower() == "title":
updated.append((label, title_value))
changed = True
else:
updated.append(col)
else:
updated.append(col)
if changed:
res["columns"] = updated
def _matches_target(item: Any, hydrus_hash: Optional[str], file_hash: Optional[str], file_path: Optional[str]) -> bool:
"""Determine whether a result item refers to the given hash/path target."""
hydrus_hash_l = hydrus_hash.lower() if hydrus_hash else None
file_hash_l = file_hash.lower() if file_hash else None
file_path_l = file_path.lower() if file_path else None
def norm(val: Any) -> Optional[str]:
return str(val).lower() if val is not None else None
if isinstance(item, dict):
hashes = [
norm(item.get("hydrus_hash")),
norm(item.get("hash")),
norm(item.get("hash_hex")),
norm(item.get("file_hash")),
]
paths = [
norm(item.get("path")),
norm(item.get("file_path")),
norm(item.get("target")),
]
else:
hashes = [
norm(getattr(item, "hydrus_hash", None)),
norm(getattr(item, "hash_hex", None)),
norm(getattr(item, "file_hash", None)),
]
paths = [
norm(getattr(item, "path", None)),
norm(getattr(item, "file_path", None)),
norm(getattr(item, "target", None)),
]
if hydrus_hash_l and hydrus_hash_l in hashes:
return True
if file_hash_l and file_hash_l in hashes:
return True
if file_path_l and file_path_l in paths:
return True
return False
def _update_item_title_fields(item: Any, new_title: str) -> None:
"""Mutate an item to reflect a new title in plain fields and columns."""
if isinstance(item, models.PipeObject):
item.title = new_title
if hasattr(item, "columns") and isinstance(item.columns, list) and item.columns:
label, *_ = item.columns[0]
if str(label).lower() == "title":
item.columns[0] = (label, new_title)
elif isinstance(item, dict):
item["title"] = new_title
cols = item.get("columns")
if isinstance(cols, list):
updated_cols = []
changed = False
for col in cols:
if isinstance(col, tuple) and len(col) == 2:
label, val = col
if str(label).lower() == "title":
updated_cols.append((label, new_title))
changed = True
else:
updated_cols.append(col)
else:
updated_cols.append(col)
if changed:
item["columns"] = updated_cols
def _refresh_result_table_title(new_title: str, hydrus_hash: Optional[str], file_hash: Optional[str], file_path: Optional[str]) -> None:
"""Refresh the cached result table with an updated title and redisplay it."""
try:
last_table = ctx.get_last_result_table()
items = ctx.get_last_result_items()
if not last_table or not items:
return
updated_items = []
match_found = False
for item in items:
try:
if _matches_target(item, hydrus_hash, file_hash, file_path):
_update_item_title_fields(item, new_title)
match_found = True
except Exception:
pass
updated_items.append(item)
if not match_found:
return
from result_table import ResultTable # Local import to avoid circular dependency
new_table = ResultTable(getattr(last_table, "title", ""), title_width=getattr(last_table, "title_width", 80), max_columns=getattr(last_table, "max_columns", None))
if getattr(last_table, "source_command", None):
new_table.set_source_command(last_table.source_command, getattr(last_table, "source_args", []))
for item in updated_items:
new_table.add_result(item)
ctx.set_last_result_table_preserve_history(new_table, updated_items)
ctx.set_last_result_table_overlay(new_table, updated_items)
except Exception:
pass
def _refresh_tags_view(res: Any, hydrus_hash: Optional[str], file_hash: Optional[str], file_path: Optional[str], config: Dict[str, Any]) -> None:
"""Refresh tag display via get-tag. Prefer current subject; fall back to direct hash refresh."""
try:
from cmdlets import get_tag as get_tag_cmd # type: ignore
except Exception:
return
target_hash = hydrus_hash or file_hash
refresh_args: List[str] = []
if target_hash:
refresh_args = ["-hash", target_hash]
try:
subject = ctx.get_last_result_subject()
if subject and _matches_target(subject, hydrus_hash, file_hash, file_path):
get_tag_cmd._run(subject, refresh_args, config)
return
except Exception:
pass
if target_hash:
try:
get_tag_cmd._run(res, refresh_args, config)
except Exception:
pass
@register(["add-tag", "add-tags"])
@@ -148,7 +313,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
# Tags ARE provided - append them to each result and write sidecar files or add to Hydrus
sidecar_count = 0
total_new_tags = 0
total_modified = 0
for res in results:
# Handle both dict and PipeObject formats
file_path = None
@@ -180,9 +346,17 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
hydrus_hash = file_hash
if not storage_source and hydrus_hash and not file_path:
storage_source = 'hydrus'
# If we have a file path but no storage source, assume local to avoid sidecar spam
if not storage_source and file_path:
storage_source = 'local'
else:
ctx.emit(res)
continue
original_tags_lower = {str(t).lower() for t in existing_tags if isinstance(t, str)}
original_tags_snapshot = list(existing_tags)
original_title = _extract_title_tag(original_tags_snapshot)
removed_tags: List[str] = []
# Apply hash override if provided
if hash_override:
@@ -239,35 +413,47 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
if new_tag not in existing_tags:
existing_tags.append(new_tag)
# Compute new tags relative to original
new_tags_added = [t for t in existing_tags if isinstance(t, str) and t.lower() not in original_tags_lower]
total_new_tags += len(new_tags_added)
# Update the result's tags
if isinstance(res, models.PipeObject):
res.extra['tags'] = existing_tags
elif isinstance(res, dict):
res['tags'] = existing_tags
# If a title: tag was added, update the in-memory title and columns so downstream display reflects it immediately
title_value = _extract_title_tag(existing_tags)
_apply_title_to_result(res, title_value)
if isinstance(res, models.PipeObject):
res.title = title_value
elif isinstance(res, dict):
res['title'] = title_value
final_tags = existing_tags
# Determine where to add tags: Hydrus, local DB, or sidecar
if storage_source and storage_source.lower() == 'hydrus':
# Add tags to Hydrus using the API
target_hash = hydrus_hash or file_hash
if target_hash:
try:
tags_to_send = [t for t in existing_tags if isinstance(t, str) and t.lower() not in original_tags_lower]
hydrus_client = hydrus_wrapper.get_client(config)
service_name = hydrus_wrapper.get_tag_service_name(config)
if tags_to_send:
log(f"[add_tags] Adding {len(tags_to_send)} new tag(s) to Hydrus file: {target_hash}", file=sys.stderr)
hydrus_client.add_tags(target_hash, tags_to_send, service_name)
else:
log(f"[add_tags] No new tags to add for Hydrus file: {target_hash}", file=sys.stderr)
# Delete old namespace tags we replaced (e.g., previous title:)
if removed_tags:
unique_removed = sorted(set(removed_tags))
hydrus_client.delete_tags(target_hash, unique_removed, service_name)
if tags_to_send:
log(f"[add_tags] ✓ Tags added to Hydrus", file=sys.stderr)
elif removed_tags:
log(f"[add_tags] ✓ Removed {len(unique_removed)} tag(s) from Hydrus", file=sys.stderr)
sidecar_count += 1
if tags_to_send or removed_tags:
total_modified += 1
except Exception as e:
log(f"[add_tags] Warning: Failed to add tags to Hydrus: {e}", file=sys.stderr)
else:
@@ -278,10 +464,25 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
library_root = get_local_storage_path(config)
if library_root:
try:
path_obj = Path(file_path)
with LocalLibraryDB(library_root) as db:
db.save_tags(path_obj, existing_tags)
# Reload tags to reflect DB state (preserves auto-title logic)
refreshed_tags = db.get_tags(path_obj) or existing_tags
# Recompute title from refreshed tags for accurate display
refreshed_title = _extract_title_tag(refreshed_tags)
if refreshed_title:
_apply_title_to_result(res, refreshed_title)
res_tags = refreshed_tags or existing_tags
if isinstance(res, models.PipeObject):
res.extra['tags'] = res_tags
elif isinstance(res, dict):
res['tags'] = res_tags
log(f"[add_tags] Added {len(new_tags_added)} new tag(s); {len(res_tags)} total tag(s) stored locally", file=sys.stderr)
sidecar_count += 1
if new_tags_added or removed_tags:
total_modified += 1
final_tags = res_tags
except Exception as e:
log(f"[add_tags] Warning: Failed to save tags to local DB: {e}", file=sys.stderr)
else:
@@ -289,19 +490,24 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
else:
log(f"[add_tags] Warning: No file path for local storage, skipping", file=sys.stderr)
else:
# For other storage types or unknown sources, avoid writing sidecars to reduce clutter
# (local/hydrus are handled above).
ctx.emit(res)
continue
# If title changed, refresh the cached result table so the display reflects the new name
final_title = _extract_title_tag(final_tags)
if final_title and (not original_title or final_title.lower() != original_title.lower()):
_refresh_result_table_title(final_title, hydrus_hash or file_hash, file_hash, file_path)
# If tags changed, refresh tag view via get-tag (prefer current subject; fall back to hash refresh)
if new_tags_added or removed_tags:
_refresh_tags_view(res, hydrus_hash, file_hash, file_path, config)
# Emit the modified result
ctx.emit(res)
log(f"[add_tags] Added {total_new_tags} new tag(s) across {len(results)} item(s); modified {total_modified} item(s)", file=sys.stderr)
return 0
CMDLET = Cmdlet(
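The add-tag changes send only the delta to Hydrus: tags not already present are added, and replaced namespace tags (such as a previous title:) are deleted. A simplified sketch of that bookkeeping; diff_tags is an assumed name, and the real cmdlet only computes removals for replaced namespaces rather than a full set difference:

    from typing import Iterable, List, Tuple

    def diff_tags(original: Iterable[str], desired: Iterable[str]) -> Tuple[List[str], List[str]]:
        """Return (to_add, to_remove) for an incremental, case-insensitive tag sync."""
        orig_lower = {t.lower() for t in original}
        want_lower = {t.lower() for t in desired}
        to_add = [t for t in desired if t.lower() not in orig_lower]
        to_remove = [t for t in original if t.lower() not in want_lower]
        return to_add, to_remove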


@@ -13,6 +13,7 @@ from ._shared import Cmdlet, CmdletArg, normalize_hash
from helper.logger import log
from config import get_local_storage_path
from helper.local_library import LocalLibraryDB
from helper.logger import debug
CMDLET = Cmdlet(
name="add-url",
@@ -124,6 +125,39 @@ def add(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
return 1
if success:
# If we just mutated the currently displayed item, refresh URLs via get-url
try:
from cmdlets import get_url as get_url_cmd # type: ignore
except Exception:
get_url_cmd = None
if get_url_cmd:
try:
subject = ctx.get_last_result_subject()
if subject is not None:
def norm(val: Any) -> str:
return str(val).lower()
target_hash = norm(hash_hex) if hash_hex else None
target_path = norm(file_path) if 'file_path' in locals() else None
subj_hashes = []
subj_paths = []
if isinstance(subject, dict):
subj_hashes = [norm(v) for v in [subject.get("hydrus_hash"), subject.get("hash"), subject.get("hash_hex"), subject.get("file_hash")] if v]
subj_paths = [norm(v) for v in [subject.get("file_path"), subject.get("path"), subject.get("target")] if v]
else:
subj_hashes = [norm(getattr(subject, f, None)) for f in ("hydrus_hash", "hash", "hash_hex", "file_hash") if getattr(subject, f, None)]
subj_paths = [norm(getattr(subject, f, None)) for f in ("file_path", "path", "target") if getattr(subject, f, None)]
is_match = False
if target_hash and target_hash in subj_hashes:
is_match = True
if target_path and target_path in subj_paths:
is_match = True
if is_match:
refresh_args: list[str] = []
if hash_hex:
refresh_args.extend(["-hash", hash_hex])
get_url_cmd._run(subject, refresh_args, config)
except Exception:
debug("URL refresh skipped (error)")
return 0
if not hash_hex and not file_path:


@@ -3,6 +3,7 @@ from __future__ import annotations
from typing import Any, Dict, Sequence
import json
import pipeline as ctx
from helper import hydrus as hydrus_wrapper
from ._shared import Cmdlet, CmdletArg, normalize_hash
from helper.logger import log
@@ -75,5 +76,30 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
except Exception as exc:
log(f"Hydrus delete-note failed: {exc}")
return 1
# Refresh notes view if we're operating on the current subject
try:
from cmdlets import get_note as get_note_cmd # type: ignore
except Exception:
get_note_cmd = None
if get_note_cmd:
try:
subject = ctx.get_last_result_subject()
if subject is not None:
def norm(val: Any) -> str:
return str(val).lower()
target_hash = norm(hash_hex) if hash_hex else None
subj_hashes = []
if isinstance(subject, dict):
subj_hashes = [norm(v) for v in [subject.get("hydrus_hash"), subject.get("hash"), subject.get("hash_hex"), subject.get("file_hash")] if v]
else:
subj_hashes = [norm(getattr(subject, f, None)) for f in ("hydrus_hash", "hash", "hash_hex", "file_hash") if getattr(subject, f, None)]
if target_hash and target_hash in subj_hashes:
get_note_cmd.get_notes(subject, ["-hash", hash_hex], config)
return 0
except Exception:
pass
log(f"Deleted note '{name}'")
return 0


@@ -15,6 +15,49 @@ from helper.local_library import LocalLibrarySearchOptimizer
from config import get_local_storage_path
def _refresh_relationship_view_if_current(target_hash: Optional[str], target_path: Optional[str], other: Optional[str], config: Dict[str, Any]) -> None:
"""If the current subject matches the target, refresh relationships via get-relationship."""
try:
from cmdlets import get_relationship as get_rel_cmd # type: ignore
except Exception:
return
try:
subject = ctx.get_last_result_subject()
if subject is None:
return
def norm(val: Any) -> str:
return str(val).lower()
target_hashes = [norm(v) for v in [target_hash, other] if v]
target_paths = [norm(v) for v in [target_path, other] if v]
subj_hashes: list[str] = []
subj_paths: list[str] = []
if isinstance(subject, dict):
subj_hashes = [norm(v) for v in [subject.get("hydrus_hash"), subject.get("hash"), subject.get("hash_hex"), subject.get("file_hash")] if v]
subj_paths = [norm(v) for v in [subject.get("file_path"), subject.get("path"), subject.get("target")] if v]
else:
subj_hashes = [norm(getattr(subject, f, None)) for f in ("hydrus_hash", "hash", "hash_hex", "file_hash") if getattr(subject, f, None)]
subj_paths = [norm(getattr(subject, f, None)) for f in ("file_path", "path", "target") if getattr(subject, f, None)]
is_match = False
if target_hashes and any(h in subj_hashes for h in target_hashes):
is_match = True
if target_paths and any(p in subj_paths for p in target_paths):
is_match = True
if not is_match:
return
refresh_args: list[str] = []
if target_hash:
refresh_args.extend(["-hash", target_hash])
get_rel_cmd._run(subject, refresh_args, config)
except Exception:
pass
def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
"""Delete relationships from files.
@@ -137,6 +180,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
""", (file_id, json.dumps(relationships) if relationships else None))
db.db.connection.commit()
_refresh_relationship_view_if_current(None, str(file_path_obj), None, config)
deleted_count += 1
except Exception as exc: except Exception as exc:


@@ -1,6 +1,7 @@
from __future__ import annotations
from typing import Any, Dict, Sequence
from pathlib import Path
import json
import sys
@@ -12,6 +13,49 @@ from ._shared import Cmdlet, CmdletArg, normalize_hash, parse_tag_arguments
from helper.logger import debug, log
def _refresh_tag_view_if_current(hash_hex: str | None, file_path: str | None, config: Dict[str, Any]) -> None:
"""If the current subject matches the target, refresh tags via get-tag."""
try:
from cmdlets import get_tag as get_tag_cmd # type: ignore
except Exception:
return
try:
subject = ctx.get_last_result_subject()
if subject is None:
return
def norm(val: Any) -> str:
return str(val).lower()
target_hash = norm(hash_hex) if hash_hex else None
target_path = norm(file_path) if file_path else None
subj_hashes: list[str] = []
subj_paths: list[str] = []
if isinstance(subject, dict):
subj_hashes = [norm(v) for v in [subject.get("hydrus_hash"), subject.get("hash"), subject.get("hash_hex"), subject.get("file_hash")] if v]
subj_paths = [norm(v) for v in [subject.get("file_path"), subject.get("path"), subject.get("target")] if v]
else:
subj_hashes = [norm(getattr(subject, f, None)) for f in ("hydrus_hash", "hash", "hash_hex", "file_hash") if getattr(subject, f, None)]
subj_paths = [norm(getattr(subject, f, None)) for f in ("file_path", "path", "target") if getattr(subject, f, None)]
is_match = False
if target_hash and target_hash in subj_hashes:
is_match = True
if target_path and target_path in subj_paths:
is_match = True
if not is_match:
return
refresh_args: list[str] = []
if hash_hex:
refresh_args.extend(["-hash", hash_hex])
get_tag_cmd._run(subject, refresh_args, config)
except Exception:
pass
CMDLET = Cmdlet(
name="delete-tags",
summary="Remove tags from a Hydrus file.",
@@ -220,12 +264,69 @@ def _process_deletion(tags: list[str], hash_hex: str | None, file_path: str | No
if not tags:
return False
def _fetch_existing_tags() -> list[str]:
existing: list[str] = []
# Prefer local DB when we have a path and not explicitly hydrus
if file_path and (source == "local" or (source != "hydrus" and not hash_hex)):
try:
from helper.local_library import LocalLibraryDB
from config import get_local_storage_path
path_obj = Path(file_path)
local_root = get_local_storage_path(config) or path_obj.parent
with LocalLibraryDB(local_root) as db:
existing = db.get_tags(path_obj) or []
except Exception:
existing = []
elif hash_hex:
try:
client = hydrus_wrapper.get_client(config)
payload = client.fetch_file_metadata(
hashes=[hash_hex],
include_service_keys_to_tags=True,
include_file_urls=False,
)
items = payload.get("metadata") if isinstance(payload, dict) else None
meta = items[0] if isinstance(items, list) and items else None
if isinstance(meta, dict):
tags_payload = meta.get("tags")
if isinstance(tags_payload, dict):
seen: set[str] = set()
for svc_data in tags_payload.values():
if not isinstance(svc_data, dict):
continue
display = svc_data.get("display_tags")
if isinstance(display, list):
for t in display:
if isinstance(t, (str, bytes)):
val = str(t).strip()
if val and val not in seen:
seen.add(val)
existing.append(val)
storage = svc_data.get("storage_tags")
if isinstance(storage, dict):
current_list = storage.get("0") or storage.get(0)
if isinstance(current_list, list):
for t in current_list:
if isinstance(t, (str, bytes)):
val = str(t).strip()
if val and val not in seen:
seen.add(val)
existing.append(val)
except Exception:
existing = []
return existing
# Safety: only block if this deletion would remove the final title tag
title_tags = [t for t in tags if isinstance(t, str) and t.lower().startswith("title:")]
if title_tags:
existing_tags = _fetch_existing_tags()
current_titles = [t for t in existing_tags if isinstance(t, str) and t.lower().startswith("title:")]
del_title_set = {t.lower() for t in title_tags}
remaining_titles = [t for t in current_titles if t.lower() not in del_title_set]
if current_titles and not remaining_titles:
log("Cannot delete the last title: tag. Add a replacement title first (add-tag \"title:new title\").", file=sys.stderr)
return False
if not hash_hex and not file_path:
log("Item does not include a hash or file path")
@@ -253,6 +354,7 @@ def _process_deletion(tags: list[str], hash_hex: str | None, file_path: str | No
with LocalLibraryDB(local_root) as db:
db.remove_tags(path_obj, tags)
debug(f"Removed {len(tags)} tag(s) from {path_obj.name} (local)")
_refresh_tag_view_if_current(hash_hex, file_path, config)
return True
except Exception as exc: except Exception as exc:
@@ -276,6 +378,7 @@ def _process_deletion(tags: list[str], hash_hex: str | None, file_path: str | No
preview = hash_hex[:12] + ('' if len(hash_hex) > 12 else '')
debug(f"Removed {len(tags)} tag(s) from {preview} via '{service_name}'.")
_refresh_tag_view_if_current(hash_hex, None, config)
return True
except Exception as exc: except Exception as exc:
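delete-tags now refuses a deletion only when it would strip the final title: tag. The rule can be expressed as a small predicate (would_remove_last_title is an illustrative name; the cmdlet inlines this logic):

    from typing import Iterable

    def would_remove_last_title(existing_tags: Iterable[str], tags_to_delete: Iterable[str]) -> bool:
        """True when the file has title: tags and every one of them is slated for deletion."""
        current_titles = [t for t in existing_tags if t.lower().startswith("title:")]
        delete_set = {t.lower() for t in tags_to_delete if t.lower().startswith("title:")}
        remaining = [t for t in current_titles if t.lower() not in delete_set]
        return bool(current_titles) and not remaining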


@@ -8,7 +8,7 @@ from pathlib import Path
from . import register
from helper import hydrus as hydrus_wrapper
from ._shared import Cmdlet, CmdletArg, normalize_hash
from helper.logger import debug, log
from config import get_local_storage_path
from helper.local_library import LocalLibraryDB
import pipeline as ctx
@@ -152,5 +152,43 @@ def _delete_single(result: Any, url: str, override_hash: str | None, config: Dic
success = True
except Exception as exc:
log(f"Hydrus del-url failed: {exc}", file=sys.stderr)
if success:
try:
from cmdlets import get_url as get_url_cmd # type: ignore
except Exception:
get_url_cmd = None
if get_url_cmd:
try:
subject = ctx.get_last_result_subject()
if subject is not None:
def norm(val: Any) -> str:
return str(val).lower()
target_hash = norm(hash_hex) if hash_hex else None
target_path = norm(file_path) if file_path else None
subj_hashes = []
subj_paths = []
if isinstance(subject, dict):
subj_hashes = [norm(v) for v in [subject.get("hydrus_hash"), subject.get("hash"), subject.get("hash_hex"), subject.get("file_hash")] if v]
subj_paths = [norm(v) for v in [subject.get("file_path"), subject.get("path"), subject.get("target")] if v]
else:
subj_hashes = [norm(getattr(subject, f, None)) for f in ("hydrus_hash", "hash", "hash_hex", "file_hash") if getattr(subject, f, None)]
subj_paths = [norm(getattr(subject, f, None)) for f in ("file_path", "path", "target") if getattr(subject, f, None)]
is_match = False
if target_hash and target_hash in subj_hashes:
is_match = True
if target_path and target_path in subj_paths:
is_match = True
if is_match:
refresh_args: list[str] = []
if hash_hex:
refresh_args.extend(["-hash", hash_hex])
get_url_cmd._run(subject, refresh_args, config)
except Exception:
debug("URL refresh skipped (error)")
return success


@@ -21,7 +21,7 @@ from typing import Any, Dict, List, Optional, Sequence, Tuple
import pipeline as ctx
from helper import hydrus
from helper.local_library import read_sidecar, write_sidecar, find_sidecar, LocalLibraryDB
from ._shared import normalize_hash, looks_like_hash, Cmdlet, CmdletArg, SharedArgs, parse_cmdlet_args
from config import get_local_storage_path
@@ -105,7 +105,8 @@ def _emit_tags_as_table(
service_name: Optional[str] = None,
config: Dict[str, Any] = None,
item_title: Optional[str] = None,
file_path: Optional[str] = None,
subject: Optional[Any] = None,
) -> None:
"""Emit tags as TagItem objects and display via ResultTable.
@@ -144,9 +145,9 @@ def _emit_tags_as_table(
# Use overlay mode so it doesn't push the previous search to history stack
# This makes get-tag behave like a transient view
try:
ctx.set_last_result_table_overlay(table, tag_items, subject)
except AttributeError:
ctx.set_last_result_table(table, tag_items, subject)
# Note: CLI will handle displaying the table via ResultTable formatting
def _summarize_tags(tags_list: List[str], limit: int = 8) -> str:
"""Create a summary of tags for display."""
@@ -443,7 +444,10 @@ def _emit_tag_payload(source: str, tags_list: List[str], *, hash_value: Optional
def _extract_scrapable_identifiers(tags_list: List[str]) -> Dict[str, str]:
"""Extract scrapable identifiers from tags."""
identifiers = {}
scrapable_prefixes = {
'openlibrary', 'isbn', 'isbn_10', 'isbn_13',
'musicbrainz', 'musicbrainzalbum', 'imdb', 'tmdb', 'tvdb'
}
for tag in tags_list:
if not isinstance(tag, str) or ':' not in tag:
@@ -453,9 +457,18 @@ def _extract_scrapable_identifiers(tags_list: List[str]) -> Dict[str, str]:
if len(parts) != 2:
continue
key_raw = parts[0].strip().lower()
key = key_raw.replace('-', '_')
if key == 'isbn10':
key = 'isbn_10'
elif key == 'isbn13':
key = 'isbn_13'
value = parts[1].strip()
# Normalize ISBN values by removing hyphens for API friendliness
if key.startswith('isbn'):
value = value.replace('-', '')
if key in scrapable_prefixes and value:
identifiers[key] = value
@@ -965,8 +978,8 @@ def _perform_scraping(tags_list: List[str]) -> List[str]:
if olid:
log(f"Scraping OpenLibrary: {olid}")
new_tags.extend(_scrape_openlibrary_metadata(olid))
elif 'isbn_13' in identifiers or 'isbn_10' in identifiers or 'isbn' in identifiers:
isbn = identifiers.get('isbn_13') or identifiers.get('isbn_10') or identifiers.get('isbn')
if isbn:
log(f"Scraping ISBN: {isbn}")
new_tags.extend(_scrape_isbn_metadata(isbn))
@@ -991,13 +1004,13 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
Usage:
get-tag [-hash <sha256>] [--store <key>] [--emit]
get-tag -scrape <url|provider>
Options:
-hash <sha256>: Override hash to use instead of result's hash_hex
--store <key>: Store result to this key for pipeline
--emit: Emit result without interactive prompt (quiet mode)
-scrape <url|provider>: Scrape metadata from URL or provider name (itunes, openlibrary, googlebooks)
"""
# Helper to get field from both dict and object
def get_field(obj: Any, field: str, default: Any = None) -> Any:
@@ -1008,13 +1021,26 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
# Parse arguments using shared parser
parsed_args = parse_cmdlet_args(args, CMDLET)
# Detect if -scrape flag was provided without a value (parse_cmdlet_args skips missing values)
scrape_flag_present = any(str(arg).lower() in {"-scrape", "--scrape"} for arg in args)
# Extract values
hash_override_raw = parsed_args.get("hash")
hash_override = normalize_hash(hash_override_raw)
store_key = parsed_args.get("store")
emit_requested = parsed_args.get("emit", False)
scrape_url = parsed_args.get("scrape")
scrape_requested = scrape_flag_present or scrape_url is not None
if hash_override_raw is not None:
if not hash_override or not looks_like_hash(hash_override):
log("Invalid hash format: expected 64 hex characters", file=sys.stderr)
return 1
if scrape_requested and (not scrape_url or str(scrape_url).strip() == ""):
log("-scrape requires a URL or provider name", file=sys.stderr)
return 1
# Handle URL or provider scraping mode
if scrape_requested and scrape_url:
@@ -1041,18 +1067,51 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
log(f"Unknown metadata provider: {scrape_url}", file=sys.stderr)
return 1
# Prefer identifier tags (ISBN/OLID/etc.) when available; fallback to title/filename
identifier_tags: List[str] = []
result_tags = get_field(result, "tags", None)
if isinstance(result_tags, list):
identifier_tags = [str(t) for t in result_tags if isinstance(t, (str, bytes))]
# Try local sidecar if no tags present on result
if not identifier_tags:
file_path = get_field(result, "target", None) or get_field(result, "path", None) or get_field(result, "file_path", None) or get_field(result, "filename", None)
if isinstance(file_path, str) and file_path and not file_path.lower().startswith(("http://", "https://")):
try:
media_path = Path(str(file_path))
if media_path.exists():
tags_from_sidecar = read_sidecar(media_path)
if isinstance(tags_from_sidecar, list):
identifier_tags = [str(t) for t in tags_from_sidecar if isinstance(t, (str, bytes))]
except Exception:
pass
identifiers = _extract_scrapable_identifiers(identifier_tags)
identifier_query: Optional[str] = None
if identifiers:
if provider.name in {"openlibrary", "googlebooks", "google"}:
identifier_query = identifiers.get("isbn_13") or identifiers.get("isbn_10") or identifiers.get("isbn") or identifiers.get("openlibrary")
elif provider.name == "itunes":
identifier_query = identifiers.get("musicbrainz") or identifiers.get("musicbrainzalbum")
# Determine query from identifier first, else title on the result or filename
title_hint = get_field(result, "title", None) or get_field(result, "name", None)
if not title_hint:
file_path = get_field(result, "path", None) or get_field(result, "filename", None)
if file_path:
title_hint = Path(str(file_path)).stem
query_hint = identifier_query or title_hint
if not query_hint:
log("No title or identifier available to search for metadata", file=sys.stderr)
return 1
if identifier_query:
log(f"Using identifier for metadata search: {identifier_query}")
else:
log(f"Using title for metadata search: {query_hint}")
items = provider.search(query_hint, limit=10)
if not items: if not items:
log("No metadata results found", file=sys.stderr) log("No metadata results found", file=sys.stderr)
return 1 return 1
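The identifier-priority branch above relies on _extract_scrapable_identifiers, which is not part of this hunk. A minimal sketch of the behaviour it is assumed to have (a namespace-to-value dict built from identifier-style tags); the helper name suffix and the exact namespace set are assumptions:
def _extract_scrapable_identifiers_sketch(tags):
    """Collect identifier tags (e.g. "isbn_13:978...", "openlibrary:OL...") into a dict."""
    wanted = {"isbn", "isbn_10", "isbn_13", "openlibrary", "musicbrainz", "musicbrainzalbum"}
    found = {}
    for tag in tags:
        namespace, sep, value = str(tag).partition(":")
        if not sep:
            continue
        namespace = namespace.strip().lower()
        value = value.strip()
        if namespace in wanted and value and namespace not in found:
            found[namespace] = value
    return found
# e.g. ["title:Dune", "isbn_13:9780441013593"] -> {"isbn_13": "9780441013593"}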
@@ -1212,11 +1271,46 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
# Always output to ResultTable (pipeline mode only) # Always output to ResultTable (pipeline mode only)
# Extract title for table header # Extract title for table header
item_title = get_field(result, "title", None) or get_field(result, "name", None) or get_field(result, "filename", None) item_title = get_field(result, "title", None) or get_field(result, "name", None) or get_field(result, "filename", None)
# Build a subject payload representing the file whose tags are being shown
subject_origin = get_field(result, "origin", None) or get_field(result, "source", None) or source
subject_payload: Dict[str, Any] = {
"tags": list(current),
"title": item_title,
"name": item_title,
"origin": subject_origin,
"source": subject_origin,
"storage_source": subject_origin,
"service_name": service_name,
"extra": {
"tags": list(current),
"storage_source": subject_origin,
"hydrus_hash": hash_hex,
},
}
if hash_hex:
subject_payload.update({
"hash": hash_hex,
"hash_hex": hash_hex,
"file_hash": hash_hex,
"hydrus_hash": hash_hex,
})
if local_path:
try:
path_text = str(local_path)
subject_payload.update({
"file_path": path_text,
"path": path_text,
"target": path_text,
})
subject_payload["extra"]["file_path"] = path_text
except Exception:
pass
if source == "hydrus": if source == "hydrus":
_emit_tags_as_table(current, hash_hex=hash_hex, source="hydrus", service_name=service_name, config=config, item_title=item_title) _emit_tags_as_table(current, hash_hex=hash_hex, source="hydrus", service_name=service_name, config=config, item_title=item_title, subject=subject_payload)
else: else:
_emit_tags_as_table(current, hash_hex=hash_hex, source="local", service_name=None, config=config, item_title=item_title, file_path=str(local_path) if local_path else None) _emit_tags_as_table(current, hash_hex=hash_hex, source="local", service_name=None, config=config, item_title=item_title, file_path=str(local_path) if local_path else None, subject=subject_payload)
# If emit requested or store key provided, emit payload # If emit requested or store key provided, emit payload
if emit_mode: if emit_mode:
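A hypothetical pipeline showing what the subject payload above is for, assuming the cmdlets compose as this commit suggests (get-tag passes subject_payload through its subject= parameter, and the bare '@' stage re-injects it as the piped result):
#   search-file "dune" | @1 | get-tag | @ | add-tag "read:yes"
# After get-tag renders its tag table, '@' resolves to subject_payload (the file's
# hash/path/tags), so add-tag operates on that file rather than on a selected tag row.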

View File

@@ -5,6 +5,7 @@ from typing import Any, Dict, Sequence, List, Optional, Tuple, Callable
from fnmatch import fnmatchcase from fnmatch import fnmatchcase
from pathlib import Path from pathlib import Path
from dataclasses import dataclass, field from dataclasses import dataclass, field
from collections import OrderedDict
import json import json
import os import os
import sys import sys
@@ -135,45 +136,46 @@ STORAGE_ORIGINS = {"local", "hydrus", "debrid"}
def _ensure_storage_columns(payload: Dict[str, Any]) -> Dict[str, Any]: def _ensure_storage_columns(payload: Dict[str, Any]) -> Dict[str, Any]:
"""Attach Title/Store columns for storage-origin results to keep CLI display compact.""" """Attach Title/Store columns for storage-origin results to keep CLI display compact."""
origin_value = str(payload.get("origin") or payload.get("source") or "").lower() origin_value = str(payload.get("origin") or payload.get("source") or "").lower()
if origin_value not in STORAGE_ORIGINS: if origin_value not in STORAGE_ORIGINS:
return payload return payload
title = payload.get("title") or payload.get("name") or payload.get("target") or payload.get("path") or "Result"
store_label = payload.get("origin") or payload.get("source") or origin_value
# Handle extension
extension = payload.get("ext", "")
if not extension and title:
path_obj = Path(str(title))
if path_obj.suffix:
extension = path_obj.suffix.lstrip('.')
title = path_obj.stem
# Handle size title = payload.get("title") or payload.get("name") or payload.get("target") or payload.get("path") or "Result"
size_val = payload.get("size") or payload.get("size_bytes") store_label = payload.get("origin") or payload.get("source") or origin_value
size_str = ""
if size_val:
try:
size_bytes = int(size_val)
size_mb = size_bytes / (1024 * 1024)
size_str = f"{int(size_mb)} MB"
except (ValueError, TypeError):
size_str = str(size_val)
normalized = dict(payload) # Handle extension
normalized["columns"] = [ extension = payload.get("ext", "")
("Title", str(title)), if not extension and title:
("Ext", str(extension)), path_obj = Path(str(title))
("Store", str(store_label)), if path_obj.suffix:
("Size", str(size_str)) extension = path_obj.suffix.lstrip('.')
] title = path_obj.stem
return normalized
# Handle size as integer MB (header will include units)
size_val = payload.get("size") or payload.get("size_bytes")
size_str = ""
if size_val is not None:
try:
size_bytes = int(size_val)
size_mb = int(size_bytes / (1024 * 1024))
size_str = str(size_mb)
except (ValueError, TypeError):
size_str = str(size_val)
normalized = dict(payload)
normalized["columns"] = [
("Title", str(title)),
("Ext", str(extension)),
("Store", str(store_label)),
("Size(Mb)", str(size_str)),
]
return normalized
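Under the assumption that a local search hit looks like the payloads emitted elsewhere in this commit, the normalisation above behaves roughly like this sketch:
payload = {
    "origin": "local",
    "path": "/music/song.flac",
    "title": "song.flac",
    "size_bytes": 7340032,
}
normalized = _ensure_storage_columns(payload)
# normalized["columns"] == [("Title", "song"), ("Ext", "flac"),
#                           ("Store", "local"), ("Size(MB)", "7")]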
CMDLET = Cmdlet( CMDLET = Cmdlet(
name="search-file", name="search-file",
summary="Unified search cmdlet for searchable backends (Hydrus, Local, Debrid, LibGen, OpenLibrary, Soulseek).", summary="Unified search cmdlet for storage (Hydrus, Local) and providers (Debrid, LibGen, OpenLibrary, Soulseek).",
usage="search-file [query] [-tag TAG] [-size >100MB|<50MB] [-type audio|video|image] [-duration >10:00] [-storage BACKEND] [-provider PROVIDER]", usage="search-file [query] [-tag TAG] [-size >100MB|<50MB] [-type audio|video|image] [-duration >10:00] [-storage BACKEND] [-provider PROVIDER]",
args=[ args=[
CmdletArg("query", description="Search query string"), CmdletArg("query", description="Search query string"),
@@ -182,11 +184,11 @@ CMDLET = Cmdlet(
CmdletArg("type", description="Filter by type: audio, video, image, document"), CmdletArg("type", description="Filter by type: audio, video, image, document"),
CmdletArg("duration", description="Filter by duration: >10:00, <1:30:00"), CmdletArg("duration", description="Filter by duration: >10:00, <1:30:00"),
CmdletArg("limit", type="integer", description="Limit results (default: 45)"), CmdletArg("limit", type="integer", description="Limit results (default: 45)"),
CmdletArg("storage", description="Search storage backend: hydrus, local, debrid (default: all searchable)"), CmdletArg("storage", description="Search storage backend: hydrus, local (default: all searchable storages)"),
CmdletArg("provider", description="Search provider: libgen, openlibrary, soulseek, debrid, local (overrides -storage)"), CmdletArg("provider", description="Search provider: libgen, openlibrary, soulseek, debrid, local (overrides -storage)"),
], ],
details=[ details=[
"Search across multiple providers: File storage (Hydrus, Local, Debrid), Books (LibGen, OpenLibrary), Music (Soulseek)", "Search across storage (Hydrus, Local) and providers (Debrid, LibGen, OpenLibrary, Soulseek)",
"Use -provider to search a specific source, or -storage to search file backends", "Use -provider to search a specific source, or -storage to search file backends",
"Filter results by: tag, size, type, duration", "Filter results by: tag, size, type, duration",
"Results can be piped to other commands", "Results can be piped to other commands",
@@ -216,6 +218,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
storage_backend: Optional[str] = None storage_backend: Optional[str] = None
provider_name: Optional[str] = None provider_name: Optional[str] = None
limit = 45 limit = 45
searched_backends: List[str] = []
# Simple argument parsing # Simple argument parsing
i = 0 i = 0
@@ -249,6 +252,11 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
i += 1 i += 1
else: else:
i += 1 i += 1
# Debrid is provider-only now
if storage_backend and storage_backend.lower() == "debrid":
log("Use -provider debrid instead of -storage debrid (debrid is provider-only)", file=sys.stderr)
return 1
# Handle piped input (e.g. from @N selection) if query is empty # Handle piped input (e.g. from @N selection) if query is empty
if not query and result: if not query and result:
@@ -351,7 +359,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
db.update_worker_status(worker_id, 'completed') db.update_worker_status(worker_id, 'completed')
return 0 return 0
# Otherwise search using FileStorage (Hydrus, Local, Debrid backends) # Otherwise search using storage backends (Hydrus, Local)
from helper.file_storage import FileStorage from helper.file_storage import FileStorage
storage = FileStorage(config=config or {}) storage = FileStorage(config=config or {})
@@ -364,6 +372,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
log(f"Backend 'hydrus' is not available (Hydrus service not running)", file=sys.stderr) log(f"Backend 'hydrus' is not available (Hydrus service not running)", file=sys.stderr)
db.update_worker_status(worker_id, 'error') db.update_worker_status(worker_id, 'error')
return 1 return 1
searched_backends.append(backend_to_search)
if not storage.supports_search(backend_to_search): if not storage.supports_search(backend_to_search):
log(f"Backend '{backend_to_search}' does not support searching", file=sys.stderr) log(f"Backend '{backend_to_search}' does not support searching", file=sys.stderr)
db.update_worker_status(worker_id, 'error') db.update_worker_status(worker_id, 'error')
@@ -379,6 +388,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
# Skip hydrus if not available # Skip hydrus if not available
if backend_name == "hydrus" and not hydrus_available: if backend_name == "hydrus" and not hydrus_available:
continue continue
searched_backends.append(backend_name)
try: try:
backend_results = storage[backend_name].search(query, limit=limit - len(all_results)) backend_results = storage[backend_name].search(query, limit=limit - len(all_results))
if backend_results: if backend_results:
@@ -388,25 +398,65 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
except Exception as exc: except Exception as exc:
log(f"Backend {backend_name} search failed: {exc}", file=sys.stderr) log(f"Backend {backend_name} search failed: {exc}", file=sys.stderr)
results = all_results[:limit] results = all_results[:limit]
# Debrid is provider-only now, but still query it by default (legacy coverage) when neither -provider nor -storage was given
if not provider_name and not storage_backend:
try:
debrid_provider = get_provider("debrid", config)
if debrid_provider and debrid_provider.validate():
remaining = max(0, limit - len(results)) if isinstance(results, list) else limit
if remaining > 0:
debrid_results = debrid_provider.search(query, limit=remaining)
if debrid_results:
if "debrid" not in searched_backends:
searched_backends.append("debrid")
if results is None:
results = []
results.extend(debrid_results)
except Exception as exc:
log(f"Debrid provider search failed: {exc}", file=sys.stderr)
def _format_storage_label(name: str) -> str:
clean = str(name or "").strip()
if not clean:
return "Unknown"
return clean.replace("_", " ").title()
storage_counts: OrderedDict[str, int] = OrderedDict((name, 0) for name in searched_backends)
for item in results or []:
origin = getattr(item, 'origin', None)
if origin is None and isinstance(item, dict):
origin = item.get('origin') or item.get('source')
if not origin:
continue
key = str(origin).lower()
if key not in storage_counts:
storage_counts[key] = 0
storage_counts[key] += 1
if storage_counts or query:
display_counts = OrderedDict((_format_storage_label(name), count) for name, count in storage_counts.items())
summary_line = table.set_storage_summary(display_counts, query, inline=True)
if summary_line:
table.title = summary_line
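A worked example of the counting above, with illustrative values: with searched_backends == ["hydrus", "local"], two local hits, and one debrid hit appended by the fallback, storage_counts becomes OrderedDict([("hydrus", 0), ("local", 2), ("debrid", 1)]) and the inline summary replaces the table title with something like:
#   Hydrus:0 Local:2 Debrid:1 | filter: "dune"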
# Emit results and collect for workers table # Emit results and collect for workers table
if results: if results:
for item in results: for item in results:
# Add to table def _as_dict(obj: Any) -> Dict[str, Any]:
table.add_result(item) if isinstance(obj, dict):
return dict(obj)
if isinstance(item, dict): if hasattr(obj, "to_dict") and callable(getattr(obj, "to_dict")):
normalized = _ensure_storage_columns(item) return obj.to_dict() # type: ignore[arg-type]
results_list.append(normalized) return {"title": str(obj)}
ctx.emit(normalized)
elif isinstance(item, ResultItem): item_dict = _as_dict(item)
item_dict = item.to_dict() normalized = _ensure_storage_columns(item_dict)
results_list.append(item_dict) # Add to table using normalized columns to avoid extra fields (e.g., Tags/Name)
ctx.emit(item_dict) table.add_result(normalized)
else:
item_dict = {"title": str(item)} results_list.append(normalized)
results_list.append(item_dict) ctx.emit(normalized)
ctx.emit(item_dict)
# Set the result table in context for TUI/CLI display # Set the result table in context for TUI/CLI display
ctx.set_last_result_table(table, results_list) ctx.set_last_result_table(table, results_list)

View File

@@ -632,6 +632,16 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
debug("MPV is starting up...") debug("MPV is starting up...")
return 0 return 0
else: else:
# Do not auto-launch MPV when no action/inputs were provided; avoid surprise startups
no_inputs = not any([
result, url_arg, index_arg, clear_mode, play_mode,
pause_mode, save_mode, load_mode, current_mode, list_mode
])
if no_inputs:
debug("MPV is not running. Skipping auto-launch (no inputs).", file=sys.stderr)
return 1
debug("MPV is not running. Starting new instance...") debug("MPV is not running. Starting new instance...")
_start_mpv([], config=config) _start_mpv([], config=config)
return 0 return 0
@@ -716,8 +726,6 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
is_current = item.get("current", False) is_current = item.get("current", False)
title = _extract_title_from_item(item) title = _extract_title_from_item(item)
store = _infer_store_from_playlist_item(item) store = _infer_store_from_playlist_item(item)
filename = item.get("filename", "") if isinstance(item, dict) else ""
display_loc = _format_playlist_location(filename)
# Truncate if too long # Truncate if too long
if len(title) > 80: if len(title) > 80:
@@ -727,7 +735,6 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
row.add_column("Current", "*" if is_current else "") row.add_column("Current", "*" if is_current else "")
row.add_column("Store", store) row.add_column("Store", store)
row.add_column("Title", title) row.add_column("Title", title)
row.add_column("Filename", display_loc)
table.set_row_selection_args(i, [str(i + 1)]) table.set_row_selection_args(i, [str(i + 1)])

View File

@@ -381,6 +381,81 @@ class LocalStorageBackend(StorageBackend):
"size_bytes": size_bytes, "size_bytes": size_bytes,
"tags": tags, "tags": tags,
}) })
if limit is not None and len(results) >= limit:
return results
# Title-tag search: treat freeform terms as title namespace queries (AND across terms)
if terms:
title_hits: dict[int, dict[str, Any]] = {}
for term in terms:
cursor.execute(
"""
SELECT DISTINCT f.id, f.file_path, f.file_size
FROM files f
JOIN tags t ON f.id = t.file_id
WHERE LOWER(t.tag) LIKE ?
ORDER BY f.file_path
LIMIT ?
""",
(f"title:%{term}%", fetch_limit),
)
for file_id, file_path_str, size_bytes in cursor.fetchall():
if not file_path_str:
continue
entry = title_hits.get(file_id)
if entry:
entry["count"] += 1
if size_bytes is not None:
entry["size"] = size_bytes
else:
title_hits[file_id] = {
"path": file_path_str,
"size": size_bytes,
"count": 1,
}
if title_hits:
required = len(terms)
for file_id, info in title_hits.items():
if info.get("count") != required:
continue
file_path_str = info.get("path")
if not file_path_str or file_path_str in seen_files:
continue
file_path = Path(file_path_str)
if not file_path.exists():
continue
seen_files.add(file_path_str)
size_bytes = info.get("size")
if size_bytes is None:
try:
size_bytes = file_path.stat().st_size
except OSError:
size_bytes = None
cursor.execute(
"""
SELECT tag FROM tags WHERE file_id = ?
""",
(file_id,),
)
tags = [row[0] for row in cursor.fetchall()]
title_tag = next((t.split(':', 1)[1] for t in tags if t.lower().startswith('title:')), None)
results.append({
"name": file_path.stem,
"title": title_tag or file_path.stem,
"ext": file_path.suffix.lstrip('.'),
"path": str(file_path),
"target": str(file_path),
"origin": "local",
"size": size_bytes,
"size_bytes": size_bytes,
"tags": tags,
})
if limit is not None and len(results) >= limit:
return results
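A worked trace of the AND-across-terms matching above, with illustrative data:
# terms = ["dune", "herbert"]
#   file 7, tag "title:Dune Messiah - Frank Herbert" -> hit by both per-term queries,
#       title_hits[7]["count"] == 2 == len(terms) -> emitted
#   file 9, tag "title:Dune soundtrack"             -> hit only by "dune",
#       count stays at 1 -> skipped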
# Also search for simple tags (without namespace) containing the query # Also search for simple tags (without namespace) containing the query
# Only perform tag search if single term, or if we want to support multi-term tag search # Only perform tag search if single term, or if we want to support multi-term tag search
@@ -697,28 +772,35 @@ class HydrusStorageBackend(StorageBackend):
# debug(f"[HydrusBackend.search] Processing file_id={file_id}, tags type={type(tags_set)}") # debug(f"[HydrusBackend.search] Processing file_id={file_id}, tags type={type(tags_set)}")
if isinstance(tags_set, dict): if isinstance(tags_set, dict):
# debug(f"[HydrusBackend.search] Tags payload keys: {list(tags_set.keys())}") # Collect both storage_tags and display_tags to capture siblings/parents and ensure title: is seen
def _collect(tag_list: Any) -> None:
nonlocal title, all_tags_str
if not isinstance(tag_list, list):
return
for tag in tag_list:
tag_text = str(tag) if tag else ""
if not tag_text:
continue
all_tags.append(tag_text)
all_tags_str += " " + tag_text.lower()
if tag_text.lower().startswith("title:") and title == f"Hydrus File {file_id}":
title = tag_text.split(":", 1)[1].strip()
for service_name, service_tags in tags_set.items(): for service_name, service_tags in tags_set.items():
# debug(f"[HydrusBackend.search] Processing service: {service_name}") if not isinstance(service_tags, dict):
if isinstance(service_tags, dict): continue
storage_tags = service_tags.get("storage_tags", {})
if isinstance(storage_tags, dict): storage_tags = service_tags.get("storage_tags", {})
for tag_type, tag_list in storage_tags.items(): if isinstance(storage_tags, dict):
# debug(f"[HydrusBackend.search] Tag type: {tag_type}, count: {len(tag_list) if isinstance(tag_list, list) else 0}") for tag_list in storage_tags.values():
if isinstance(tag_list, list): _collect(tag_list)
for tag in tag_list:
tag_text = str(tag) if tag else "" display_tags = service_tags.get("display_tags", [])
if tag_text: _collect(display_tags)
# debug(f"[HydrusBackend.search] Tag: {tag_text}")
all_tags.append(tag_text) # Also consider top-level flattened tags payload if provided (Hydrus API sometimes includes it)
all_tags_str += " " + tag_text.lower() top_level_tags = meta.get("tags_flat", []) or meta.get("tags", [])
# Extract title: namespace _collect(top_level_tags)
if tag_text.startswith("title:"):
title = tag_text[6:].strip() # Remove "title:" prefix
# debug(f"[HydrusBackend.search] ✓ Extracted title: {title}")
break
if title != f"Hydrus File {file_id}":
break
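A sketch of the Hydrus tags payload shape the new collector is written against (inferred from this code, not from Hydrus API docs):
tags_set = {
    "my tags": {
        "storage_tags": {"0": ["title:dune", "creator:frank herbert"]},
        "display_tags": ["title:dune", "creator:frank herbert", "series:dune"],
    }
}
# _collect() walks every storage_tags list plus display_tags, appends each tag to
# all_tags, and adopts the first "title:" tag it sees as the display title.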
# Resolve extension from MIME type # Resolve extension from MIME type
mime_type = meta.get("mime") mime_type = meta.get("mime")
@@ -796,202 +878,6 @@ class HydrusStorageBackend(StorageBackend):
import traceback import traceback
traceback.print_exc(file=sys.stderr) traceback.print_exc(file=sys.stderr)
raise raise
class DebridStorageBackend(StorageBackend):
"""File storage backend for Debrid services (AllDebrid, RealDebrid, etc.)."""
def __init__(self, api_key: Optional[str] = None) -> None:
"""Initialize Debrid storage backend.
Args:
api_key: API key for Debrid service (e.g., from config["Debrid"]["All-debrid"])
"""
self._api_key = api_key
def get_name(self) -> str:
return "debrid"
def upload(self, file_path: Path, **kwargs: Any) -> str:
"""Upload file to Debrid service.
Args:
file_path: Path to the file to upload
**kwargs: Debrid-specific options
Returns:
Debrid link/URL
Raises:
NotImplementedError: Debrid upload not yet implemented
"""
raise NotImplementedError("Debrid upload not yet implemented")
def search(self, query: str, **kwargs: Any) -> list[Dict[str, Any]]:
"""Search Debrid for files matching query.
Searches through available magnets in AllDebrid storage and returns
matching results with download links.
Args:
query: Search query string (filename or magnet name pattern)
limit: Maximum number of results to return (default: 50)
api_key: Optional override for API key (uses default if not provided)
Returns:
List of dicts with keys:
- 'name': File/magnet name
- 'title': Same as name (for compatibility)
- 'url': AllDebrid download link
- 'size': File size in bytes
- 'magnet_id': AllDebrid magnet ID
- 'origin': 'debrid'
- 'annotations': Status and seeders info
Example:
results = storage["debrid"].search("movie.mkv")
for result in results:
print(f"{result['name']} - {result['size']} bytes")
"""
api_key = kwargs.get("api_key") or self._api_key
if not api_key:
raise ValueError("'api_key' parameter required for Debrid search (not configured)")
limit = kwargs.get("limit", 50)
try:
from helper.alldebrid import AllDebridClient
debug(f"Searching AllDebrid for: {query}")
client = AllDebridClient(api_key=api_key)
# STEP 1: Get magnet status list
try:
response = client._request('magnet/status')
magnets_data = response.get('data', {})
magnets = magnets_data.get('magnets', [])
if not isinstance(magnets, list):
magnets = [magnets] if magnets else []
debug(f"[debrid_search] Got {len(magnets)} total magnets")
except Exception as e:
log(f"⚠ Failed to get magnets list: {e}", file=sys.stderr)
magnets = []
# Filter by query for relevant magnets
query_lower = query.lower()
matching_magnet_ids = []
magnet_info_map = {} # Store status info for later
# "*" means "match all" - include all magnets
match_all = query_lower == "*"
# Split query into terms for AND logic
terms = [t.strip() for t in query_lower.replace(',', ' ').split() if t.strip()]
if not terms:
terms = [query_lower]
for magnet in magnets:
filename = magnet.get('filename', '').lower()
status_code = magnet.get('statusCode', 0)
magnet_id = magnet.get('id')
# Only include ready or nearly-ready magnets (skip error states 5+)
if status_code not in [0, 1, 2, 3, 4]:
continue
# Match query against filename (or match all if query is "*")
if not match_all:
if not all(term in filename for term in terms):
continue
matching_magnet_ids.append(magnet_id)
magnet_info_map[magnet_id] = magnet
debug(f"[debrid_search] ✓ Matched magnet {magnet_id}: {filename}")
debug(f"[debrid_search] Found {len(matching_magnet_ids)} matching magnets")
results = []
# Return one result per magnet (not per file)
# This keeps search results clean and allows user to download entire magnet at once
for magnet_id in matching_magnet_ids:
magnet_status = magnet_info_map.get(magnet_id, {})
filename = magnet_status.get('filename', 'Unknown')
status = magnet_status.get('status', 'Unknown')
status_code = magnet_status.get('statusCode', 0)
size = magnet_status.get('size', 0)
seeders = magnet_status.get('seeders', 0)
# Format size nicely
size_label = f"{size / (1024**3):.2f}GB" if size > 0 else "Unknown"
# Create one result per magnet with aggregated info
results.append({
'name': filename,
'title': filename,
'url': '', # No direct file link for the magnet itself
'size': size,
'size_bytes': size,
'magnet_id': magnet_id,
'origin': 'debrid',
'annotations': [
status,
f"{seeders} seeders",
size_label,
],
'target': '', # Magnet ID is stored, user can then download it
})
debug(f"Found {len(results)} result(s) on AllDebrid")
return results[:limit]
except Exception as exc:
log(f"❌ Debrid search failed: {exc}", file=sys.stderr)
raise
def _flatten_file_tree(self, files: list[Any], prefix: str = '') -> list[Dict[str, Any]]:
"""Flatten AllDebrid's nested file tree structure.
AllDebrid returns files in a tree structure with folders ('e' key).
This flattens it to a list of individual files.
Args:
files: AllDebrid file tree structure
prefix: Current path prefix (used recursively)
Returns:
List of flattened file entries with 'name', 'size', 'link' keys
"""
result = []
if not isinstance(files, list):
return result
for item in files:
if not isinstance(item, dict):
continue
name = item.get('n', '')
# Check if it's a folder (has 'e' key with entries)
if 'e' in item:
# Recursively flatten subfolder
subfolder_path = f"{prefix}/{name}" if prefix else name
subitems = item.get('e', [])
result.extend(self._flatten_file_tree(subitems, subfolder_path))
else:
# It's a file - add it to results
file_path = f"{prefix}/{name}" if prefix else name
result.append({
'name': file_path,
'size': item.get('s', 0),
'link': item.get('l', ''),
})
return result
class MatrixStorageBackend(StorageBackend): class MatrixStorageBackend(StorageBackend):
"""File storage backend for Matrix (Element) chat rooms.""" """File storage backend for Matrix (Element) chat rooms."""
@@ -1344,7 +1230,6 @@ class FileStorage:
# Search with searchable backends (uses configured locations) # Search with searchable backends (uses configured locations)
results = storage["hydrus"].search("music") results = storage["hydrus"].search("music")
results = storage["local"].search("song") # Uses config["Local"]["path"] results = storage["local"].search("song") # Uses config["Local"]["path"]
results = storage["debrid"].search("movie")
""" """
def __init__(self, config: Optional[Dict[str, Any]] = None) -> None: def __init__(self, config: Optional[Dict[str, Any]] = None) -> None:
@@ -1356,13 +1241,11 @@ class FileStorage:
config = config or {} config = config or {}
# Extract backend-specific settings from config # Extract backend-specific settings from config
from config import get_local_storage_path, get_debrid_api_key from config import get_local_storage_path
local_path = get_local_storage_path(config) local_path = get_local_storage_path(config)
local_path_str = str(local_path) if local_path else None local_path_str = str(local_path) if local_path else None
debrid_api_key = get_debrid_api_key(config)
self._backends: Dict[str, StorageBackend] = {} self._backends: Dict[str, StorageBackend] = {}
# Always include local backend (even if no default path configured) # Always include local backend (even if no default path configured)
@@ -1372,10 +1255,6 @@ class FileStorage:
# Include Hydrus backend (configuration optional) # Include Hydrus backend (configuration optional)
self._backends["hydrus"] = HydrusStorageBackend(config=config) self._backends["hydrus"] = HydrusStorageBackend(config=config)
# Include Debrid backend (API key optional - will raise on use if not provided)
if debrid_api_key:
self._backends["debrid"] = DebridStorageBackend(api_key=debrid_api_key)
# Include Matrix backend # Include Matrix backend
self._backends["matrix"] = MatrixStorageBackend() self._backends["matrix"] = MatrixStorageBackend()

View File

@@ -71,10 +71,208 @@ class ITunesProvider(MetadataProvider):
return items return items
class OpenLibraryMetadataProvider(MetadataProvider):
"""Metadata provider for OpenLibrary book metadata."""
@property
def name(self) -> str: # type: ignore[override]
return "openlibrary"
def search(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
query_clean = (query or "").strip()
if not query_clean:
return []
try:
# Prefer ISBN-specific search when the query looks like one
if query_clean.replace("-", "").isdigit() and len(query_clean.replace("-", "")) in (10, 13):
q = f"isbn:{query_clean.replace('-', '')}"
else:
q = query_clean
resp = requests.get(
"https://openlibrary.org/search.json",
params={"q": q, "limit": limit},
timeout=10,
)
resp.raise_for_status()
data = resp.json()
except Exception as exc:
log(f"OpenLibrary search failed: {exc}", file=sys.stderr)
return []
items: List[Dict[str, Any]] = []
for doc in data.get("docs", [])[:limit]:
authors = doc.get("author_name") or []
publisher = ""
publishers = doc.get("publisher") or []
if isinstance(publishers, list) and publishers:
publisher = publishers[0]
# Prefer 13-digit ISBN when available, otherwise 10-digit
isbn_list = doc.get("isbn") or []
isbn_13 = next((i for i in isbn_list if len(str(i)) == 13), None)
isbn_10 = next((i for i in isbn_list if len(str(i)) == 10), None)
# Derive OLID from key
olid = ""
key = doc.get("key", "")
if isinstance(key, str) and key:
olid = key.split("/")[-1]
items.append({
"title": doc.get("title") or "",
"artist": ", ".join(authors) if authors else "",
"album": publisher,
"year": str(doc.get("first_publish_year") or ""),
"provider": self.name,
"authors": authors,
"publisher": publisher,
"identifiers": {
"isbn_13": isbn_13,
"isbn_10": isbn_10,
"openlibrary": olid,
"oclc": (doc.get("oclc_numbers") or [None])[0],
"lccn": (doc.get("lccn") or [None])[0],
},
"description": None,
})
return items
def to_tags(self, item: Dict[str, Any]) -> List[str]:
tags: List[str] = []
title = item.get("title")
authors = item.get("authors") or []
publisher = item.get("publisher")
year = item.get("year")
description = item.get("description") or ""
if title:
tags.append(f"title:{title}")
for author in authors:
if author:
tags.append(f"author:{author}")
if publisher:
tags.append(f"publisher:{publisher}")
if year:
tags.append(f"year:{year}")
if description:
tags.append(f"description:{description[:200]}")
identifiers = item.get("identifiers") or {}
for key, value in identifiers.items():
if value:
tags.append(f"{key}:{value}")
tags.append(f"source:{self.name}")
return tags
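A usage sketch for the new provider, assuming it constructs without arguments (the MetadataProvider base constructor is outside this diff); the tag values shown are illustrative:
provider = OpenLibraryMetadataProvider()
items = provider.search("9780441013593", limit=5)  # 13 digits -> sent as an "isbn:" query
if items:
    tags = provider.to_tags(items[0])
    # e.g. ["title:Dune", "author:Frank Herbert", "year:1965",
    #       "isbn_13:9780441013593", "openlibrary:OL12345W", "source:openlibrary"]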
class GoogleBooksMetadataProvider(MetadataProvider):
"""Metadata provider for Google Books volumes API."""
@property
def name(self) -> str: # type: ignore[override]
return "googlebooks"
def search(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
query_clean = (query or "").strip()
if not query_clean:
return []
# Prefer ISBN queries when possible
if query_clean.replace("-", "").isdigit() and len(query_clean.replace("-", "")) in (10, 13):
q = f"isbn:{query_clean.replace('-', '')}"
else:
q = query_clean
try:
resp = requests.get(
"https://www.googleapis.com/books/v1/volumes",
params={"q": q, "maxResults": limit},
timeout=10,
)
resp.raise_for_status()
payload = resp.json()
except Exception as exc:
log(f"Google Books search failed: {exc}", file=sys.stderr)
return []
items: List[Dict[str, Any]] = []
for volume in payload.get("items", [])[:limit]:
info = volume.get("volumeInfo") or {}
authors = info.get("authors") or []
publisher = info.get("publisher", "")
published_date = info.get("publishedDate", "")
year = str(published_date)[:4] if published_date else ""
identifiers_raw = info.get("industryIdentifiers") or []
identifiers: Dict[str, Optional[str]] = {"googlebooks": volume.get("id")}
for ident in identifiers_raw:
if not isinstance(ident, dict):
continue
ident_type = ident.get("type", "").lower()
ident_value = ident.get("identifier")
if not ident_value:
continue
if ident_type == "isbn_13":
identifiers.setdefault("isbn_13", ident_value)
elif ident_type == "isbn_10":
identifiers.setdefault("isbn_10", ident_value)
else:
identifiers.setdefault(ident_type, ident_value)
items.append({
"title": info.get("title") or "",
"artist": ", ".join(authors) if authors else "",
"album": publisher,
"year": year,
"provider": self.name,
"authors": authors,
"publisher": publisher,
"identifiers": identifiers,
"description": info.get("description", ""),
})
return items
def to_tags(self, item: Dict[str, Any]) -> List[str]:
tags: List[str] = []
title = item.get("title")
authors = item.get("authors") or []
publisher = item.get("publisher")
year = item.get("year")
description = item.get("description") or ""
if title:
tags.append(f"title:{title}")
for author in authors:
if author:
tags.append(f"author:{author}")
if publisher:
tags.append(f"publisher:{publisher}")
if year:
tags.append(f"year:{year}")
if description:
tags.append(f"description:{description[:200]}")
identifiers = item.get("identifiers") or {}
for key, value in identifiers.items():
if value:
tags.append(f"{key}:{value}")
tags.append(f"source:{self.name}")
return tags
# Registry --------------------------------------------------------------- # Registry ---------------------------------------------------------------
_METADATA_PROVIDERS: Dict[str, Type[MetadataProvider]] = { _METADATA_PROVIDERS: Dict[str, Type[MetadataProvider]] = {
"itunes": ITunesProvider, "itunes": ITunesProvider,
"openlibrary": OpenLibraryMetadataProvider,
"googlebooks": GoogleBooksMetadataProvider,
"google": GoogleBooksMetadataProvider,
} }
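A sketch of how a caller might resolve a provider from the registry above; the helper name is hypothetical (the real accessor lives outside this hunk) and no-argument construction is assumed:
def resolve_metadata_provider(name):
    cls = _METADATA_PROVIDERS.get(str(name).strip().lower())
    return cls() if cls else None
# resolve_metadata_provider("google") and resolve_metadata_provider("googlebooks")
# both yield a GoogleBooksMetadataProvider instance.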

View File

@@ -293,13 +293,7 @@ class LocalStorageProvider(SearchProvider):
class LibGenProvider(SearchProvider): class LibGenProvider(SearchProvider):
"""Search provider for Library Genesis books.""" """Search provider for Library Genesis books."""
# Define fields to display (note: LibGen doesn't have API field mapping like OpenLibrary) RESULT_FIELDS: List[Tuple[str, str, Optional[Any]]] = [] # columns built manually
# These are extracted from the book dict directly
RESULT_FIELDS = [
("title", "Title", None),
("author", "Author(s)", None),
("year", "Year", None),
]
def __init__(self, config: Dict[str, Any] = None): def __init__(self, config: Dict[str, Any] = None):
super().__init__(config) super().__init__(config)
@@ -363,15 +357,22 @@ class LibGenProvider(SearchProvider):
search_results = [] search_results = []
for idx, book in enumerate(books, 1): for idx, book in enumerate(books, 1):
# Build columns dynamically from RESULT_FIELDS
columns = self.build_columns_from_doc(book, idx)
title = book.get("title", "Unknown") title = book.get("title", "Unknown")
author = book.get("author", "Unknown") author = book.get("author", "Unknown")
year = book.get("year", "Unknown") year = book.get("year", "Unknown")
pages = book.get("pages") or book.get("pages_str") or ""
extension = book.get("extension", "") or book.get("ext", "")
filesize = book.get("filesize_str", "Unknown") filesize = book.get("filesize_str", "Unknown")
isbn = book.get("isbn", "") isbn = book.get("isbn", "")
mirror_url = book.get("mirror_url", "") mirror_url = book.get("mirror_url", "")
# Columns: Title, Author, Pages, Ext
columns = [
("Title", title),
("Author", author),
("Pages", str(pages)),
("Ext", str(extension)),
]
# Build detail with author and year # Build detail with author and year
detail = f"By: {author}" detail = f"By: {author}"
@@ -1077,12 +1078,7 @@ class OpenLibraryProvider(SearchProvider):
"""Search provider for OpenLibrary.""" """Search provider for OpenLibrary."""
# Define fields to request from API and how to display them # Define fields to request from API and how to display them
RESULT_FIELDS = [ RESULT_FIELDS: List[Tuple[str, str, Optional[Any]]] = [] # columns built manually
("title", "Title", None),
("author_name", "Author", lambda x: ", ".join(x) if isinstance(x, list) else x),
("first_publish_year", "Year", None),
("status", "Status", None),
]
def __init__(self, config: Dict[str, Any] = None): def __init__(self, config: Dict[str, Any] = None):
super().__init__(config) super().__init__(config)
@@ -1146,10 +1142,25 @@ class OpenLibraryProvider(SearchProvider):
return [] return []
# Default to title/general search # Default to title/general search
requested_fields = [
"title",
"author_name",
"first_publish_year",
"number_of_pages_median",
"isbn",
"oclc_numbers",
"lccn",
"language",
"key",
"edition_key",
"ebook_access",
"ia",
"has_fulltext",
]
params = { params = {
"q": query_clean, "q": query_clean,
"limit": limit, "limit": limit,
"fields": f"{self.get_api_fields_string()},isbn,oclc_numbers,lccn,number_of_pages_median,language,key,ebook_access,ia,has_fulltext", "fields": ",".join(requested_fields),
} }
response = requests.get(search_url, params=params, timeout=9) response = requests.get(search_url, params=params, timeout=9)
@@ -1158,16 +1169,18 @@ class OpenLibraryProvider(SearchProvider):
search_results = [] search_results = []
for idx, doc in enumerate(data.get("docs", []), 1): for idx, doc in enumerate(data.get("docs", []), 1):
# Extract OLID first (needed for metadata) # Prefer edition_key (books/OLxxxM). Fallback to work key.
olid = doc.get("key", "").split("/")[-1] edition_keys = doc.get("edition_key") or []
olid = ""
if isinstance(edition_keys, list) and edition_keys:
olid = str(edition_keys[0]).strip()
if not olid:
olid = doc.get("key", "").split("/")[-1]
# Determine status/availability # Determine status/availability
status, archive_id = self._derive_status(doc) status, archive_id = self._derive_status(doc)
doc["status"] = status doc["status"] = status
# Build columns dynamically from RESULT_FIELDS (now includes status)
columns = self.build_columns_from_doc(doc, idx)
# Extract additional metadata # Extract additional metadata
title = doc.get("title", "Unknown") title = doc.get("title", "Unknown")
authors = doc.get("author_name", ["Unknown"]) authors = doc.get("author_name", ["Unknown"])
@@ -1183,6 +1196,13 @@ class OpenLibraryProvider(SearchProvider):
language = languages[0] if languages else "" language = languages[0] if languages else ""
author_str = ", ".join(authors) if authors else "Unknown" author_str = ", ".join(authors) if authors else "Unknown"
# Columns: Title, Author, Pages
columns = [
("Title", title),
("Author", author_str),
("Pages", str(pages or "")),
]
# Build detail with author and year # Build detail with author and year
detail = f"By: {author_str}" detail = f"By: {author_str}"

View File

@@ -52,9 +52,11 @@ _PIPELINE_LAST_ITEMS: List[Any] = []
# Store the last result table for @ selection syntax (e.g., @2, @2-5, @{1,3,5}) # Store the last result table for @ selection syntax (e.g., @2, @2-5, @{1,3,5})
_LAST_RESULT_TABLE: Optional[Any] = None _LAST_RESULT_TABLE: Optional[Any] = None
_LAST_RESULT_ITEMS: List[Any] = [] _LAST_RESULT_ITEMS: List[Any] = []
# Subject for the current result table (e.g., the file whose tags/URLs are displayed)
_LAST_RESULT_SUBJECT: Optional[Any] = None
# History of result tables for @.. navigation (LIFO stack, max 20 tables) # History of result tables for @.. navigation (LIFO stack, max 20 tables)
_RESULT_TABLE_HISTORY: List[tuple[Optional[Any], List[Any]]] = [] _RESULT_TABLE_HISTORY: List[tuple[Optional[Any], List[Any], Optional[Any]]] = []
_MAX_RESULT_TABLE_HISTORY = 20 _MAX_RESULT_TABLE_HISTORY = 20
# Current stage table for @N expansion (separate from history) # Current stage table for @N expansion (separate from history)
@@ -70,6 +72,8 @@ _DISPLAY_ITEMS: List[Any] = []
# Table for display-only commands (overlay) # Table for display-only commands (overlay)
# Used when a command wants to show a specific table formatting but not affect history # Used when a command wants to show a specific table formatting but not affect history
_DISPLAY_TABLE: Optional[Any] = None _DISPLAY_TABLE: Optional[Any] = None
# Subject for overlay/display-only tables (takes precedence over _LAST_RESULT_SUBJECT)
_DISPLAY_SUBJECT: Optional[Any] = None
# Track the indices the user selected via @ syntax for the current invocation # Track the indices the user selected via @ syntax for the current invocation
_PIPELINE_LAST_SELECTION: List[int] = [] _PIPELINE_LAST_SELECTION: List[int] = []
@@ -262,7 +266,7 @@ def reset() -> None:
"""Reset all pipeline state. Called between pipeline executions.""" """Reset all pipeline state. Called between pipeline executions."""
global _PIPE_EMITS, _PIPE_ACTIVE, _PIPE_IS_LAST, _PIPELINE_VALUES global _PIPE_EMITS, _PIPE_ACTIVE, _PIPE_IS_LAST, _PIPELINE_VALUES
global _LAST_PIPELINE_CAPTURE, _PIPELINE_REFRESHED, _PIPELINE_LAST_ITEMS global _LAST_PIPELINE_CAPTURE, _PIPELINE_REFRESHED, _PIPELINE_LAST_ITEMS
global _PIPELINE_COMMAND_TEXT global _PIPELINE_COMMAND_TEXT, _LAST_RESULT_SUBJECT, _DISPLAY_SUBJECT
_PIPE_EMITS = [] _PIPE_EMITS = []
_PIPE_ACTIVE = False _PIPE_ACTIVE = False
@@ -272,6 +276,8 @@ def reset() -> None:
_PIPELINE_LAST_ITEMS = [] _PIPELINE_LAST_ITEMS = []
_PIPELINE_VALUES = {} _PIPELINE_VALUES = {}
_PIPELINE_COMMAND_TEXT = "" _PIPELINE_COMMAND_TEXT = ""
_LAST_RESULT_SUBJECT = None
_DISPLAY_SUBJECT = None
def get_emitted_items() -> List[Any]: def get_emitted_items() -> List[Any]:
@@ -419,7 +425,7 @@ def trigger_ui_library_refresh(library_filter: str = 'local') -> None:
print(f"[trigger_ui_library_refresh] Error calling refresh callback: {e}", file=sys.stderr) print(f"[trigger_ui_library_refresh] Error calling refresh callback: {e}", file=sys.stderr)
def set_last_result_table(result_table: Optional[Any], items: Optional[List[Any]] = None) -> None: def set_last_result_table(result_table: Optional[Any], items: Optional[List[Any]] = None, subject: Optional[Any] = None) -> None:
"""Store the last result table and items for @ selection syntax. """Store the last result table and items for @ selection syntax.
This should be called after displaying a result table, so users can reference This should be called after displaying a result table, so users can reference
@@ -433,11 +439,12 @@ def set_last_result_table(result_table: Optional[Any], items: Optional[List[Any]
result_table: The ResultTable object that was displayed (or None) result_table: The ResultTable object that was displayed (or None)
items: List of items that populated the table (optional) items: List of items that populated the table (optional)
""" """
global _LAST_RESULT_TABLE, _LAST_RESULT_ITEMS, _RESULT_TABLE_HISTORY, _DISPLAY_ITEMS, _DISPLAY_TABLE global _LAST_RESULT_TABLE, _LAST_RESULT_ITEMS, _LAST_RESULT_SUBJECT
global _RESULT_TABLE_HISTORY, _DISPLAY_ITEMS, _DISPLAY_TABLE, _DISPLAY_SUBJECT
# Push current table to history before replacing # Push current table to history before replacing
if _LAST_RESULT_TABLE is not None: if _LAST_RESULT_TABLE is not None:
_RESULT_TABLE_HISTORY.append((_LAST_RESULT_TABLE, _LAST_RESULT_ITEMS.copy())) _RESULT_TABLE_HISTORY.append((_LAST_RESULT_TABLE, _LAST_RESULT_ITEMS.copy(), _LAST_RESULT_SUBJECT))
# Keep history size limited # Keep history size limited
if len(_RESULT_TABLE_HISTORY) > _MAX_RESULT_TABLE_HISTORY: if len(_RESULT_TABLE_HISTORY) > _MAX_RESULT_TABLE_HISTORY:
_RESULT_TABLE_HISTORY.pop(0) _RESULT_TABLE_HISTORY.pop(0)
@@ -445,11 +452,13 @@ def set_last_result_table(result_table: Optional[Any], items: Optional[List[Any]
# Set new current table and clear any display items/table # Set new current table and clear any display items/table
_DISPLAY_ITEMS = [] _DISPLAY_ITEMS = []
_DISPLAY_TABLE = None _DISPLAY_TABLE = None
_DISPLAY_SUBJECT = None
_LAST_RESULT_TABLE = result_table _LAST_RESULT_TABLE = result_table
_LAST_RESULT_ITEMS = items or [] _LAST_RESULT_ITEMS = items or []
_LAST_RESULT_SUBJECT = subject
def set_last_result_table_overlay(result_table: Optional[Any], items: Optional[List[Any]] = None) -> None: def set_last_result_table_overlay(result_table: Optional[Any], items: Optional[List[Any]] = None, subject: Optional[Any] = None) -> None:
"""Set a result table as an overlay (display only, no history). """Set a result table as an overlay (display only, no history).
Used for commands like get-tag that want to show a formatted table but Used for commands like get-tag that want to show a formatted table but
@@ -459,13 +468,14 @@ def set_last_result_table_overlay(result_table: Optional[Any], items: Optional[L
result_table: The ResultTable object to display result_table: The ResultTable object to display
items: List of items for @N selection items: List of items for @N selection
""" """
global _DISPLAY_ITEMS, _DISPLAY_TABLE global _DISPLAY_ITEMS, _DISPLAY_TABLE, _DISPLAY_SUBJECT
_DISPLAY_TABLE = result_table _DISPLAY_TABLE = result_table
_DISPLAY_ITEMS = items or [] _DISPLAY_ITEMS = items or []
_DISPLAY_SUBJECT = subject
def set_last_result_table_preserve_history(result_table: Optional[Any], items: Optional[List[Any]] = None) -> None: def set_last_result_table_preserve_history(result_table: Optional[Any], items: Optional[List[Any]] = None, subject: Optional[Any] = None) -> None:
"""Update the last result table WITHOUT adding to history. """Update the last result table WITHOUT adding to history.
Used for action commands (delete-tag, add-tag, etc.) that modify data but shouldn't Used for action commands (delete-tag, add-tag, etc.) that modify data but shouldn't
@@ -475,11 +485,12 @@ def set_last_result_table_preserve_history(result_table: Optional[Any], items: O
result_table: The ResultTable object that was displayed (or None) result_table: The ResultTable object that was displayed (or None)
items: List of items that populated the table (optional) items: List of items that populated the table (optional)
""" """
global _LAST_RESULT_TABLE, _LAST_RESULT_ITEMS global _LAST_RESULT_TABLE, _LAST_RESULT_ITEMS, _LAST_RESULT_SUBJECT
# Update current table WITHOUT pushing to history # Update current table WITHOUT pushing to history
_LAST_RESULT_TABLE = result_table _LAST_RESULT_TABLE = result_table
_LAST_RESULT_ITEMS = items or [] _LAST_RESULT_ITEMS = items or []
_LAST_RESULT_SUBJECT = subject
def set_last_result_items_only(items: Optional[List[Any]]) -> None: def set_last_result_items_only(items: Optional[List[Any]]) -> None:
@@ -494,13 +505,14 @@ def set_last_result_items_only(items: Optional[List[Any]]) -> None:
Args: Args:
items: List of items to select from items: List of items to select from
""" """
global _DISPLAY_ITEMS, _DISPLAY_TABLE global _DISPLAY_ITEMS, _DISPLAY_TABLE, _DISPLAY_SUBJECT
# Store items for immediate @N selection, but DON'T modify _LAST_RESULT_ITEMS # Store items for immediate @N selection, but DON'T modify _LAST_RESULT_ITEMS
# This ensures history contains original search data, not display transformations # This ensures history contains original search data, not display transformations
_DISPLAY_ITEMS = items or [] _DISPLAY_ITEMS = items or []
# Clear display table since we're setting items only (CLI will generate table if needed) # Clear display table since we're setting items only (CLI will generate table if needed)
_DISPLAY_TABLE = None _DISPLAY_TABLE = None
_DISPLAY_SUBJECT = None
def restore_previous_result_table() -> bool: def restore_previous_result_table() -> bool:
@@ -509,22 +521,32 @@ def restore_previous_result_table() -> bool:
Returns: Returns:
True if a previous table was restored, False if history is empty True if a previous table was restored, False if history is empty
""" """
global _LAST_RESULT_TABLE, _LAST_RESULT_ITEMS, _RESULT_TABLE_HISTORY, _DISPLAY_ITEMS, _DISPLAY_TABLE global _LAST_RESULT_TABLE, _LAST_RESULT_ITEMS, _LAST_RESULT_SUBJECT
global _RESULT_TABLE_HISTORY, _DISPLAY_ITEMS, _DISPLAY_TABLE, _DISPLAY_SUBJECT
# If we have an active overlay (display items/table), clear it to "go back" to the underlying table # If we have an active overlay (display items/table), clear it to "go back" to the underlying table
if _DISPLAY_ITEMS or _DISPLAY_TABLE: if _DISPLAY_ITEMS or _DISPLAY_TABLE or _DISPLAY_SUBJECT is not None:
_DISPLAY_ITEMS = [] _DISPLAY_ITEMS = []
_DISPLAY_TABLE = None _DISPLAY_TABLE = None
_DISPLAY_SUBJECT = None
return True return True
if not _RESULT_TABLE_HISTORY: if not _RESULT_TABLE_HISTORY:
return False return False
# Pop from history and restore # Pop from history and restore
_LAST_RESULT_TABLE, _LAST_RESULT_ITEMS = _RESULT_TABLE_HISTORY.pop() prev = _RESULT_TABLE_HISTORY.pop()
if isinstance(prev, tuple) and len(prev) >= 3:
_LAST_RESULT_TABLE, _LAST_RESULT_ITEMS, _LAST_RESULT_SUBJECT = prev[0], prev[1], prev[2]
elif isinstance(prev, tuple) and len(prev) == 2:
_LAST_RESULT_TABLE, _LAST_RESULT_ITEMS = prev
_LAST_RESULT_SUBJECT = None
else:
_LAST_RESULT_TABLE, _LAST_RESULT_ITEMS, _LAST_RESULT_SUBJECT = None, [], None
# Clear display items so get_last_result_items() falls back to restored items # Clear display items so get_last_result_items() falls back to restored items
_DISPLAY_ITEMS = [] _DISPLAY_ITEMS = []
_DISPLAY_TABLE = None _DISPLAY_TABLE = None
_DISPLAY_SUBJECT = None
return True return True
@@ -537,6 +559,17 @@ def get_display_table() -> Optional[Any]:
return _DISPLAY_TABLE return _DISPLAY_TABLE
def get_last_result_subject() -> Optional[Any]:
"""Get the subject associated with the current result table or overlay.
Overlay subject (from display-only tables) takes precedence; otherwise returns
the subject stored with the last result table.
"""
if _DISPLAY_SUBJECT is not None:
return _DISPLAY_SUBJECT
return _LAST_RESULT_SUBJECT
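A sketch of the intended round trip between a cmdlet and these helpers, using only functions defined in this module:
# set_last_result_table_overlay(tag_table, tag_items, subject=subject_payload)
# get_last_result_subject()        # -> subject_payload while the overlay is active
# restore_previous_result_table()  # clears the overlay; subsequent calls fall back
#                                  #    to the subject stored with the last real table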
def get_last_result_table() -> Optional[Any]: def get_last_result_table() -> Optional[Any]:
"""Get the current last result table. """Get the current last result table.

View File

@@ -175,6 +175,8 @@ class ResultTable:
"""Command that generated this table (e.g., 'download-data URL')""" """Command that generated this table (e.g., 'download-data URL')"""
self.source_args: List[str] = [] self.source_args: List[str] = []
"""Base arguments for the source command""" """Base arguments for the source command"""
self.header_lines: List[str] = []
"""Optional metadata lines rendered under the title"""
def add_row(self) -> ResultRow: def add_row(self) -> ResultRow:
"""Add a new row to the table and return it for configuration.""" """Add a new row to the table and return it for configuration."""
@@ -211,6 +213,34 @@ class ResultTable:
""" """
if 0 <= row_index < len(self.rows): if 0 <= row_index < len(self.rows):
self.rows[row_index].selection_args = selection_args self.rows[row_index].selection_args = selection_args
def set_header_lines(self, lines: List[str]) -> "ResultTable":
"""Attach metadata lines that render beneath the title."""
self.header_lines = [line for line in lines if line]
return self
def set_header_line(self, line: str) -> "ResultTable":
"""Attach a single metadata line beneath the title."""
return self.set_header_lines([line] if line else [])
def set_storage_summary(self, storage_counts: Dict[str, int], filter_text: Optional[str] = None, inline: bool = False) -> str:
"""Render a storage count summary (e.g., "Hydrus:0 Local:1 | filter: \"q\"").
Returns the summary string so callers can place it inline with the title if desired.
"""
summary_parts: List[str] = []
if storage_counts:
summary_parts.append(" ".join(f"{name}:{count}" for name, count in storage_counts.items()))
if filter_text:
safe_filter = filter_text.replace("\"", "\\\"")
summary_parts.append(f'filter: "{safe_filter}"')
summary = " | ".join(summary_parts)
if not inline:
self.set_header_line(summary)
return summary
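A minimal usage sketch for the new summary helper:
table = ResultTable("Search")
line = table.set_storage_summary({"Hydrus": 0, "Local": 2}, "dune", inline=True)
# line == 'Hydrus:0 Local:2 | filter: "dune"'
# With inline=False the same text is also attached via set_header_line() and
# rendered beneath the title when the table is formatted.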
def add_result(self, result: Any) -> "ResultTable": def add_result(self, result: Any) -> "ResultTable":
"""Add a result object (SearchResult, PipeObject, ResultItem, TagItem, or dict) as a row. """Add a result object (SearchResult, PipeObject, ResultItem, TagItem, or dict) as a row.
@@ -249,7 +279,14 @@ class ResultTable:
def _add_search_result(self, row: ResultRow, result: Any) -> None: def _add_search_result(self, row: ResultRow, result: Any) -> None:
"""Extract and add SearchResult fields to row.""" """Extract and add SearchResult fields to row."""
# Core fields # If provider supplied explicit columns, render those and skip legacy defaults
cols = getattr(result, "columns", None)
if cols:
for name, value in cols:
row.add_column(name, value)
return
# Core fields (legacy fallback)
title = getattr(result, 'title', '') title = getattr(result, 'title', '')
origin = getattr(result, 'origin', '').lower() origin = getattr(result, 'origin', '').lower()
@@ -597,6 +634,9 @@ class ResultTable:
lines.append("=" * self.title_width) lines.append("=" * self.title_width)
lines.append(self.title.center(self.title_width)) lines.append(self.title.center(self.title_width))
lines.append("=" * self.title_width) lines.append("=" * self.title_width)
if self.header_lines:
lines.extend(self.header_lines)
# Add header with # column # Add header with # column
header_parts = ["#".ljust(num_width)] header_parts = ["#".ljust(num_width)]