Files
Medios-Macina/CLI.py
2025-12-30 23:19:02 -08:00

4764 lines
193 KiB
Python

from __future__ import annotations
"""Medeia-Macina CLI.
This module intentionally uses a class-based architecture:
- no legacy procedural entrypoints
- no compatibility shims
- all REPL/pipeline/cmdlet execution state lives on objects
"""
import atexit
import io
import json
import re
import shlex
import sys
import threading
import time
import uuid
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence, Set, TextIO, Tuple, cast
import typer
from prompt_toolkit import PromptSession
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.document import Document
from prompt_toolkit.lexers import Lexer
from prompt_toolkit.styles import Style
from rich.console import Console
from rich.layout import Layout
from rich.panel import Panel
from rich.markdown import Markdown
from rich.bar import Bar
from rich.table import Table
from SYS.rich_display import stderr_console, stdout_console
def _install_rich_traceback(*, show_locals: bool = False) -> None:
"""Install Rich traceback handler as the default excepthook.
This keeps uncaught exceptions readable in the terminal.
"""
try:
from rich.traceback import install as rich_traceback_install
rich_traceback_install(show_locals=bool(show_locals))
except Exception:
# Fall back to the standard Python traceback if Rich isn't available.
return
# Default to Rich tracebacks for the whole process.
# Runs at import time; the helper swallows any failure, so this is a
# harmless no-op when Rich is missing.
_install_rich_traceback(show_locals=False)
from SYS.background_notifier import ensure_background_notifier
from SYS.logger import debug, set_debug
from SYS.worker_manager import WorkerManager
from SYS.cmdlet_catalog import (
ensure_registry_loaded,
get_cmdlet_arg_choices,
get_cmdlet_arg_flags,
get_cmdlet_metadata,
import_cmd_module,
list_cmdlet_metadata,
list_cmdlet_names,
)
from SYS.config import get_local_storage_path, load_config
from SYS.result_table import ResultTable
class SelectionSyntax:
    """Parses @ selection syntax into 1-based indices."""

    _RANGE_RE = re.compile(r"^[0-9\-]+$")

    @staticmethod
    def parse(token: str) -> Optional[Set[int]]:
        """Return 1-based indices or None when not a concrete selection.

        Concrete selections:
          - @2
          - @2-5
          - @{1,3,5}
          - @2,5,7-9

        Special (non-concrete) selectors return None:
          - @* (select all)
          - @.. (history prev)
          - @,, (history next)
        """
        if not token or not token.startswith("@"):
            return None
        body = token[1:].strip()
        if body in (".", ",", "*"):
            return None
        # Allow an optional {...} wrapper around the selector list.
        if body.startswith("{") and body.endswith("}"):
            body = body[1:-1].strip()
        chosen: Set[int] = set()
        for piece in body.split(","):
            piece = piece.strip()
            if not piece:
                continue
            if "-" in piece:
                lo_text, _, hi_text = piece.partition("-")
                lo_text = lo_text.strip()
                hi_text = hi_text.strip()
                if not lo_text or not hi_text:
                    return None
                try:
                    lo = int(lo_text)
                    hi = int(hi_text)
                except ValueError:
                    return None
                # Only positive, non-inverted ranges are concrete selections.
                if lo <= 0 or hi <= 0 or lo > hi:
                    return None
                chosen.update(range(lo, hi + 1))
            else:
                try:
                    idx = int(piece)
                except ValueError:
                    return None
                if idx <= 0:
                    return None
                chosen.add(idx)
        return chosen or None
class SelectionFilterSyntax:
    """Parses and applies @"COL:filter" selection filters.

    Notes:
    - CLI tokenization (shlex) strips quotes, so a user input of `@"TITLE:foo"`
      arrives as `@TITLE:foo`. We support both forms.
    - Filters apply to the *current selectable table items* (in-memory), not to
      provider searches.
    """

    # Comparison-operator prefix for expressions: >=, <=, !=, ==, >, <, =.
    _OP_RE = re.compile(r"^(>=|<=|!=|==|>|<|=)\s*(.+)$")
    # Duration tokens such as "1h", "22m", "3s" (case-insensitive).
    _DUR_TOKEN_RE = re.compile(r"(?i)(\d+)\s*([hms])")

    @staticmethod
    def parse(token: str) -> Optional[List[Tuple[str, str]]]:
        """Return list of (column, raw_expression) or None when not a filter token."""
        if not token or not str(token).startswith("@"):
            return None
        if token.strip() == "@*":
            return None
        # If this is a concrete numeric selection (@2, @1-3, @{1,3}), do not treat it as a filter.
        try:
            if SelectionSyntax.parse(str(token)) is not None:
                return None
        except Exception:
            pass
        raw = str(token)[1:].strip()
        if not raw:
            return None
        # If quotes survived tokenization, strip a single symmetric wrapper.
        if len(raw) >= 2 and raw[0] == raw[-1] and raw[0] in ('"', "'"):
            raw = raw[1:-1].strip()
        # Shorthand: @"foo" means Title contains "foo".
        if ":" not in raw:
            if raw:
                return [("Title", raw)]
            return None
        parts = [p.strip() for p in raw.split(",") if p.strip()]
        conditions: List[Tuple[str, str]] = []
        for part in parts:
            if ":" not in part:
                return None
            col, expr = part.split(":", 1)
            col = str(col or "").strip()
            expr = str(expr or "").strip()
            if not col:
                return None
            conditions.append((col, expr))
        return conditions if conditions else None

    @staticmethod
    def _norm_key(text: str) -> str:
        """Normalize a column name: trimmed, lowercase, collapsed whitespace."""
        return re.sub(r"\s+", " ", str(text or "").strip().lower())

    @staticmethod
    def _item_column_map(item: Any) -> Dict[str, str]:
        """Build a normalized {column name: text value} map from a result item.

        Display ``columns`` pairs are read first, then direct dict keys (for
        dicts) or a fixed set of well-known attributes (for objects).
        """
        out: Dict[str, str] = {}

        def _set(k: Any, v: Any) -> None:
            # Store a stringified value under the normalized key; skip Nones.
            key = SelectionFilterSyntax._norm_key(str(k or ""))
            if not key:
                return
            if v is None:
                return
            try:
                if isinstance(v, (list, tuple, set)):
                    text = ", ".join(str(x) for x in v if x is not None)
                else:
                    text = str(v)
            except Exception:
                return
            out[key] = text

        if isinstance(item, dict):
            # Display columns (primary UX surface)
            cols = item.get("columns")
            if isinstance(cols, list):
                for pair in cols:
                    try:
                        if isinstance(pair, (list, tuple)) and len(pair) == 2:
                            _set(pair[0], pair[1])
                    except Exception:
                        continue
            # Direct keys as fallback (may overwrite same-named columns).
            for k, v in item.items():
                if k == "columns":
                    continue
                _set(k, v)
        else:
            cols = getattr(item, "columns", None)
            if isinstance(cols, list):
                for pair in cols:
                    try:
                        if isinstance(pair, (list, tuple)) and len(pair) == 2:
                            _set(pair[0], pair[1])
                    except Exception:
                        continue
            for k in ("title", "path", "detail", "provider", "store", "table"):
                try:
                    _set(k, getattr(item, k, None))
                except Exception:
                    pass
        return out

    @staticmethod
    def _parse_duration_seconds(text: str) -> Optional[int]:
        """Parse a duration into whole seconds, or None when unparseable.

        Accepts plain seconds ("185"), clock form ("M:SS" / "H:MM:SS"), and
        token form ("1h2m3s", tokens in any combination).
        """
        s = str(text or "").strip()
        if not s:
            return None
        if s.isdigit():
            try:
                return max(0, int(s))
            except Exception:
                return None
        # clock format: M:SS or H:MM:SS
        if ":" in s:
            parts = [p.strip() for p in s.split(":")]
            if len(parts) == 2 and all(p.isdigit() for p in parts):
                m, sec = parts
                return max(0, int(m) * 60 + int(sec))
            if len(parts) == 3 and all(p.isdigit() for p in parts):
                h, m, sec = parts
                return max(0, int(h) * 3600 + int(m) * 60 + int(sec))
        # token format: 1h2m3s (tokens can appear in any combination)
        total = 0
        found = False
        for m in SelectionFilterSyntax._DUR_TOKEN_RE.finditer(s):
            found = True
            n = int(m.group(1))
            unit = m.group(2).lower()
            if unit == "h":
                total += n * 3600
            elif unit == "m":
                total += n * 60
            elif unit == "s":
                total += n
        if found:
            return max(0, int(total))
        return None

    @staticmethod
    def _parse_float(text: str) -> Optional[float]:
        """Parse a float, tolerating thousands separators; None on failure."""
        s = str(text or "").strip()
        if not s:
            return None
        s = s.replace(",", "")
        try:
            return float(s)
        except Exception:
            return None

    @staticmethod
    def _parse_op(expr: str) -> tuple[Optional[str], str]:
        """Split an expression into (operator, operand); operator is None when absent."""
        text = str(expr or "").strip()
        if not text:
            return None, ""
        m = SelectionFilterSyntax._OP_RE.match(text)
        if not m:
            return None, text
        return m.group(1), str(m.group(2) or "").strip()

    @staticmethod
    def matches(item: Any, conditions: List[Tuple[str, str]]) -> bool:
        """Return True when *item* satisfies every (column, expression) condition.

        Expressions without an operator are case-insensitive substring
        matches. With an operator, values are compared numerically (duration
        parsing first when the operands look time-like, then floats) with a
        string-equality fallback for = / == / !=.
        """
        colmap = SelectionFilterSyntax._item_column_map(item)
        for col, expr in conditions:
            key = SelectionFilterSyntax._norm_key(col)
            actual = colmap.get(key)
            if actual is None:
                # Unknown column on this item: the condition cannot match.
                # (BUGFIX: a previous revision re-queried the same key here as
                # an "alias", which was a dead no-op and has been removed.)
                return False
            op, rhs = SelectionFilterSyntax._parse_op(expr)
            left_text = str(actual or "").strip()
            right_text = str(rhs or "").strip()
            if op is None:
                # No operator: case-insensitive containment.
                if not right_text:
                    return False
                if right_text.lower() not in left_text.lower():
                    return False
                continue
            # Comparator: try duration parsing first when it looks time-like.
            prefer_duration = (
                key == "duration"
                or any(ch in right_text for ch in (":", "h", "m", "s"))
                or any(ch in left_text for ch in (":", "h", "m", "s"))
            )
            left_num: Optional[float] = None
            right_num: Optional[float] = None
            if prefer_duration:
                ldur = SelectionFilterSyntax._parse_duration_seconds(left_text)
                rdur = SelectionFilterSyntax._parse_duration_seconds(right_text)
                if ldur is not None and rdur is not None:
                    left_num = float(ldur)
                    right_num = float(rdur)
            if left_num is None or right_num is None:
                left_num = SelectionFilterSyntax._parse_float(left_text)
                right_num = SelectionFilterSyntax._parse_float(right_text)
            if left_num is not None and right_num is not None:
                if op in ("=", "=="):
                    if not (left_num == right_num):
                        return False
                elif op == "!=":
                    if not (left_num != right_num):
                        return False
                elif op == ">":
                    if not (left_num > right_num):
                        return False
                elif op == ">=":
                    if not (left_num >= right_num):
                        return False
                elif op == "<":
                    if not (left_num < right_num):
                        return False
                elif op == "<=":
                    if not (left_num <= right_num):
                        return False
                else:
                    return False
                continue
            # Fallback to string equality for =/!= when numeric parsing fails.
            if op in ("=", "=="):
                if left_text.lower() != right_text.lower():
                    return False
            elif op == "!=":
                if left_text.lower() == right_text.lower():
                    return False
            else:
                return False
        return True
class WorkerOutputMirror(io.TextIOBase):
    """Mirror stdout/stderr to worker manager while preserving console output.

    All writes pass straight through to the wrapped stream; completed lines
    are additionally forwarded (best-effort) to the worker manager.
    """

    def __init__(
        self,
        original: TextIO,
        manager: WorkerManager,
        worker_id: str,
        channel: str
    ):
        self._original = original    # real console stream writes go through to
        self._manager = manager      # receives mirrored line-buffered text
        self._worker_id = worker_id
        self._channel = channel      # logical channel name ("stdout"/"stderr")
        self._pending: str = ""      # tail of the last incomplete line

    def write(self, data: str) -> int:  # type: ignore[override]
        """Write *data* to the console and buffer it for the manager."""
        if not data:
            return 0
        self._original.write(data)
        self._buffer_text(data)
        return len(data)

    def flush(self) -> None:  # type: ignore[override]
        """Flush the console stream and force out any buffered partial line."""
        self._original.flush()
        self._flush_pending(force=True)

    def isatty(self) -> bool:  # pragma: no cover
        return bool(getattr(self._original, "isatty", lambda: False)())

    def _buffer_text(self, data: str) -> None:
        """Accumulate *data* and emit only completed lines to the manager."""
        combined = self._pending + data
        pieces = combined.splitlines(keepends=True)
        if not pieces:
            self._pending = combined
            return
        tail = pieces[-1]
        if tail.endswith(("\n", "\r")):
            ready = pieces
            self._pending = ""
        else:
            ready = pieces[:-1]
            self._pending = tail
        for line in ready:
            self._emit(line)

    def _flush_pending(self, *, force: bool = False) -> None:
        if force and self._pending:
            self._emit(self._pending)
            self._pending = ""

    def _emit(self, text: str) -> None:
        """Best-effort forward of *text* to the worker manager log."""
        if not text:
            return
        try:
            self._manager.append_stdout(self._worker_id, text, channel=self._channel)
        except Exception:
            pass

    @property
    def encoding(self) -> str:  # type: ignore[override]
        return getattr(self._original, "encoding", "utf-8")
class WorkerStageSession:
    """Lifecycle helper for wrapping a CLI cmdlet execution in a worker record."""

    def __init__(
        self,
        *,
        manager: WorkerManager,
        worker_id: str,
        orig_stdout: TextIO,
        orig_stderr: TextIO,
        stdout_proxy: WorkerOutputMirror,
        stderr_proxy: WorkerOutputMirror,
        config: Optional[Dict[str, Any]],
        logging_enabled: bool,
        completion_label: str,
        error_label: str,
    ) -> None:
        self.manager = manager
        self.worker_id = worker_id
        self.orig_stdout = orig_stdout
        self.orig_stderr = orig_stderr
        self.stdout_proxy = stdout_proxy
        self.stderr_proxy = stderr_proxy
        self.config = config
        self.logging_enabled = logging_enabled
        self.closed = False  # guards against double-close
        self._completion_label = completion_label
        self._error_label = error_label

    def close(self, *, status: str = "completed", error_msg: str = "") -> None:
        """Finalize the worker record and restore process stdio (idempotent)."""
        if self.closed:
            return
        # Drain any partial output still buffered in the mirrors.
        try:
            self.stdout_proxy.flush()
            self.stderr_proxy.flush()
        except Exception:
            pass
        # Put the real streams back before any further bookkeeping.
        sys.stdout = self.orig_stdout
        sys.stderr = self.orig_stderr
        if self.logging_enabled:
            try:
                self.manager.disable_logging_for_worker(self.worker_id)
            except Exception:
                pass
        try:
            if status == "completed":
                self.manager.log_step(self.worker_id, self._completion_label)
            else:
                detail = error_msg or status
                self.manager.log_step(self.worker_id, f"{self._error_label}: {detail}")
        except Exception:
            pass
        try:
            self.manager.finish_worker(
                self.worker_id,
                result=status or "completed",
                error_msg=error_msg or ""
            )
        except Exception:
            pass
        # Drop the "current worker" marker only if it still points at us.
        cfg = self.config
        if cfg and cfg.get("_current_worker_id") == self.worker_id:
            cfg.pop("_current_worker_id", None)
        self.closed = True
class WorkerManagerRegistry:
    """Process-wide WorkerManager cache keyed by library_root.

    ensure() lazily builds (or reuses) a single WorkerManager for the
    configured library root, caches it on the config dict under
    "_worker_manager", performs a one-shot sweep of orphaned CLI workers,
    and registers close() with atexit exactly once.
    """

    _manager: Optional[WorkerManager] = None  # cached singleton manager
    _manager_root: Optional[Path] = None      # root the singleton was built for
    _orphan_cleanup_done: bool = False        # one-shot orphan-sweep flag
    _registered: bool = False                 # atexit hook installed?

    @classmethod
    def ensure(cls, config: Dict[str, Any]) -> Optional[WorkerManager]:
        """Return a WorkerManager for *config*, creating/caching one as needed.

        Returns None when *config* is not a dict, no library root is
        configured, or manager construction fails (error printed to stderr).
        """
        if not isinstance(config, dict):
            return None
        existing = config.get("_worker_manager")
        if isinstance(existing, WorkerManager):
            return existing
        library_root = get_local_storage_path(config)
        if not library_root:
            return None
        try:
            resolved_root = Path(library_root).resolve()
        except Exception:
            resolved_root = Path(library_root)
        try:
            if cls._manager is None or cls._manager_root != resolved_root:
                # First call, or the root changed: replace the cached manager.
                if cls._manager is not None:
                    try:
                        cls._manager.close()
                    except Exception:
                        pass
                cls._manager = WorkerManager(resolved_root, auto_refresh_interval=0.5)
                cls._manager_root = resolved_root
            manager = cls._manager
            config["_worker_manager"] = manager
            if manager is not None and not cls._orphan_cleanup_done:
                # Best-effort sweep of stale CLI workers from a crashed session.
                try:
                    manager.expire_running_workers(
                        older_than_seconds=120,
                        worker_id_prefix="cli_%",
                        reason=
                        "CLI session ended unexpectedly; marking worker as failed",
                    )
                except Exception:
                    pass
                # BUGFIX: mark the sweep done after performing it. Previously
                # the flag was only set on the skip path, so the sweep re-ran
                # on every ensure() call made with a fresh config dict.
                cls._orphan_cleanup_done = True
            if not cls._registered:
                atexit.register(cls.close)
                cls._registered = True
            return manager
        except Exception as exc:
            print(
                f"[worker] Could not initialize worker manager: {exc}",
                file=sys.stderr
            )
            return None

    @classmethod
    def close(cls) -> None:
        """Close and forget the cached manager; re-arm the orphan sweep."""
        if cls._manager is None:
            return
        try:
            cls._manager.close()
        except Exception:
            pass
        cls._manager = None
        cls._manager_root = None
        cls._orphan_cleanup_done = False
class WorkerStages:
    """Factory methods for stage/pipeline worker sessions."""

    @staticmethod
    def _start_worker_session(
        worker_manager: Optional[WorkerManager],
        *,
        worker_type: str,
        title: str,
        description: str,
        pipe_text: str,
        config: Optional[Dict[str, Any]],
        completion_label: str,
        error_label: str,
        skip_logging_for: Optional[Set[str]] = None,
        session_worker_ids: Optional[Set[str]] = None,
    ) -> Optional[WorkerStageSession]:
        """Track a worker record and redirect process stdio through mirrors.

        Returns a WorkerStageSession the caller must close(), or None when
        tracking is unavailable or declined (no manager, a type listed in
        skip_logging_for, or the manager refused to track the worker).
        NOTE(review): despite its name, skip_logging_for skips session
        creation entirely for matching worker types, not just logging.
        """
        if worker_manager is None:
            return None
        if skip_logging_for and worker_type in skip_logging_for:
            return None
        # Worker ids look like "cli_<type>_<hex>"; type truncated to 8 chars.
        safe_type = worker_type or "cmd"
        worker_id = f"cli_{safe_type[:8]}_{uuid.uuid4().hex[:6]}"
        try:
            tracked = worker_manager.track_worker(
                worker_id,
                worker_type=worker_type,
                title=title,
                description=description or "(no args)",
                pipe=pipe_text,
            )
            if not tracked:
                return None
        except Exception as exc:
            print(f"[worker] Failed to track {worker_type}: {exc}", file=sys.stderr)
            return None
        if session_worker_ids is not None:
            session_worker_ids.add(worker_id)
        # Best-effort: attach a log handler dedicated to this worker.
        logging_enabled = False
        try:
            handler = worker_manager.enable_logging_for_worker(worker_id)
            logging_enabled = handler is not None
        except Exception:
            logging_enabled = False
        # Swap process stdio for mirroring proxies; the returned session's
        # close() restores the originals captured here.
        orig_stdout = sys.stdout
        orig_stderr = sys.stderr
        stdout_proxy = WorkerOutputMirror(
            orig_stdout,
            worker_manager,
            worker_id,
            "stdout"
        )
        stderr_proxy = WorkerOutputMirror(
            orig_stderr,
            worker_manager,
            worker_id,
            "stderr"
        )
        sys.stdout = stdout_proxy
        sys.stderr = stderr_proxy
        if isinstance(config, dict):
            # Expose the active worker id to downstream cmdlets via config.
            config["_current_worker_id"] = worker_id
        try:
            worker_manager.log_step(worker_id, f"Started {worker_type}")
        except Exception:
            pass
        return WorkerStageSession(
            manager=worker_manager,
            worker_id=worker_id,
            orig_stdout=orig_stdout,
            orig_stderr=orig_stderr,
            stdout_proxy=stdout_proxy,
            stderr_proxy=stderr_proxy,
            config=config,
            logging_enabled=logging_enabled,
            completion_label=completion_label,
            error_label=error_label,
        )

    @classmethod
    def begin_stage(
        cls,
        worker_manager: Optional[WorkerManager],
        *,
        cmd_name: str,
        stage_tokens: Sequence[str],
        config: Optional[Dict[str, Any]],
        command_text: str,
    ) -> Optional[WorkerStageSession]:
        """Open a worker session for a single pipeline stage (one cmdlet)."""
        description = " ".join(stage_tokens[1:]
                               ) if len(stage_tokens) > 1 else "(no args)"
        # Reuse the pipeline-level id set, if begin_pipeline installed one.
        session_worker_ids = None
        if isinstance(config, dict):
            session_worker_ids = config.get("_session_worker_ids")
        return cls._start_worker_session(
            worker_manager,
            worker_type=cmd_name,
            title=f"{cmd_name} stage",
            description=description,
            pipe_text=command_text,
            config=config,
            completion_label="Stage completed",
            error_label="Stage error",
            # Worker-inspection cmdlets are not tracked as workers themselves.
            skip_logging_for={".worker",
                              "worker",
                              "workers"},
            session_worker_ids=session_worker_ids,
        )

    @classmethod
    def begin_pipeline(
        cls,
        worker_manager: Optional[WorkerManager],
        *,
        pipeline_text: str,
        config: Optional[Dict[str, Any]],
    ) -> Optional[WorkerStageSession]:
        """Open a worker session covering an entire pipeline run."""
        # Fresh id set so stages started under this pipeline can register in it.
        session_worker_ids: Set[str] = set()
        if isinstance(config, dict):
            config["_session_worker_ids"] = session_worker_ids
        return cls._start_worker_session(
            worker_manager,
            worker_type="pipeline",
            title="Pipeline run",
            description=pipeline_text,
            pipe_text=pipeline_text,
            config=config,
            completion_label="Pipeline completed",
            error_label="Pipeline error",
            session_worker_ids=session_worker_ids,
        )
class CmdletIntrospection:
    """Read-only helpers over the cmdlet registry, used by completion/help.

    All methods swallow errors and return empty collections, so callers in
    the REPL hot path never raise.
    """

    @staticmethod
    def cmdlet_names() -> List[str]:
        """Return all registered cmdlet names ([] on failure)."""
        try:
            return list_cmdlet_names() or []
        except Exception:
            return []

    @staticmethod
    def cmdlet_args(cmd_name: str,
                    config: Optional[Dict[str, Any]] = None) -> List[str]:
        """Return the flag spellings accepted by *cmd_name* ([] on failure)."""
        try:
            return get_cmdlet_arg_flags(cmd_name, config=config) or []
        except Exception:
            return []

    @staticmethod
    def store_choices(config: Dict[str, Any]) -> List[str]:
        """Return the configured storage backend names ([] on failure)."""
        try:
            from Store import Store
            storage = Store(config=config, suppress_debug=True)
            return list(storage.list_backends() or [])
        except Exception:
            return []

    @classmethod
    def arg_choices(cls,
                    *,
                    cmd_name: str,
                    arg_name: str,
                    config: Dict[str, Any]) -> List[str]:
        """Return completion value choices for *arg_name* of *cmd_name*.

        Special cases: -storage/-store (backend names), -provider (file or
        search providers merged with metadata providers), -scrape (metadata
        providers). Everything else falls back to the cmdlet's declared
        choices. Ready providers are preferred; otherwise all are listed.
        """
        try:
            normalized_arg = (arg_name or "").lstrip("-").strip().lower()
            if normalized_arg in ("storage", "store"):
                backends = cls.store_choices(config)
                if backends:
                    return backends
            if normalized_arg == "provider":
                canonical_cmd = (cmd_name or "").replace("_", "-").lower()
                try:
                    from ProviderCore.registry import list_search_providers, list_file_providers
                except Exception:
                    list_search_providers = None  # type: ignore
                    list_file_providers = None  # type: ignore
                provider_choices: List[str] = []
                # add-file completes against file providers, not search ones.
                if canonical_cmd in {"add-file"} and list_file_providers is not None:
                    providers = list_file_providers(config) or {}
                    available = [
                        name for name, is_ready in providers.items() if is_ready
                    ]
                    return sorted(available) if available else sorted(providers.keys())
                if list_search_providers is not None:
                    providers = list_search_providers(config) or {}
                    available = [
                        name for name, is_ready in providers.items() if is_ready
                    ]
                    provider_choices = sorted(available) if available else sorted(
                        providers.keys()
                    )
                try:
                    from Provider.metadata_provider import list_metadata_providers
                    meta_providers = list_metadata_providers(config) or {}
                    meta_available = [n for n, ready in meta_providers.items() if ready]
                    meta_choices = (
                        sorted(meta_available)
                        if meta_available else sorted(meta_providers.keys())
                    )
                except Exception:
                    meta_choices = []
                merged = sorted(set(provider_choices + meta_choices))
                if merged:
                    return merged
            if normalized_arg == "scrape":
                try:
                    from Provider.metadata_provider import list_metadata_providers
                    meta_providers = list_metadata_providers(config) or {}
                    if meta_providers:
                        return sorted(meta_providers.keys())
                except Exception:
                    pass
            return get_cmdlet_arg_choices(cmd_name, arg_name) or []
        except Exception:
            return []
class CmdletCompleter(Completer):
    """Prompt-toolkit completer for the Medeia cmdlet REPL."""

    def __init__(self, *, config_loader: "ConfigLoader") -> None:
        self._config_loader = config_loader
        # Snapshot of registered cmdlet names, taken once at construction.
        self.cmdlet_names = CmdletIntrospection.cmdlet_names()

    @staticmethod
    def _used_arg_logicals(
        cmd_name: str,
        stage_tokens: List[str],
        config: Dict[str, Any]
    ) -> Set[str]:
        """Return logical argument names already used in this cmdlet stage.

        Example: if the user has typed `download-media -url ...`, then `url`
        is considered used and should not be suggested again (even as `--url`).
        """
        arg_flags = CmdletIntrospection.cmdlet_args(cmd_name, config)
        allowed = {a.lstrip("-").strip().lower()
                   for a in arg_flags if a}
        if not allowed:
            return set()
        used: Set[str] = set()
        for tok in stage_tokens[1:]:
            if not tok or not tok.startswith("-"):
                continue
            if tok in {"-",
                       "--"}:
                continue
            # Handle common `-arg=value` form.
            raw = tok.split("=", 1)[0]
            logical = raw.lstrip("-").strip().lower()
            if logical and logical in allowed:
                used.add(logical)
        return used

    def get_completions(
        self,
        document: Document,
        complete_event
    ):  # type: ignore[override]
        """Yield completions for the text before the cursor.

        Progression:
        1. Empty stage -> all cmdlet names.
        2. Completed first word + space -> that cmdlet's argument flags
           (or cmdlet names after `help`).
        3. Token following a known flag -> that flag's value choices.
        4. Otherwise -> remaining unused flags plus -help / --help.
        """
        text = document.text_before_cursor
        tokens = text.split()
        ends_with_space = bool(text) and text[-1].isspace()
        # Only tokens after the last pipe belong to the current stage.
        last_pipe = -1
        for idx, tok in enumerate(tokens):
            if tok == "|":
                last_pipe = idx
        stage_tokens = tokens[last_pipe + 1:] if last_pipe >= 0 else tokens
        if not stage_tokens:
            for cmd in self.cmdlet_names:
                yield Completion(cmd, start_position=0)
            return
        if len(stage_tokens) == 1:
            current = stage_tokens[0].lower()
            if ends_with_space:
                # Command word is complete: offer its argument flags.
                cmd_name = current.replace("_", "-")
                config = self._config_loader.load()
                if cmd_name == "help":
                    for cmd in self.cmdlet_names:
                        yield Completion(cmd, start_position=0)
                    return
                if cmd_name not in self.cmdlet_names:
                    return
                arg_names = CmdletIntrospection.cmdlet_args(cmd_name, config)
                # Offer each logical arg once, preferring single-dash spellings.
                logical_seen: Set[str] = set()
                for arg in arg_names:
                    arg_low = arg.lower()
                    if arg_low.startswith("--"):
                        continue
                    logical = arg.lstrip("-").lower()
                    if logical in logical_seen:
                        continue
                    yield Completion(arg, start_position=0)
                    logical_seen.add(logical)
                yield Completion("-help", start_position=0)
                return
            # Still typing the command word: prefix-match cmdlets and keywords.
            for cmd in self.cmdlet_names:
                if cmd.startswith(current):
                    yield Completion(cmd, start_position=-len(current))
            for keyword in ("help", "exit", "quit"):
                if keyword.startswith(current):
                    yield Completion(keyword, start_position=-len(current))
            return
        cmd_name = stage_tokens[0].replace("_", "-").lower()
        if ends_with_space:
            current_token = ""
            prev_token = stage_tokens[-1].lower()
        else:
            current_token = stage_tokens[-1].lower()
            prev_token = stage_tokens[-2].lower() if len(stage_tokens) > 1 else ""
        config = self._config_loader.load()
        # Value choices for the flag preceding the cursor, when known.
        choices = CmdletIntrospection.arg_choices(
            cmd_name=cmd_name,
            arg_name=prev_token,
            config=config
        )
        if choices:
            for choice in choices:
                if choice.lower().startswith(current_token):
                    yield Completion(choice, start_position=-len(current_token))
            return
        arg_names = CmdletIntrospection.cmdlet_args(cmd_name, config)
        used_logicals = self._used_arg_logicals(cmd_name, stage_tokens, config)
        logical_seen: Set[str] = set()
        for arg in arg_names:
            arg_low = arg.lower()
            # Bare "" or "-" prefers single-dash spellings, deduped per logical.
            prefer_single_dash = current_token in {"",
                                                   "-"}
            if prefer_single_dash and arg_low.startswith("--"):
                continue
            logical = arg.lstrip("-").lower()
            if logical in used_logicals:
                continue
            if prefer_single_dash and logical in logical_seen:
                continue
            if arg_low.startswith(current_token):
                yield Completion(arg, start_position=-len(current_token))
            if prefer_single_dash:
                logical_seen.add(logical)
        if cmd_name in self.cmdlet_names:
            if current_token.startswith("--"):
                if "--help".startswith(current_token):
                    yield Completion("--help", start_position=-len(current_token))
            else:
                if "-help".startswith(current_token):
                    yield Completion("-help", start_position=-len(current_token))
class MedeiaLexer(Lexer):
    """Prompt-toolkit lexer that styles the REPL input line.

    Token classes emitted: cmdlet, pipe, argument, value, string,
    selection_at, selection_range.
    """

    def lex_document(self, document: Document):  # type: ignore[override]
        def get_line(lineno: int):
            line = document.lines[lineno]
            tokens: List[tuple[str, str]] = []
            # Chunk the line into whitespace / pipe / quoted string / word.
            pattern = re.compile(
                r"""
                (\s+) | # 1. Whitespace
                (\|) | # 2. Pipe
                ("(?:[^"\\]|\\.)*"|'(?:[^'\\]|\\.)*') | # 3. Quoted string
                ([^\s\|]+) # 4. Word
                """,
                re.VERBOSE,
            )
            # The first word of the line and of each pipe segment is a cmdlet.
            is_cmdlet = True

            def _emit_keyed_value(word: str) -> bool:
                """Emit `key:` prefixes (comma-separated) as argument tokens.

                Designed for values like:
                clip:3m4s-3m14s,1h22m-1h33m,item:2-3
                Avoids special-casing URLs (://) and Windows drive paths (C:\\...).
                Returns True if it handled the token.
                """
                if not word or ":" not in word:
                    return False
                # Avoid URLs and common scheme patterns.
                if "://" in word:
                    return False
                # Avoid Windows drive paths (e.g., C:\foo or D:/bar)
                if re.match(r"^[A-Za-z]:[\\/]", word):
                    return False
                key_prefix = re.compile(r"^([A-Za-z_][A-Za-z0-9_-]*:)(.*)$")
                parts = word.split(",")
                handled_any = False
                for i, part in enumerate(parts):
                    if i > 0:
                        tokens.append(("class:value", ","))
                    if part == "":
                        continue
                    m = key_prefix.match(part)
                    if m:
                        tokens.append(("class:argument", m.group(1)))
                        if m.group(2):
                            tokens.append(("class:value", m.group(2)))
                        handled_any = True
                    else:
                        tokens.append(("class:value", part))
                        handled_any = True
                return handled_any

            for match in pattern.finditer(line):
                ws, pipe, quote, word = match.groups()
                if ws:
                    tokens.append(("", ws))
                    continue
                if pipe:
                    tokens.append(("class:pipe", pipe))
                    is_cmdlet = True
                    continue
                if quote:
                    # If the quoted token contains a keyed spec (clip:/item:/hash:),
                    # highlight the `key:` portion in argument-blue even inside quotes.
                    if len(quote) >= 2 and quote[0] == quote[-1] and quote[0] in ('"',
                                                                                 "'"):
                        q = quote[0]
                        inner = quote[1:-1]
                        start_index = len(tokens)
                        if _emit_keyed_value(inner):
                            # _emit_keyed_value already appended tokens for inner; insert opening quote
                            # before that chunk, then add the closing quote.
                            tokens.insert(start_index, ("class:string", q))
                            tokens.append(("class:string", q))
                            is_cmdlet = False
                            continue
                    tokens.append(("class:string", quote))
                    is_cmdlet = False
                    continue
                if not word:
                    continue
                if word.startswith("@"):  # selection tokens
                    rest = word[1:]
                    # Numeric/range selections get dedicated selection styling;
                    # other @-words fall through to normal word handling.
                    if rest and re.fullmatch(r"[0-9\-\*,]+", rest):
                        tokens.append(("class:selection_at", "@"))
                        tokens.append(("class:selection_range", rest))
                        is_cmdlet = False
                        continue
                    if rest == "":
                        tokens.append(("class:selection_at", "@"))
                        is_cmdlet = False
                        continue
                if is_cmdlet:
                    tokens.append(("class:cmdlet", word))
                    is_cmdlet = False
                elif word.startswith("-"):
                    tokens.append(("class:argument", word))
                else:
                    if not _emit_keyed_value(word):
                        tokens.append(("class:value", word))
            return tokens
        return get_line
class ConfigLoader:
    """Loads a fresh, caller-owned copy of the app config from a root dir."""

    def __init__(self, *, root: Path) -> None:
        self._root = root

    def load(self) -> Dict[str, Any]:
        """Return a deep copy of the loaded config, or {} when loading fails."""
        try:
            loaded = load_config(config_dir=self._root)
        except Exception:
            return {}
        try:
            return deepcopy(loaded)
        except Exception:
            return {}
class CmdletHelp:
    """Renders the cmdlet listing and per-cmdlet help panels with Rich."""

    @staticmethod
    def show_cmdlet_list() -> None:
        """Print a table of all cmdlets (name, aliases, args, summary)."""
        try:
            metadata = list_cmdlet_metadata() or {}
            from rich.box import SIMPLE
            from rich.panel import Panel
            from rich.table import Table as RichTable
            table = RichTable(
                show_header=True,
                header_style="bold",
                box=SIMPLE,
                expand=True
            )
            table.add_column("Cmdlet", no_wrap=True)
            table.add_column("Aliases")
            table.add_column("Args")
            table.add_column("Summary")
            for cmd_name in sorted(metadata.keys()):
                info = metadata[cmd_name]
                aliases = info.get("aliases", [])
                args = info.get("args", [])
                summary = info.get("summary") or ""
                alias_str = ", ".join(
                    [str(a) for a in (aliases or []) if str(a).strip()]
                )
                # Only dict-shaped arg entries with a name contribute.
                arg_names = [
                    a.get("name") for a in (args or [])
                    if isinstance(a, dict) and a.get("name")
                ]
                args_str = ", ".join([str(a) for a in arg_names if str(a).strip()])
                table.add_row(str(cmd_name), alias_str, args_str, str(summary))
            stdout_console().print(Panel(table, title="Cmdlets", expand=False))
        except Exception as exc:
            from rich.panel import Panel
            from rich.text import Text
            stderr_console().print(
                Panel(Text(f"Error: {exc}"),
                      title="Error",
                      expand=False)
            )

    @staticmethod
    def show_cmdlet_help(cmd_name: str) -> None:
        """Print detailed help for one cmdlet, or an unknown-command notice."""
        try:
            meta = get_cmdlet_metadata(cmd_name)
            if meta:
                CmdletHelp._print_metadata(cmd_name, meta)
                return
            print(f"Unknown command: {cmd_name}\n")
        except Exception as exc:
            print(f"Error: {exc}\n")

    @staticmethod
    def _print_metadata(cmd_name: str, data: Any) -> None:
        """Render metadata as panels: synopsis, description, parameters, remarks."""
        # Metadata objects may expose to_dict(); plain dicts pass through.
        d = data.to_dict() if hasattr(data, "to_dict") else data
        if not isinstance(d, dict):
            from rich.panel import Panel
            from rich.text import Text
            stderr_console().print(
                Panel(
                    Text(f"Invalid metadata for {cmd_name}"),
                    title="Error",
                    expand=False
                )
            )
            return
        name = d.get("name", cmd_name)
        summary = d.get("summary", "")
        usage = d.get("usage", "")
        description = d.get("description", "")
        args = d.get("args", [])
        details = d.get("details", [])
        from rich.box import SIMPLE
        from rich.console import Group
        from rich.panel import Panel
        from rich.table import Table as RichTable
        from rich.text import Text
        header = Text.assemble((str(name), "bold"))
        synopsis = Text(str(usage or name))
        stdout_console().print(
            Panel(Group(header,
                        synopsis),
                  title="Help",
                  expand=False)
        )
        if summary or description:
            desc_bits: List[Text] = []
            if summary:
                desc_bits.append(Text(str(summary)))
            if description:
                desc_bits.append(Text(str(description)))
            stdout_console().print(
                Panel(Group(*desc_bits),
                      title="Description",
                      expand=False)
            )
        if args and isinstance(args, list):
            param_table = RichTable(
                show_header=True,
                header_style="bold",
                box=SIMPLE,
                expand=True
            )
            param_table.add_column("Arg", no_wrap=True)
            param_table.add_column("Type", no_wrap=True)
            param_table.add_column("Required", no_wrap=True)
            param_table.add_column("Description")
            for arg in args:
                # Arg entries may be dicts or objects; read fields defensively.
                if isinstance(arg, dict):
                    name_str = arg.get("name", "?")
                    typ = arg.get("type", "string")
                    required = bool(arg.get("required", False))
                    desc = arg.get("description", "")
                else:
                    name_str = getattr(arg, "name", "?")
                    typ = getattr(arg, "type", "string")
                    required = bool(getattr(arg, "required", False))
                    desc = getattr(arg, "description", "")
                param_table.add_row(
                    f"-{name_str}",
                    str(typ),
                    "yes" if required else "no",
                    str(desc or "")
                )
            stdout_console().print(Panel(param_table, title="Parameters", expand=False))
        if details:
            stdout_console().print(
                Panel(
                    Group(*[Text(str(x)) for x in details]),
                    title="Remarks",
                    expand=False
                )
            )
class CmdletExecutor:
def __init__(self, *, config_loader: ConfigLoader) -> None:
    # Loader used to fetch a fresh config snapshot for each execution.
    self._config_loader = config_loader
@staticmethod
def _get_table_title_for_command(
cmd_name: str,
emitted_items: Optional[List[Any]] = None,
cmd_args: Optional[List[str]] = None,
) -> str:
title_map = {
"search-file": "Results",
"search_file": "Results",
"download-data": "Downloads",
"download_data": "Downloads",
"download-file": "Downloads",
"download_file": "Downloads",
"get-tag": "Tags",
"get_tag": "Tags",
"get-file": "Results",
"get_file": "Results",
"add-tags": "Results",
"add_tags": "Results",
"delete-tag": "Results",
"delete_tag": "Results",
"add-url": "Results",
"add_url": "Results",
"get-url": "url",
"get_url": "url",
"delete-url": "Results",
"delete_url": "Results",
"get-note": "Notes",
"get_note": "Notes",
"add-note": "Results",
"add_note": "Results",
"delete-note": "Results",
"delete_note": "Results",
"get-relationship": "Relationships",
"get_relationship": "Relationships",
"add-relationship": "Results",
"add_relationship": "Results",
"add-file": "Results",
"add_file": "Results",
"delete-file": "Results",
"delete_file": "Results",
"get-metadata": None,
"get_metadata": None,
}
mapped = title_map.get(cmd_name, "Results")
if mapped is not None:
return mapped
if emitted_items:
first = emitted_items[0]
try:
if isinstance(first, dict) and first.get("title"):
return str(first.get("title"))
if hasattr(first, "title") and getattr(first, "title"):
return str(getattr(first, "title"))
except Exception:
pass
return "Results"
def execute(self, cmd_name: str, args: List[str]) -> None:
    """Run a single cmdlet (no `|` pipeline) with @-selection support.

    Resolves ``cmd_name`` from the cmdlet registry (lazily importing its
    module when needed), strips @N / @* / @"COL:expr" selection tokens out of
    ``args``, feeds the selected items from the previous result set as piped
    input, runs the cmdlet inside a worker stage session with an optional
    Live progress UI, and renders any emitted results as a ResultTable.
    """
    from SYS import pipeline as ctx
    from cmdlet import REGISTRY
    ensure_registry_loaded()
    # REPL guard: stage-local selection tables should not leak across independent
    # commands. @ selection can always re-seed from the last result table.
    try:
        if hasattr(ctx, "set_current_stage_table"):
            ctx.set_current_stage_table(None)
    except Exception:
        pass
    cmd_fn = REGISTRY.get(cmd_name)
    if not cmd_fn:
        # Lazy-import module and register its CMDLET.
        try:
            mod = import_cmd_module(cmd_name)
            data = getattr(mod, "CMDLET", None) if mod else None
            if data and hasattr(data, "exec") and callable(getattr(data, "exec")):
                run_fn = getattr(data, "exec")
                REGISTRY[cmd_name] = run_fn
                cmd_fn = run_fn
        except Exception:
            cmd_fn = None
    if not cmd_fn:
        print(f"Unknown command: {cmd_name}\n")
        return
    config = self._config_loader.load()
    # ------------------------------------------------------------------
    # Single-command Live pipeline progress (match REPL behavior)
    # ------------------------------------------------------------------
    progress_ui = None
    pipe_idx: Optional[int] = None

    def _maybe_start_single_live_progress(
        *,
        cmd_name_norm: str,
        filtered_args: List[str],
        piped_input: Any,
        config: Any,
    ) -> None:
        # Start a one-pipe Live progress UI, unless the command is excluded,
        # output is quiet/non-TTY, or the command renders its own UI.
        nonlocal progress_ui, pipe_idx
        # Keep behavior consistent with pipeline runner exclusions.
        # Some commands render their own Rich UI (tables/panels) and don't
        # play nicely with Live cursor control.
        if cmd_name_norm in {
            "get-relationship",
            "get-rel",
            ".pipe",
            ".matrix",
            ".telegram",
            "telegram",
            "delete-file",
            "del-file",
        }:
            return
        # add-file directory selector mode: show only the selection table, no Live progress.
        if cmd_name_norm in {"add-file", "add_file"}:
            try:
                from pathlib import Path as _Path
                toks = list(filtered_args or [])
                i = 0
                while i < len(toks):
                    t = str(toks[i])
                    low = t.lower().strip()
                    if low in {"-path", "--path", "-p"} and i + 1 < len(toks):
                        nxt = str(toks[i + 1])
                        if nxt and ("," not in nxt):
                            p = _Path(nxt)
                            if p.exists() and p.is_dir():
                                # Single existing directory => selector mode.
                                return
                        i += 2
                        continue
                    i += 1
            except Exception:
                pass
        try:
            quiet_mode = (
                bool(config.get("_quiet_background_output"))
                if isinstance(config, dict) else False
            )
        except Exception:
            quiet_mode = False
        if quiet_mode:
            return
        try:
            import sys as _sys
            # Live progress is only useful on an interactive terminal.
            if not bool(getattr(_sys.stderr, "isatty", lambda: False)()):
                return
        except Exception:
            return
        try:
            from SYS.models import PipelineLiveProgress
            progress_ui = PipelineLiveProgress([cmd_name_norm], enabled=True)
            progress_ui.start()
            try:
                if hasattr(ctx, "set_live_progress"):
                    ctx.set_live_progress(progress_ui)
            except Exception:
                pass
            pipe_idx = 0
            # Estimate per-item task count for the single pipe.
            total_items = 1
            preview_items: Optional[List[Any]] = None
            try:
                if isinstance(piped_input, list):
                    total_items = max(1, int(len(piped_input)))
                    preview_items = list(piped_input)
                elif piped_input is not None:
                    total_items = 1
                    preview_items = [piped_input]
                else:
                    # No piped input: guess the item count from -path/-url
                    # values and bare URL-ish tokens in the args.
                    preview: List[Any] = []
                    toks = list(filtered_args or [])
                    i = 0
                    while i < len(toks):
                        t = str(toks[i])
                        low = t.lower().strip()
                        if (cmd_name_norm in {"add-file", "add_file"}
                                and low in {"-path", "--path", "-p"}
                                and i + 1 < len(toks)):
                            nxt = str(toks[i + 1])
                            if nxt:
                                if "," in nxt:
                                    parts = [
                                        p.strip().strip("\"'")
                                        for p in nxt.split(",")
                                    ]
                                    parts = [p for p in parts if p]
                                    if parts:
                                        preview.extend(parts)
                                    i += 2
                                    continue
                                else:
                                    preview.append(nxt)
                                    i += 2
                                    continue
                        if low in {"-url", "--url"} and i + 1 < len(toks):
                            nxt = str(toks[i + 1])
                            if nxt and not nxt.startswith("-"):
                                preview.append(nxt)
                                i += 2
                                continue
                        if (not t.startswith("-")) and ("://" in low
                                                        or low.startswith(
                                                            ("magnet:",
                                                             "torrent:"))):
                            preview.append(t)
                        i += 1
                    preview_items = preview if preview else None
                    total_items = max(1, int(len(preview)) if preview else 1)
            except Exception:
                total_items = 1
                preview_items = None
            try:
                progress_ui.begin_pipe(
                    0,
                    total_items=int(total_items),
                    items_preview=preview_items
                )
            except Exception:
                pass
        except Exception:
            progress_ui = None
            pipe_idx = None

    filtered_args: List[str] = []
    selected_indices: List[int] = []
    select_all = False
    selection_filters: List[List[Tuple[str, str]]] = []
    # Flags that take a value: an "@..." token right after one of these is an
    # ordinary argument value, not a selection token.
    value_flags: Set[str] = set()
    try:
        meta = get_cmdlet_metadata(cmd_name)
        raw = meta.get("raw") if isinstance(meta, dict) else None
        arg_specs = getattr(raw, "arg", None) if raw is not None else None
        if isinstance(arg_specs, list):
            for spec in arg_specs:
                spec_type = str(getattr(spec,
                                        "type",
                                        "string") or "string").strip().lower()
                if spec_type == "flag":
                    continue
                spec_name = str(getattr(spec, "name", "") or "")
                canonical = spec_name.lstrip("-").strip()
                if not canonical:
                    continue
                value_flags.add(f"-{canonical}".lower())
                value_flags.add(f"--{canonical}".lower())
                alias = str(getattr(spec, "alias", "") or "").strip()
                if alias:
                    value_flags.add(f"-{alias}".lower())
    except Exception:
        value_flags = set()
    for i, arg in enumerate(args):
        if isinstance(arg, str) and arg.startswith("@"):  # selection candidate
            prev = str(args[i - 1]).lower() if i > 0 else ""
            if prev in value_flags:
                filtered_args.append(arg)
                continue
            # Universal selection filter: @"COL:expr" (quotes may be stripped by tokenization)
            filter_spec = SelectionFilterSyntax.parse(arg)
            if filter_spec is not None:
                selection_filters.append(filter_spec)
                continue
            if arg.strip() == "@*":
                select_all = True
                continue
            selection = SelectionSyntax.parse(arg)
            if selection is not None:
                # 1-based user indices -> 0-based, de-duplicated in order.
                zero_based = sorted(idx - 1 for idx in selection)
                for idx in zero_based:
                    if idx not in selected_indices:
                        selected_indices.append(idx)
                continue
            filtered_args.append(arg)
            continue
        filtered_args.append(str(arg))
    # IMPORTANT: Do not implicitly feed the previous command's results into
    # a new command unless the user explicitly selected items via @ syntax.
    # Piping should require `|` (or an explicit @ selection).
    piped_items = ctx.get_last_result_items()
    result: Any = None
    effective_selected_indices: List[int] = []
    if piped_items and (select_all or selected_indices or selection_filters):
        candidate_idxs = list(range(len(piped_items)))
        # Filters narrow the candidate pool first; @N then indexes into it.
        for spec in selection_filters:
            candidate_idxs = [
                i for i in candidate_idxs
                if SelectionFilterSyntax.matches(piped_items[i], spec)
            ]
        if select_all:
            effective_selected_indices = list(candidate_idxs)
        elif selected_indices:
            effective_selected_indices = [
                candidate_idxs[i] for i in selected_indices
                if 0 <= i < len(candidate_idxs)
            ]
        else:
            effective_selected_indices = list(candidate_idxs)
        result = [piped_items[i] for i in effective_selected_indices]
    worker_manager = WorkerManagerRegistry.ensure(config)
    stage_session = WorkerStages.begin_stage(
        worker_manager,
        cmd_name=cmd_name,
        stage_tokens=[cmd_name, *filtered_args],
        config=config,
        command_text=" ".join([cmd_name, *filtered_args]).strip() or cmd_name,
    )
    stage_worker_id = stage_session.worker_id if stage_session else None
    # Start live progress after we know the effective cmd + args + piped input.
    cmd_norm = str(cmd_name or "").replace("_", "-").strip().lower()
    _maybe_start_single_live_progress(
        cmd_name_norm=cmd_norm or str(cmd_name or "").strip().lower(),
        filtered_args=filtered_args,
        piped_input=result,
        config=config,
    )
    on_emit = None
    if progress_ui is not None and pipe_idx is not None:
        _ui = progress_ui

        def _on_emit(obj: Any, _progress=_ui) -> None:
            # Forward every emitted object to the Live progress UI; never
            # let UI errors break the cmdlet.
            try:
                _progress.on_emit(0, obj)
            except Exception:
                pass

        on_emit = _on_emit
    pipeline_ctx = ctx.PipelineStageContext(
        stage_index=0,
        total_stages=1,
        pipe_index=pipe_idx if pipe_idx is not None else 0,
        worker_id=stage_worker_id,
        on_emit=on_emit,
    )
    ctx.set_stage_context(pipeline_ctx)
    stage_status = "completed"
    stage_error = ""
    ctx.set_last_selection(effective_selected_indices)
    try:
        try:
            if hasattr(ctx, "set_current_cmdlet_name"):
                ctx.set_current_cmdlet_name(cmd_name)
        except Exception:
            pass
        try:
            if hasattr(ctx, "set_current_stage_text"):
                raw_stage = ""
                try:
                    raw_stage = (
                        ctx.get_current_command_text("")
                        if hasattr(ctx,
                                   "get_current_command_text") else ""
                    )
                except Exception:
                    raw_stage = ""
                if raw_stage:
                    ctx.set_current_stage_text(raw_stage)
                else:
                    ctx.set_current_stage_text(
                        " ".join([cmd_name, *filtered_args]).strip() or cmd_name
                    )
        except Exception:
            pass
        ret_code = cmd_fn(result, filtered_args, config)
        if getattr(pipeline_ctx, "emits", None):
            emits = list(pipeline_ctx.emits)
            # Shared `-path` behavior: if the cmdlet emitted temp/PATH file artifacts,
            # move them to the user-specified destination and update emitted paths.
            try:
                from cmdlet import _shared as sh
                emits = sh.apply_output_path_from_pipeobjects(
                    cmd_name=cmd_name,
                    args=filtered_args,
                    emits=emits
                )
                try:
                    pipeline_ctx.emits = list(emits)
                except Exception:
                    pass
            except Exception:
                pass
            # Detect format-selection emits and skip printing (user selects with @N).
            is_format_selection = False
            if emits:
                first_emit = emits[0]
                if isinstance(first_emit, dict) and "format_id" in first_emit:
                    is_format_selection = True
            if is_format_selection:
                ctx.set_last_result_items_only(emits)
            else:
                table_title = self._get_table_title_for_command(
                    cmd_name,
                    emits,
                    filtered_args
                )
                # Commands whose tables support @N re-expansion later.
                selectable_commands = {
                    "search-file",
                    "download-data",
                    "download-media",
                    "download-file",
                    "search_file",
                    "download_data",
                    "download_media",
                    "download_file",
                    ".config",
                    ".worker",
                }
                display_only_commands = {
                    "get-url",
                    "get_url",
                    "get-note",
                    "get_note",
                    "get-relationship",
                    "get_relationship",
                    "get-file",
                    "get_file",
                }
                # Commands that build/update their own result table.
                self_managing_commands = {
                    "get-tag",
                    "get_tag",
                    "tags",
                    "search-file",
                    "search_file",
                }
                if cmd_name in self_managing_commands:
                    table = ctx.get_last_result_table()
                    if table is None:
                        table = ResultTable(table_title)
                        for emitted in emits:
                            table.add_result(emitted)
                else:
                    table = ResultTable(table_title)
                    for emitted in emits:
                        table.add_result(emitted)
                if cmd_name in selectable_commands:
                    table.set_source_command(cmd_name, filtered_args)
                    ctx.set_last_result_table(table, emits)
                    ctx.set_current_stage_table(None)
                elif cmd_name in display_only_commands:
                    ctx.set_last_result_items_only(emits)
                else:
                    ctx.set_last_result_items_only(emits)
                # Stop Live progress before printing tables.
                if progress_ui is not None:
                    try:
                        if pipe_idx is not None:
                            progress_ui.finish_pipe(
                                int(pipe_idx),
                                force_complete=(stage_status == "completed")
                            )
                    except Exception:
                        pass
                    try:
                        progress_ui.stop()
                    except Exception:
                        pass
                    try:
                        if hasattr(ctx, "set_live_progress"):
                            ctx.set_live_progress(None)
                    except Exception:
                        pass
                    progress_ui = None
                    pipe_idx = None
                stdout_console().print()
                stdout_console().print(table)
        # If the cmdlet produced a current-stage table without emits (e.g. format selection),
        # render it here for parity with REPL pipeline runner.
        if (not getattr(pipeline_ctx,
                        "emits",
                        None)) and hasattr(ctx,
                                           "get_current_stage_table"):
            try:
                stage_table = ctx.get_current_stage_table()
            except Exception:
                stage_table = None
            if stage_table is not None:
                try:
                    already_rendered = bool(
                        getattr(stage_table,
                                "_rendered_by_cmdlet",
                                False)
                    )
                except Exception:
                    already_rendered = False
                if already_rendered:
                    # Cmdlet printed it already; finally-block handles cleanup.
                    return
                if progress_ui is not None:
                    try:
                        if pipe_idx is not None:
                            progress_ui.finish_pipe(
                                int(pipe_idx),
                                force_complete=(stage_status == "completed")
                            )
                    except Exception:
                        pass
                    try:
                        progress_ui.stop()
                    except Exception:
                        pass
                    try:
                        if hasattr(ctx, "set_live_progress"):
                            ctx.set_live_progress(None)
                    except Exception:
                        pass
                    progress_ui = None
                    pipe_idx = None
                stdout_console().print()
                stdout_console().print(stage_table)
        if ret_code != 0:
            stage_status = "failed"
            stage_error = f"exit code {ret_code}"
            print(f"[exit code: {ret_code}]\n")
    except Exception as exc:
        stage_status = "failed"
        stage_error = f"{type(exc).__name__}: {exc}"
        print(f"[error] {type(exc).__name__}: {exc}\n")
    finally:
        # Always tear down Live progress, stage-local state, and the worker
        # session, regardless of how the cmdlet exited.
        if progress_ui is not None:
            try:
                if pipe_idx is not None:
                    progress_ui.finish_pipe(
                        int(pipe_idx),
                        force_complete=(stage_status == "completed")
                    )
            except Exception:
                pass
            try:
                progress_ui.stop()
            except Exception:
                pass
            try:
                if hasattr(ctx, "set_live_progress"):
                    ctx.set_live_progress(None)
            except Exception:
                pass
        # Do not keep stage tables around after a single command; it can cause
        # later @ selections to bind to stale tables (e.g. old add-file scans).
        try:
            if hasattr(ctx, "set_current_stage_table"):
                ctx.set_current_stage_table(None)
        except Exception:
            pass
        try:
            if hasattr(ctx, "clear_current_cmdlet_name"):
                ctx.clear_current_cmdlet_name()
        except Exception:
            pass
        try:
            if hasattr(ctx, "clear_current_stage_text"):
                ctx.clear_current_stage_text()
        except Exception:
            pass
        ctx.clear_last_selection()
        if stage_session:
            stage_session.close(status=stage_status, error_msg=stage_error)
class PipelineExecutor:
def __init__(self, *, config_loader: ConfigLoader) -> None:
self._config_loader = config_loader
self._toolbar_output: Optional[Callable[[str], None]] = None
def set_toolbar_output(self, output: Optional[Callable[[str], None]]) -> None:
self._toolbar_output = output
@staticmethod
def _split_stages(tokens: Sequence[str]) -> List[List[str]]:
stages: List[List[str]] = []
current: List[str] = []
for token in tokens:
if token == "|":
if current:
stages.append(current)
current = []
else:
current.append(token)
if current:
stages.append(current)
return stages
@staticmethod
def _validate_download_media_relationship_order(stages: List[List[str]]) -> bool:
"""Guard against running add-relationship on unstored download-media results.
Intended UX:
download-media ... | add-file -store <store> | add-relationship
Rationale:
download-media outputs items that may not yet have a stable store+hash.
add-relationship is designed to operate in store/hash mode.
"""
def _norm(name: str) -> str:
return str(name or "").replace("_", "-").strip().lower()
names: List[str] = []
for stage in stages or []:
if not stage:
continue
names.append(_norm(stage[0]))
dl_idxs = [i for i, n in enumerate(names) if n == "download-media"]
rel_idxs = [i for i, n in enumerate(names) if n == "add-relationship"]
add_file_idxs = [i for i, n in enumerate(names) if n == "add-file"]
if not dl_idxs or not rel_idxs:
return True
# If download-media is upstream of add-relationship, require an add-file in between.
for rel_i in rel_idxs:
dl_before = [d for d in dl_idxs if d < rel_i]
if not dl_before:
continue
dl_i = max(dl_before)
if not any(dl_i < a < rel_i for a in add_file_idxs):
print(
"Pipeline order error: when using download-media with add-relationship, "
"add-relationship must come after add-file (so items are stored and have store+hash).\n"
"Example: download-media <...> | add-file -store <store> | add-relationship\n"
)
return False
return True
@staticmethod
def _try_clear_pipeline_stop(ctx: Any) -> None:
try:
if hasattr(ctx, "clear_pipeline_stop"):
ctx.clear_pipeline_stop()
except Exception:
pass
@staticmethod
def _maybe_seed_current_stage_table(ctx: Any) -> None:
try:
if hasattr(ctx,
"get_current_stage_table") and not ctx.get_current_stage_table():
display_table = (
ctx.get_display_table() if hasattr(ctx,
"get_display_table") else None
)
if display_table:
ctx.set_current_stage_table(display_table)
else:
last_table = (
ctx.get_last_result_table()
if hasattr(ctx,
"get_last_result_table") else None
)
if last_table:
ctx.set_current_stage_table(last_table)
except Exception:
pass
@staticmethod
def _maybe_apply_pending_pipeline_tail(ctx: Any,
stages: List[List[str]]) -> List[List[str]]:
try:
pending_tail = (
ctx.get_pending_pipeline_tail()
if hasattr(ctx,
"get_pending_pipeline_tail") else []
)
pending_source = (
ctx.get_pending_pipeline_source()
if hasattr(ctx,
"get_pending_pipeline_source") else None
)
except Exception:
pending_tail = []
pending_source = None
try:
current_source = (
ctx.get_current_stage_table_source_command()
if hasattr(ctx,
"get_current_stage_table_source_command") else None
)
except Exception:
current_source = None
try:
effective_source = current_source or (
ctx.get_last_result_table_source_command()
if hasattr(ctx,
"get_last_result_table_source_command") else None
)
except Exception:
effective_source = current_source
selection_only = bool(
len(stages) == 1 and stages[0] and stages[0][0].startswith("@")
)
if pending_tail and selection_only:
if (pending_source is None) or (effective_source
and pending_source == effective_source):
stages = list(stages) + list(pending_tail)
try:
if hasattr(ctx, "clear_pending_pipeline_tail"):
ctx.clear_pending_pipeline_tail()
except Exception:
pass
else:
try:
if hasattr(ctx, "clear_pending_pipeline_tail"):
ctx.clear_pending_pipeline_tail()
except Exception:
pass
return stages
def _apply_quiet_background_flag(self, config: Any) -> Any:
if isinstance(config, dict):
# This executor is used by both the REPL and the `pipeline` subcommand.
# Quiet/background mode is helpful for detached/background runners, but
# it suppresses interactive UX (like the pipeline Live progress UI).
config["_quiet_background_output"] = bool(self._toolbar_output is None)
return config
@staticmethod
def _extract_first_stage_selection_tokens(
stages: List[List[str]],
) -> tuple[List[List[str]],
List[int],
bool,
bool]:
first_stage_tokens = stages[0] if stages else []
first_stage_selection_indices: List[int] = []
first_stage_had_extra_args = False
first_stage_select_all = False
if first_stage_tokens:
new_first_stage: List[str] = []
for token in first_stage_tokens:
if token.startswith("@"): # selection
selection = SelectionSyntax.parse(token)
if selection is not None:
first_stage_selection_indices = sorted(
[i - 1 for i in selection]
)
continue
if token == "@*":
first_stage_select_all = True
continue
new_first_stage.append(token)
if new_first_stage:
stages = list(stages)
stages[0] = new_first_stage
if first_stage_selection_indices or first_stage_select_all:
first_stage_had_extra_args = True
elif first_stage_selection_indices or first_stage_select_all:
stages = list(stages)
stages.pop(0)
return (
stages,
first_stage_selection_indices,
first_stage_had_extra_args,
first_stage_select_all,
)
@staticmethod
def _apply_select_all_if_requested(ctx: Any,
indices: List[int],
select_all: bool) -> List[int]:
if not select_all:
return indices
try:
last_items = ctx.get_last_result_items()
except Exception:
last_items = None
if last_items:
return list(range(len(last_items)))
return indices
@staticmethod
def _maybe_run_class_selector(
    ctx: Any,
    config: Any,
    selected_items: list,
    *,
    stage_is_last: bool
) -> bool:
    """Offer the final selection to a provider/store-specific selector.

    Only runs when this is the last pipeline stage. Candidate selector keys
    are gathered from the current table type and each item's provider/store/
    table fields; provider selectors are tried first, then store-backend
    selectors. Returns True when a selector handled the selection (or when
    a provider selector raised — the error is reported and the selection is
    considered consumed).
    """
    if not stage_is_last:
        return False
    candidates: list[str] = []
    seen: set[str] = set()

    def _add(value) -> None:
        # Normalize, de-duplicate, and collect a candidate selector key.
        try:
            text = str(value or "").strip().lower()
        except Exception:
            return
        if not text or text in seen:
            return
        seen.add(text)
        candidates.append(text)

    try:
        current_table = ctx.get_current_stage_table() or ctx.get_last_result_table()
        _add(
            current_table.
            table if current_table and hasattr(current_table,
                                               "table") else None
        )
    except Exception:
        pass
    for item in selected_items or []:
        if isinstance(item, dict):
            _add(item.get("provider"))
            _add(item.get("store"))
            _add(item.get("table"))
        else:
            _add(getattr(item, "provider", None))
            _add(getattr(item, "store", None))
            _add(getattr(item, "table", None))
    try:
        from ProviderCore.registry import get_provider, is_known_provider_name
    except Exception:
        get_provider = None  # type: ignore
        is_known_provider_name = None  # type: ignore
    if get_provider is not None:
        for key in candidates:
            try:
                if is_known_provider_name is not None and (
                        not is_known_provider_name(key)):
                    continue
            except Exception:
                # If the predicate fails for any reason, fall back to legacy behavior.
                pass
            try:
                provider = get_provider(key, config)
            except Exception:
                continue
            selector = getattr(provider, "selector", None)
            if selector is None:
                continue
            try:
                handled = bool(
                    selector(selected_items,
                             ctx=ctx,
                             stage_is_last=True)
                )
            except Exception as exc:
                print(f"{key} selector failed: {exc}\n")
                return True
            if handled:
                return True
    # Fall back to store-backend selectors keyed by each item's store name.
    store_keys: list[str] = []
    for item in selected_items or []:
        if isinstance(item, dict):
            v = item.get("store")
        else:
            v = getattr(item, "store", None)
        name = str(v or "").strip()
        if name:
            store_keys.append(name)
    if store_keys:
        try:
            from Store.registry import Store as StoreRegistry
            store_registry = StoreRegistry(config, suppress_debug=True)
            _backend_names = list(store_registry.list_backends() or [])
            # Case-insensitive backend-name resolution map.
            _backend_by_lower = {
                str(n).lower(): str(n)
                for n in _backend_names if str(n).strip()
            }
            for name in store_keys:
                resolved_name = name
                if not store_registry.is_available(resolved_name):
                    resolved_name = _backend_by_lower.get(str(name).lower(), name)
                if not store_registry.is_available(resolved_name):
                    continue
                backend = store_registry[resolved_name]
                selector = getattr(backend, "selector", None)
                if selector is None:
                    continue
                handled = bool(
                    selector(selected_items,
                             ctx=ctx,
                             stage_is_last=True)
                )
                if handled:
                    return True
        except Exception:
            pass
    return False
@staticmethod
def _maybe_open_url_selection(
current_table: Any,
selected_items: list,
*,
stage_is_last: bool
) -> bool:
if not stage_is_last:
return False
if not selected_items or len(selected_items) != 1:
return False
table_type = ""
source_cmd = ""
try:
table_type = str(getattr(current_table, "table", "") or "").strip().lower()
except Exception:
table_type = ""
try:
source_cmd = (
str(getattr(current_table,
"source_command",
"") or "").strip().replace("_",
"-").lower()
)
except Exception:
source_cmd = ""
if table_type != "url" and source_cmd != "get-url":
return False
item = selected_items[0]
url = None
try:
from cmdlet._shared import get_field
url = get_field(item, "url")
except Exception:
try:
url = item.get("url") if isinstance(item,
dict
) else getattr(item,
"url",
None)
except Exception:
url = None
url_text = str(url or "").strip()
if not url_text:
return False
try:
import webbrowser
webbrowser.open(url_text, new=2)
return True
except Exception:
return False
def _maybe_enable_background_notifier(
self,
worker_manager: Any,
config: Any,
pipeline_session: Any
) -> None:
if not (pipeline_session and worker_manager and isinstance(config, dict)):
return
session_worker_ids = config.get("_session_worker_ids")
if not session_worker_ids:
return
try:
output_fn = self._toolbar_output
quiet_mode = bool(config.get("_quiet_background_output"))
terminal_only = quiet_mode and not output_fn
kwargs: Dict[str,
Any] = {
"session_worker_ids": session_worker_ids,
"only_terminal_updates": terminal_only,
"overlay_mode": bool(output_fn),
}
if output_fn:
kwargs["output"] = output_fn
ensure_background_notifier(worker_manager, **kwargs)
except Exception:
pass
@staticmethod
def _get_raw_stage_texts(ctx: Any) -> List[str]:
raw_stage_texts: List[str] = []
try:
if hasattr(ctx, "get_current_command_stages"):
raw_stage_texts = ctx.get_current_command_stages() or []
except Exception:
raw_stage_texts = []
return raw_stage_texts
def _maybe_apply_initial_selection(
    self,
    ctx: Any,
    config: Any,
    stages: List[List[str]],
    *,
    selection_indices: List[int],
    first_stage_had_extra_args: bool,
    worker_manager: Any,
    pipeline_session: Any,
) -> tuple[bool,
           Any]:
    """Resolve leading @N selection tokens into piped input or an expanded stage.

    Two outcomes:
    - Command expansion: when the current table records a source command and
      per-row selection args, the selection is rewritten into a concrete
      first stage (e.g. re-running add-file with the selected paths);
      ``stages`` is mutated in place.
    - Item piping: otherwise the selected items are coerced to pipe objects
      and returned as the initial piped value; downloader stages may be
      auto-inserted for provider tables (youtube, soulseek, ...).

    Returns ``(ok, piped_result)``; ``ok`` is False when the selection could
    not be satisfied or a class selector fully handled it.
    """
    if not selection_indices:
        return True, None
    # Seed the stage table from the display/last table if no source is bound.
    try:
        if not ctx.get_current_stage_table_source_command():
            display_table = (
                ctx.get_display_table() if hasattr(ctx,
                                                   "get_display_table") else None
            )
            table_for_stage = display_table or ctx.get_last_result_table()
            if table_for_stage:
                ctx.set_current_stage_table(table_for_stage)
    except Exception:
        pass
    source_cmd = None
    source_args_raw = None
    try:
        source_cmd = ctx.get_current_stage_table_source_command()
        source_args_raw = ctx.get_current_stage_table_source_args()
    except Exception:
        source_cmd = None
        source_args_raw = None
    if isinstance(source_args_raw, str):
        source_args: List[str] = [source_args_raw]
    elif isinstance(source_args_raw, list):
        source_args = [str(x) for x in source_args_raw if x is not None]
    else:
        source_args = []
    current_table = None
    try:
        current_table = ctx.get_current_stage_table()
    except Exception:
        current_table = None
    table_type = (
        current_table.table if current_table and hasattr(current_table,
                                                         "table") else None
    )
    command_expanded = False
    if table_type in {"youtube",
                      "soulseek"}:
        # Provider tables: never expand into a command; pipe items instead.
        command_expanded = False
    elif source_cmd == "search-file" and source_args and "youtube" in source_args:
        command_expanded = False
    else:
        selected_row_args: List[str] = []
        skip_pipe_expansion = source_cmd == ".pipe" and len(stages) > 0
        # Command expansion via @N:
        # - Default behavior: expand ONLY for single-row selections.
        # - Special case: allow multi-row expansion for add-file directory tables by
        #   combining selected rows into a single `-path file1,file2,...` argument.
        if source_cmd and not skip_pipe_expansion:
            src = str(source_cmd).replace("_", "-").strip().lower()
            if src == "add-file" and selection_indices:
                row_args_list: List[List[str]] = []
                for idx in selection_indices:
                    try:
                        row_args = ctx.get_current_stage_table_row_selection_args(
                            idx
                        )
                    except Exception:
                        row_args = None
                    if isinstance(row_args, list) and row_args:
                        row_args_list.append(
                            [str(x) for x in row_args if x is not None]
                        )
                # Combine `['-path', <file>]` from each row into one `-path` arg.
                paths: List[str] = []
                can_merge = bool(row_args_list) and (
                    len(row_args_list) == len(selection_indices)
                )
                if can_merge:
                    for ra in row_args_list:
                        if len(ra) == 2 and str(ra[0]).strip().lower() in {
                                "-path",
                                "--path",
                                "-p",
                        }:
                            p = str(ra[1]).strip()
                            if p:
                                paths.append(p)
                        else:
                            can_merge = False
                            break
                if can_merge and paths:
                    selected_row_args.extend(["-path", ",".join(paths)])
                elif len(selection_indices) == 1 and row_args_list:
                    selected_row_args.extend(row_args_list[0])
            else:
                # Only perform @N command expansion for *single-item* selections.
                # For multi-item selections (e.g. @*, @1-5), expanding to one row
                # would silently drop items. In those cases we pipe items downstream.
                if len(selection_indices) == 1:
                    idx = selection_indices[0]
                    row_args = ctx.get_current_stage_table_row_selection_args(idx)
                    if row_args:
                        selected_row_args.extend(row_args)
        if selected_row_args:
            if isinstance(source_cmd, list):
                cmd_list: List[str] = [str(x) for x in source_cmd if x is not None]
            elif isinstance(source_cmd, str):
                cmd_list = [source_cmd]
            else:
                cmd_list = []
            expanded_stage: List[str] = cmd_list + source_args + selected_row_args
            if first_stage_had_extra_args and stages:
                # Keep the user's extra first-stage tokens after the expansion.
                expanded_stage += stages[0]
                stages[0] = expanded_stage
            else:
                stages.insert(0, expanded_stage)
            if pipeline_session and worker_manager:
                try:
                    worker_manager.log_step(
                        pipeline_session.worker_id,
                        f"@N expansion: {source_cmd} + {' '.join(str(x) for x in selected_row_args)}",
                    )
                except Exception:
                    pass
            selection_indices = []
            command_expanded = True
    if (not command_expanded) and selection_indices:
        stage_table = None
        try:
            stage_table = ctx.get_current_stage_table()
        except Exception:
            stage_table = None
        display_table = None
        try:
            display_table = (
                ctx.get_display_table() if hasattr(ctx,
                                                   "get_display_table") else None
            )
        except Exception:
            display_table = None
        if not stage_table and display_table is not None:
            stage_table = display_table
        if not stage_table:
            try:
                stage_table = ctx.get_last_result_table()
            except Exception:
                stage_table = None
        # Prefer selecting from the last selectable *table* (search/playlist)
        # rather than from display-only emitted items, unless we're explicitly
        # selecting from an overlay table.
        try:
            if display_table is not None and stage_table is display_table:
                items_list = ctx.get_last_result_items() or []
            else:
                if hasattr(ctx, "get_last_selectable_result_items"):
                    items_list = ctx.get_last_selectable_result_items() or []
                else:
                    items_list = ctx.get_last_result_items() or []
        except Exception:
            items_list = []
        resolved_items = items_list if items_list else []
        if items_list:
            filtered = [
                resolved_items[i] for i in selection_indices
                if 0 <= i < len(resolved_items)
            ]
            if not filtered:
                print("No items matched selection in pipeline\n")
                return False, None
            if PipelineExecutor._maybe_run_class_selector(
                    ctx,
                    config,
                    filtered,
                    stage_is_last=(not stages)):
                return False, None
            from cmdlet._shared import coerce_to_pipe_object
            filtered_pipe_objs = [coerce_to_pipe_object(item) for item in filtered]
            # Single selection pipes the bare object; multi pipes a list.
            piped_result = (
                filtered_pipe_objs
                if len(filtered_pipe_objs) > 1 else filtered_pipe_objs[0]
            )
            if pipeline_session and worker_manager:
                try:
                    selection_parts = [f"@{i+1}" for i in selection_indices]
                    worker_manager.log_step(
                        pipeline_session.worker_id,
                        f"Applied @N selection {' | '.join(selection_parts)}",
                    )
                except Exception:
                    pass
            # Auto-insert downloader stages for provider tables.
            try:
                current_table = ctx.get_current_stage_table(
                ) or ctx.get_last_result_table()
            except Exception:
                current_table = None
            table_type = (
                current_table.table
                if current_table and hasattr(current_table,
                                             "table") else None
            )
            if not stages:
                if table_type == "youtube":
                    print("Auto-running YouTube selection via download-media")
                    stages.append(["download-media"])
                elif table_type == "bandcamp":
                    print("Auto-running Bandcamp selection via download-media")
                    stages.append(["download-media"])
                elif table_type == "internetarchive":
                    print("Auto-loading Internet Archive item via download-file")
                    stages.append(["download-file"])
                elif table_type == "podcastindex.episodes":
                    print("Auto-piping selection to download-file")
                    stages.append(["download-file"])
                elif table_type in {"soulseek",
                                    "openlibrary",
                                    "libgen"}:
                    print("Auto-piping selection to download-file")
                    stages.append(["download-file"])
            else:
                first_cmd = stages[0][0] if stages and stages[0] else None
                if table_type == "soulseek" and first_cmd not in (
                        "download-file",
                        "download-media",
                        "download_media",
                        ".pipe",
                ):
                    debug("Auto-inserting download-file after Soulseek selection")
                    stages.insert(0, ["download-file"])
                if table_type == "youtube" and first_cmd not in (
                        "download-media",
                        "download_media",
                        "download-file",
                        ".pipe",
                ):
                    debug("Auto-inserting download-media after YouTube selection")
                    stages.insert(0, ["download-media"])
                if table_type == "bandcamp" and first_cmd not in (
                        "download-media",
                        "download_media",
                        "download-file",
                        ".pipe",
                ):
                    print("Auto-inserting download-media after Bandcamp selection")
                    stages.insert(0, ["download-media"])
                if table_type == "internetarchive" and first_cmd not in (
                        "download-file",
                        "download-media",
                        "download_media",
                        ".pipe",
                ):
                    debug(
                        "Auto-inserting download-file after Internet Archive selection"
                    )
                    stages.insert(0, ["download-file"])
                if table_type == "podcastindex.episodes" and first_cmd not in (
                        "download-file",
                        "download-media",
                        "download_media",
                        ".pipe",
                ):
                    print("Auto-inserting download-file after PodcastIndex episode selection")
                    stages.insert(0, ["download-file"])
                if table_type == "libgen" and first_cmd not in (
                        "download-file",
                        "download-media",
                        "download_media",
                        ".pipe",
                ):
                    print("Auto-inserting download-file after Libgen selection")
                    stages.insert(0, ["download-file"])
            return True, piped_result
        else:
            print("No previous results to select from\n")
            return False, None
    return True, None
@staticmethod
def _maybe_start_live_progress(config: Any,
                               stages: List[List[str]]) -> tuple[Any,
                                                                 Dict[int,
                                                                      int]]:
    """Start a Live progress UI spanning the eligible pipeline stages.

    Skips entirely when quiet mode is on or stderr is not a TTY. Stages that
    render their own Rich UI (get-relationship, .pipe, .matrix, delete-file,
    add-file directory selector, @-selections) are excluded from the progress
    pipes. Returns ``(progress_ui_or_None, {stage_index: pipe_index})``; on
    any failure both fall back to ``(None, {})``.
    """
    progress_ui = None
    pipe_index_by_stage: Dict[int,
                              int] = {}
    try:
        quiet_mode = (
            bool(config.get("_quiet_background_output"))
            if isinstance(config,
                          dict) else False
        )
    except Exception:
        quiet_mode = False
    try:
        import sys as _sys
        if (not quiet_mode) and bool(getattr(_sys.stderr,
                                             "isatty", lambda: False)()):
            from SYS.models import PipelineLiveProgress
            pipe_stage_indices: List[int] = []
            pipe_labels: List[str] = []
            for idx, stage_tokens in enumerate(stages):
                if not stage_tokens:
                    continue
                name = str(stage_tokens[0]).replace("_", "-").lower()
                if name == "@" or name.startswith("@"):
                    continue
                # add-file directory selector stage: avoid Live progress so the
                # selection table renders cleanly.
                if name in {"add-file",
                            "add_file"}:
                    try:
                        from pathlib import Path as _Path
                        toks = list(stage_tokens[1:])
                        i = 0
                        while i < len(toks):
                            t = str(toks[i])
                            low = t.lower().strip()
                            if low in {"-path",
                                       "--path",
                                       "-p"} and i + 1 < len(toks):
                                nxt = str(toks[i + 1])
                                if nxt and ("," not in nxt):
                                    p = _Path(nxt)
                                    if p.exists() and p.is_dir():
                                        name = ""  # mark as skipped
                                        break
                                i += 2
                                continue
                            i += 1
                    except Exception:
                        pass
                if not name:
                    continue
                # Display-only: avoid Live progress for relationship viewing.
                # This keeps `@1 | get-relationship` clean and prevents progress UI
                # from interfering with Rich tables/panels.
                if name in {"get-relationship",
                            "get-rel"}:
                    continue
                # `.pipe` (MPV) is an interactive launcher; disable pipeline Live progress
                # for it because it doesn't meaningfully "complete" (mpv may keep running)
                # and Live output interferes with MPV playlist UI.
                if name == ".pipe":
                    continue
                # `.matrix` uses a two-phase picker (@N then .matrix -send). Pipeline Live
                # progress can linger across those phases and interfere with interactive output.
                if name == ".matrix":
                    continue
                # `delete-file` prints a Rich table directly; Live progress interferes and
                # can truncate/overwrite the output.
                if name in {"delete-file",
                            "del-file"}:
                    continue
                pipe_stage_indices.append(idx)
                pipe_labels.append(name)
            if pipe_labels:
                progress_ui = PipelineLiveProgress(pipe_labels, enabled=True)
                progress_ui.start()
                try:
                    from SYS import pipeline as _pipeline_ctx
                    if hasattr(_pipeline_ctx, "set_live_progress"):
                        _pipeline_ctx.set_live_progress(progress_ui)
                except Exception:
                    pass
                # Map original stage index -> position in the progress pipes.
                pipe_index_by_stage = {
                    stage_idx: pipe_idx
                    for pipe_idx, stage_idx in enumerate(pipe_stage_indices)
                }
    except Exception:
        progress_ui = None
        pipe_index_by_stage = {}
    return progress_ui, pipe_index_by_stage
def execute_tokens(self, tokens: List[str]) -> None:
    """Execute one REPL/CLI command line as a multi-stage pipeline.

    Splits *tokens* into ``|``-separated stages, resolves ``@`` / ``@N`` /
    filter selection stages against the current result-table context,
    auto-inserts downloader stages after provider-table selections, then runs
    each cmdlet stage with worker-session tracking and an optional Live
    progress display. Errors are printed; nothing propagates to the caller.
    """
    from cmdlet import REGISTRY
    from SYS import pipeline as ctx
    try:
        self._try_clear_pipeline_stop(ctx)
        # REPL guard: stage-local tables should not persist across independent
        # commands. Selection stages can always seed from last/display tables.
        try:
            if hasattr(ctx, "set_current_stage_table"):
                ctx.set_current_stage_table(None)
        except Exception:
            pass
        # Preflight (URL-duplicate prompts, etc.) should be cached within a single
        # pipeline run, not across independent pipelines.
        try:
            ctx.store_value("preflight", {})
        except Exception:
            pass
        stages = self._split_stages(tokens)
        if not stages:
            print("Invalid pipeline syntax\n")
            return
        self._maybe_seed_current_stage_table(ctx)
        stages = self._maybe_apply_pending_pipeline_tail(ctx, stages)
        config = self._config_loader.load()
        config = self._apply_quiet_background_flag(config)
        # Leading @N tokens on the first stage are extracted here and applied
        # as an initial selection before any cmdlet runs.
        (
            stages,
            first_stage_selection_indices,
            first_stage_had_extra_args,
            first_stage_select_all,
        ) = self._extract_first_stage_selection_tokens(stages)
        first_stage_selection_indices = self._apply_select_all_if_requested(
            ctx,
            first_stage_selection_indices,
            first_stage_select_all
        )
        piped_result: Any = None
        worker_manager = WorkerManagerRegistry.ensure(config)
        pipeline_text = " | ".join(" ".join(stage) for stage in stages)
        pipeline_session = WorkerStages.begin_pipeline(
            worker_manager,
            pipeline_text=pipeline_text,
            config=config
        )
        raw_stage_texts = self._get_raw_stage_texts(ctx)
        self._maybe_enable_background_notifier(
            worker_manager,
            config,
            pipeline_session
        )
        pipeline_status = "completed"
        pipeline_error = ""
        progress_ui = None
        pipe_index_by_stage: Dict[int, int] = {}
        try:
            ok, initial_piped = self._maybe_apply_initial_selection(
                ctx,
                config,
                stages,
                selection_indices=first_stage_selection_indices,
                first_stage_had_extra_args=first_stage_had_extra_args,
                worker_manager=worker_manager,
                pipeline_session=pipeline_session,
            )
            if not ok:
                return
            if initial_piped is not None:
                piped_result = initial_piped
            # REPL guard: prevent add-relationship before add-file for download-media pipelines.
            if not self._validate_download_media_relationship_order(stages):
                pipeline_status = "failed"
                pipeline_error = "Invalid pipeline order"
                return
            # ------------------------------------------------------------------
            # Multi-level pipeline progress (pipes = stages, tasks = items)
            # ------------------------------------------------------------------
            progress_ui, pipe_index_by_stage = self._maybe_start_live_progress(config, stages)
            # NOTE: `stages` may grow/shift during iteration (auto-inserted
            # downloader stages below); enumerate() tolerates appends/inserts
            # after the current index.
            for stage_index, stage_tokens in enumerate(stages):
                if not stage_tokens:
                    continue
                raw_stage_name = str(stage_tokens[0])
                cmd_name = raw_stage_name.replace("_", "-").lower()
                stage_args = stage_tokens[1:]
                if cmd_name == "@":
                    # Prefer piping the last emitted/visible items (e.g. add-file results)
                    # over the result-table subject. The subject can refer to older context
                    # (e.g. a playlist row) and may not contain store+hash.
                    last_items = None
                    try:
                        last_items = ctx.get_last_result_items()
                    except Exception:
                        last_items = None
                    if last_items:
                        from cmdlet._shared import coerce_to_pipe_object
                        try:
                            pipe_items = [
                                coerce_to_pipe_object(x) for x in list(last_items)
                            ]
                        except Exception:
                            pipe_items = list(last_items)
                        # Single item is piped bare; multiple as a list.
                        piped_result = pipe_items if len(pipe_items) > 1 else pipe_items[0]
                        try:
                            ctx.set_last_items(pipe_items)
                        except Exception:
                            pass
                        if pipeline_session and worker_manager:
                            try:
                                worker_manager.log_step(
                                    pipeline_session.worker_id,
                                    "@ used last result items"
                                )
                            except Exception:
                                pass
                        continue
                    subject = ctx.get_last_result_subject()
                    if subject is None:
                        print("No current result context available for '@'\n")
                        pipeline_status = "failed"
                        pipeline_error = "No result items/subject for @"
                        return
                    piped_result = subject
                    try:
                        subject_items = subject if isinstance(subject, list) else [subject]
                        ctx.set_last_items(subject_items)
                    except Exception:
                        pass
                    if pipeline_session and worker_manager:
                        try:
                            worker_manager.log_step(
                                pipeline_session.worker_id,
                                "@ used current table subject"
                            )
                        except Exception:
                            pass
                    continue
                if cmd_name.startswith("@"):  # selection stage
                    selection_token = raw_stage_name
                    selection = SelectionSyntax.parse(selection_token)
                    filter_spec = SelectionFilterSyntax.parse(selection_token)
                    is_select_all = selection_token.strip() == "@*"
                    if selection is None and filter_spec is None and not is_select_all:
                        print(f"Invalid selection: {selection_token}\n")
                        pipeline_status = "failed"
                        pipeline_error = f"Invalid selection {selection_token}"
                        return
                    selected_indices = []
                    # Prefer selecting from the last selectable *table* (search/playlist)
                    # rather than from display-only emitted items, unless we're explicitly
                    # selecting from an overlay table.
                    display_table = None
                    try:
                        display_table = (
                            ctx.get_display_table()
                            if hasattr(ctx, "get_display_table") else None
                        )
                    except Exception:
                        display_table = None
                    stage_table = ctx.get_current_stage_table()
                    if not stage_table and display_table is not None:
                        stage_table = display_table
                    if not stage_table:
                        stage_table = ctx.get_last_result_table()
                    if display_table is not None and stage_table is display_table:
                        items_list = ctx.get_last_result_items() or []
                    else:
                        if hasattr(ctx, "get_last_selectable_result_items"):
                            items_list = ctx.get_last_selectable_result_items() or []
                        else:
                            items_list = ctx.get_last_result_items() or []
                    if is_select_all:
                        selected_indices = list(range(len(items_list)))
                    elif filter_spec is not None:
                        selected_indices = [
                            i for i, item in enumerate(items_list)
                            if SelectionFilterSyntax.matches(item, filter_spec)
                        ]
                    else:
                        # User-facing @N is 1-based; convert to 0-based indices.
                        selected_indices = sorted(
                            [i - 1 for i in selection]
                        )  # type: ignore[arg-type]
                    resolved_items = items_list if items_list else []
                    filtered = [
                        resolved_items[i] for i in selected_indices
                        if 0 <= i < len(resolved_items)
                    ]
                    if not filtered:
                        print("No items matched selection\n")
                        pipeline_status = "failed"
                        pipeline_error = "Empty selection"
                        return
                    # Filter UX: if the stage token is a filter and it's terminal,
                    # render a filtered table overlay rather than selecting/auto-downloading.
                    stage_is_last = (stage_index + 1 >= len(stages))
                    if filter_spec is not None and stage_is_last:
                        try:
                            base_table = stage_table
                            if base_table is None:
                                base_table = ctx.get_last_result_table()
                            if base_table is not None and hasattr(base_table, "copy_with_title"):
                                new_table = base_table.copy_with_title(getattr(base_table, "title", "") or "Results")
                            else:
                                new_table = ResultTable(getattr(base_table, "title", "") if base_table is not None else "Results")
                            # Best-effort: carry over the table-type tag so
                            # downstream type-based behavior still matches.
                            try:
                                if base_table is not None and getattr(base_table, "table", None):
                                    new_table.set_table(str(getattr(base_table, "table")))
                            except Exception:
                                pass
                            try:
                                # Attach a one-line header so users see the active filter.
                                safe = str(selection_token)[1:].strip()
                                new_table.set_header_line(f'filter: "{safe}"')
                            except Exception:
                                pass
                            for item in filtered:
                                new_table.add_result(item)
                            try:
                                ctx.set_last_result_table_overlay(new_table, items=list(filtered), subject=ctx.get_last_result_subject())
                            except Exception:
                                pass
                            try:
                                stdout_console().print()
                                stdout_console().print(new_table)
                            except Exception:
                                pass
                        except Exception:
                            pass
                        continue
                    # UX: selecting a single URL row from get-url tables should open it.
                    # Only do this when the selection stage is terminal to avoid surprising
                    # side-effects in pipelines like `@1 | download-file`.
                    current_table = ctx.get_current_stage_table() or ctx.get_last_result_table()
                    if (not is_select_all) and (len(filtered) == 1):
                        try:
                            PipelineExecutor._maybe_open_url_selection(
                                current_table,
                                filtered,
                                stage_is_last=(stage_index + 1 >= len(stages)),
                            )
                        except Exception:
                            pass
                    if PipelineExecutor._maybe_run_class_selector(
                            ctx,
                            config,
                            filtered,
                            stage_is_last=(stage_index + 1 >= len(stages))):
                        return
                    # Special case: selecting multiple tags from get-tag and piping into delete-tag
                    # should batch into a single operation (one backend call).
                    next_cmd = None
                    try:
                        if stage_index + 1 < len(stages) and stages[stage_index + 1]:
                            next_cmd = str(stages[stage_index + 1][0]).replace("_", "-").lower()
                    except Exception:
                        next_cmd = None

                    def _is_tag_row(obj: Any) -> bool:
                        # True for TagItem instances or dicts carrying "tag_name".
                        try:
                            if (hasattr(obj, "__class__")
                                    and obj.__class__.__name__ == "TagItem"
                                    and hasattr(obj, "tag_name")):
                                return True
                        except Exception:
                            pass
                        try:
                            if isinstance(obj, dict) and obj.get("tag_name"):
                                return True
                        except Exception:
                            pass
                        return False

                    if (next_cmd in {"delete-tag", "delete_tag"} and len(filtered) > 1
                            and all(_is_tag_row(x) for x in filtered)):
                        from cmdlet._shared import get_field
                        tags: List[str] = []
                        first_hash = None
                        first_store = None
                        first_path = None
                        for item in filtered:
                            tag_name = get_field(item, "tag_name")
                            if tag_name:
                                tags.append(str(tag_name))
                            if first_hash is None:
                                first_hash = get_field(item, "hash")
                            if first_store is None:
                                first_store = get_field(item, "store")
                            if first_path is None:
                                first_path = get_field(item, "path") or get_field(item, "target")
                        if tags:
                            grouped = {
                                "table": "tag.selection",
                                "media_kind": "tag",
                                "hash": first_hash,
                                "store": first_store,
                                "path": first_path,
                                "tag": tags,
                            }
                            piped_result = grouped
                            continue
                    from cmdlet._shared import coerce_to_pipe_object
                    filtered_pipe_objs = [
                        coerce_to_pipe_object(item) for item in filtered
                    ]
                    piped_result = (
                        filtered_pipe_objs
                        if len(filtered_pipe_objs) > 1 else filtered_pipe_objs[0]
                    )
                    current_table = ctx.get_current_stage_table() or ctx.get_last_result_table()
                    table_type = (
                        current_table.table
                        if current_table and hasattr(current_table, "table") else None
                    )

                    def _norm_stage_cmd(name: Any) -> str:
                        # Normalize stage command spelling: underscores -> dashes, lowercase.
                        return str(name or "").replace("_", "-").strip().lower()

                    next_cmd = None
                    if stage_index + 1 < len(stages) and stages[stage_index + 1]:
                        next_cmd = _norm_stage_cmd(stages[stage_index + 1][0])
                    # Auto-insert downloader stages for provider tables.
                    # IMPORTANT: do not auto-download for filter selections; they may match many rows.
                    if filter_spec is None:
                        if stage_index + 1 >= len(stages):
                            # Terminal selection: append the appropriate downloader.
                            if table_type == "youtube":
                                print("Auto-running YouTube selection via download-media")
                                stages.append(["download-media", *stage_args])
                            elif table_type == "bandcamp":
                                print("Auto-running Bandcamp selection via download-media")
                                stages.append(["download-media"])
                            elif table_type == "internetarchive":
                                print("Auto-loading Internet Archive item via download-file")
                                stages.append(["download-file"])
                            elif table_type == "podcastindex.episodes":
                                print("Auto-piping selection to download-file")
                                stages.append(["download-file"])
                            elif table_type in {"soulseek", "openlibrary", "libgen"}:
                                print("Auto-piping selection to download-file")
                                stages.append(["download-file"])
                        else:
                            # Mid-pipeline selection: insert a downloader unless the
                            # next stage already is one (or .pipe).
                            if table_type == "soulseek" and next_cmd not in (
                                "download-file",
                                "download-media",
                                "download_media",
                                ".pipe",
                            ):
                                debug("Auto-inserting download-file after Soulseek selection")
                                stages.insert(stage_index + 1, ["download-file"])
                            if table_type == "youtube" and next_cmd not in (
                                "download-media",
                                "download_media",
                                "download-file",
                                ".pipe",
                            ):
                                debug("Auto-inserting download-media after YouTube selection")
                                stages.insert(stage_index + 1, ["download-media"])
                            if table_type == "bandcamp" and next_cmd not in (
                                "download-media",
                                "download_media",
                                "download-file",
                                ".pipe",
                            ):
                                print("Auto-inserting download-media after Bandcamp selection")
                                stages.insert(stage_index + 1, ["download-media"])
                            if table_type == "internetarchive" and next_cmd not in (
                                "download-file",
                                "download-media",
                                "download_media",
                                ".pipe",
                            ):
                                debug("Auto-inserting download-file after Internet Archive selection")
                                stages.insert(stage_index + 1, ["download-file"])
                            if table_type == "podcastindex.episodes" and next_cmd not in (
                                "download-file",
                                "download-media",
                                "download_media",
                                ".pipe",
                            ):
                                print("Auto-inserting download-file after PodcastIndex episode selection")
                                stages.insert(stage_index + 1, ["download-file"])
                            if table_type == "libgen" and next_cmd not in (
                                "download-file",
                                "download-media",
                                "download_media",
                                ".pipe",
                            ):
                                print("Auto-inserting download-file after Libgen selection")
                                stages.insert(stage_index + 1, ["download-file"])
                    continue
                # Regular cmdlet stage: resolve from the registry and run it.
                ensure_registry_loaded()
                cmd_fn = REGISTRY.get(cmd_name)
                if not cmd_fn:
                    print(f"Unknown command: {cmd_name}\n")
                    pipeline_status = "failed"
                    pipeline_error = f"Unknown command: {cmd_name}"
                    return
                stage_session = WorkerStages.begin_stage(
                    worker_manager,
                    cmd_name=cmd_name,
                    stage_tokens=stage_tokens,
                    config=config,
                    command_text=" ".join(stage_tokens).strip(),
                )
                stage_worker_id = stage_session.worker_id if stage_session else None
                # Estimate how many per-item tasks this pipe will run.
                pipe_idx = pipe_index_by_stage.get(stage_index)
                if progress_ui is not None and pipe_idx is not None:
                    try:
                        # Prefer piped input for task counts.
                        if isinstance(piped_result, list):
                            total_items = len(piped_result)
                            preview_items: Optional[List[Any]] = list(piped_result)
                        elif piped_result is not None:
                            total_items = 1
                            preview_items = [piped_result]
                        else:
                            # First stage without piped input: infer from URL-ish args.
                            preview: List[Any] = []
                            toks = list(stage_tokens[1:])
                            i = 0
                            while i < len(toks):
                                t = str(toks[i])
                                low = t.lower().strip()
                                if (cmd_name == "add-file" and low in {"-path", "--path", "-p"}
                                        and i + 1 < len(toks)):
                                    nxt = str(toks[i + 1])
                                    if nxt:
                                        # Comma-separated -path values count as multiple items.
                                        if "," in nxt:
                                            parts = [
                                                p.strip().strip("\"'")
                                                for p in nxt.split(",")
                                            ]
                                            parts = [p for p in parts if p]
                                            if parts:
                                                preview.extend(parts)
                                            i += 2
                                            continue
                                        else:
                                            preview.append(nxt)
                                            i += 2
                                            continue
                                if low in {"-url", "--url"} and i + 1 < len(toks):
                                    nxt = str(toks[i + 1])
                                    if nxt and not nxt.startswith("-"):
                                        preview.append(nxt)
                                        i += 2
                                        continue
                                if (not t.startswith("-")) and ("://" in low
                                        or low.startswith(("magnet:", "torrent:"))):
                                    preview.append(t)
                                i += 1
                            preview_items = preview if preview else None
                            total_items = len(preview) if preview else 1
                        progress_ui.begin_pipe(
                            pipe_idx,
                            total_items=int(total_items),
                            items_preview=preview_items
                        )
                    except Exception:
                        pass
                on_emit = None
                if progress_ui is not None and pipe_idx is not None:
                    _ui = cast(Any, progress_ui)

                    def _on_emit(
                        obj: Any,
                        _idx: int = int(pipe_idx),
                        _progress=_ui
                    ) -> None:
                        # Defaults bind the current pipe index/UI (avoids
                        # late-binding closure bugs across loop iterations).
                        try:
                            _progress.on_emit(_idx, obj)
                        except Exception:
                            pass

                    on_emit = _on_emit
                pipeline_ctx = ctx.PipelineStageContext(
                    stage_index=stage_index,
                    total_stages=len(stages),
                    pipe_index=pipe_idx,
                    worker_id=stage_worker_id,
                    on_emit=on_emit,
                )
                ctx.set_stage_context(pipeline_ctx)
                stage_status = "completed"
                stage_error = ""
                stage_label = f"stage {stage_index + 1}/{len(stages)} ({cmd_name})"
                try:
                    # Avoid leaking interactive selection tables across stages.
                    # (Selection/expansion happens before this loop, so clearing here is safe.)
                    try:
                        if hasattr(ctx, "set_current_stage_table"):
                            ctx.set_current_stage_table(None)
                    except Exception:
                        pass
                    try:
                        if hasattr(ctx, "set_current_cmdlet_name"):
                            ctx.set_current_cmdlet_name(cmd_name)
                    except Exception:
                        pass
                    try:
                        if hasattr(ctx, "set_current_stage_text"):
                            # Prefer the raw user-typed stage text when it matches
                            # this command; otherwise reconstruct from tokens.
                            stage_text = ""
                            if raw_stage_texts and stage_index < len(raw_stage_texts):
                                candidate = str(raw_stage_texts[stage_index] or "").strip()
                                if candidate:
                                    try:
                                        cand_tokens = shlex.split(candidate)
                                    except Exception:
                                        cand_tokens = candidate.split()
                                    if cand_tokens:
                                        first = str(cand_tokens[0]).replace("_", "-").lower()
                                        if first == cmd_name:
                                            stage_text = candidate
                            if not stage_text:
                                stage_text = " ".join(stage_tokens).strip()
                            ctx.set_current_stage_text(stage_text)
                    except Exception:
                        pass
                    # `.pipe` is typically the terminal interactive stage (MPV UI).
                    # Stop Live progress before running it so output doesn't get stuck behind Live.
                    if (cmd_name == ".pipe" and progress_ui is not None
                            and (stage_index + 1 >= len(stages))):
                        try:
                            progress_ui.stop()
                        except Exception:
                            pass
                        try:
                            from SYS import pipeline as _pipeline_ctx
                            if hasattr(_pipeline_ctx, "set_live_progress"):
                                _pipeline_ctx.set_live_progress(None)
                        except Exception:
                            pass
                        progress_ui = None
                    # Run the cmdlet: (piped input, args, config) -> exit code.
                    ret_code = cmd_fn(piped_result, list(stage_args), config)
                    stage_is_last = stage_index + 1 >= len(stages)
                    # Graceful early-stop: preflight declined, etc.
                    try:
                        stop_req = (
                            ctx.get_pipeline_stop()
                            if hasattr(ctx, "get_pipeline_stop") else None
                        )
                    except Exception:
                        stop_req = None
                    if stop_req is not None:
                        # Do not treat as an error; just end the pipeline quietly.
                        pipeline_status = "completed"
                        pipeline_error = ""
                        return
                    emits: List[Any] = []
                    if getattr(pipeline_ctx, "emits", None) is not None:
                        emits = list(pipeline_ctx.emits or [])
                    # Shared `-path` behavior: persist temp/PATH artifacts to destination.
                    if emits:
                        try:
                            from cmdlet import _shared as sh
                            emits = sh.apply_output_path_from_pipeobjects(
                                cmd_name=cmd_name,
                                args=list(stage_args),
                                emits=emits,
                            )
                            try:
                                pipeline_ctx.emits = list(emits)
                            except Exception:
                                pass
                        except Exception:
                            pass
                    if emits:
                        # If the cmdlet already installed an overlay table (e.g. get-tag),
                        # don't overwrite it: set_last_result_items_only() would clear the
                        # overlay table/subject and break '@' subject piping.
                        try:
                            has_overlay = (
                                bool(ctx.get_display_table())
                                if hasattr(ctx, "get_display_table") else False
                            )
                        except Exception:
                            has_overlay = False
                        if not has_overlay:
                            ctx.set_last_result_items_only(emits)
                        piped_result = emits
                    else:
                        piped_result = None
                    # Some cmdlets (notably download-media format selection) populate a selectable
                    # current-stage table without emitting pipeline items. In these cases, render
                    # the table and pause the pipeline so the user can pick @N.
                    stage_table = (
                        ctx.get_current_stage_table()
                        if hasattr(ctx, "get_current_stage_table") else None
                    )
                    stage_table_type = (
                        str(getattr(stage_table, "table", "") or "").strip().lower()
                        if stage_table else ""
                    )
                    try:
                        stage_table_source = (
                            str(getattr(stage_table, "source_command", "") or "").strip().replace("_", "-").lower()
                            if stage_table else ""
                        )
                    except Exception:
                        stage_table_source = ""
                    if ((not stage_is_last) and (not emits) and cmd_name in {
                        "download-media",
                        "download_media",
                        "download-data",
                        "download_data",
                    } and stage_table is not None
                            and (stage_table_type in {
                                "ytdlp.formatlist",
                                "download-media",
                                "download_media",
                                "bandcamp",
                                "youtube",
                            } or stage_table_source in {"download-media", "download_media"}
                            or stage_table_type in {"internetarchive.formats"}
                            or stage_table_source in {"download-file"})):
                        try:
                            is_selectable = not bool(
                                getattr(stage_table, "no_choice", False)
                            )
                        except Exception:
                            is_selectable = True
                        if is_selectable:
                            try:
                                already_rendered = bool(
                                    getattr(stage_table, "_rendered_by_cmdlet", False)
                                )
                            except Exception:
                                already_rendered = False
                            if not already_rendered:
                                # Stop the Live progress display before printing a selectable table.
                                # Printing while Live is active can cause the table to be truncated/overwritten.
                                if progress_ui is not None:
                                    try:
                                        if pipe_idx is not None:
                                            progress_ui.finish_pipe(
                                                int(pipe_idx),
                                                force_complete=True
                                            )
                                    except Exception:
                                        pass
                                    try:
                                        progress_ui.stop()
                                    except Exception:
                                        pass
                                    try:
                                        from SYS import pipeline as _pipeline_ctx
                                        if hasattr(_pipeline_ctx, "set_live_progress"):
                                            _pipeline_ctx.set_live_progress(None)
                                    except Exception:
                                        pass
                                    progress_ui = None
                                stdout_console().print()
                                stdout_console().print(stage_table)
                            # Always pause the pipeline when a selectable table was produced.
                            # The user will continue by running @N/@* which will re-attach the
                            # pending downstream stages.
                            try:
                                remaining = stages[stage_index + 1:]
                                source_cmd = (
                                    ctx.get_current_stage_table_source_command()
                                    if hasattr(ctx, "get_current_stage_table_source_command") else None
                                )
                                if remaining and hasattr(ctx, "set_pending_pipeline_tail"):
                                    ctx.set_pending_pipeline_tail(
                                        remaining,
                                        source_command=source_cmd or cmd_name
                                    )
                            except Exception:
                                pass
                            return
                    # For the final stage, many cmdlets rely on the runner to render the
                    # table they placed into pipeline context (e.g. get-tag). Prefer a
                    # display table if one exists, otherwise the current-stage table.
                    if stage_is_last:
                        # Stop the Live progress display before printing the final table.
                        # This avoids cursor-control interactions that can truncate output.
                        if progress_ui is not None:
                            try:
                                if pipe_idx is not None:
                                    progress_ui.finish_pipe(
                                        int(pipe_idx),
                                        force_complete=(stage_status == "completed"),
                                    )
                            except Exception:
                                pass
                            try:
                                progress_ui.stop()
                            except Exception:
                                pass
                            try:
                                from SYS import pipeline as _pipeline_ctx
                                if hasattr(_pipeline_ctx, "set_live_progress"):
                                    _pipeline_ctx.set_live_progress(None)
                            except Exception:
                                pass
                            progress_ui = None
                        final_table = None
                        try:
                            final_table = (
                                ctx.get_display_table()
                                if hasattr(ctx, "get_display_table") else None
                            )
                        except Exception:
                            final_table = None
                        if final_table is None:
                            final_table = stage_table
                        # If the cmdlet emitted results but didn't supply a fresh table, it's
                        # common for `stage_table` to still point at the previous stage's table
                        # (e.g. add-file's canonical store table). In that case, prefer rendering
                        # the emitted results so the user sees the actual output of this stage.
                        if (emits and (ctx.get_display_table() if hasattr(
                                ctx, "get_display_table") else None) is None):
                            try:
                                src_cmd = (
                                    str(getattr(final_table, "source_command", "") or "").strip().lower()
                                    if final_table else ""
                                )
                            except Exception:
                                src_cmd = ""
                            try:
                                cur_cmd = str(cmd_name or "").strip().replace("_", "-").lower()
                            except Exception:
                                cur_cmd = ""
                            if ((final_table is None) or (not src_cmd)
                                    or (src_cmd.replace("_", "-") != cur_cmd)):
                                try:
                                    table_title = CmdletExecutor._get_table_title_for_command(
                                        cmd_name,
                                        emits,
                                        list(stage_args)
                                    )
                                except Exception:
                                    table_title = "Results"
                                table = ResultTable(table_title)
                                for item in emits:
                                    table.add_result(item)
                                try:
                                    if hasattr(ctx, "set_last_result_table_overlay"):
                                        ctx.set_last_result_table_overlay(table, emits)
                                    if hasattr(ctx, "set_current_stage_table"):
                                        ctx.set_current_stage_table(table)
                                except Exception:
                                    pass
                                final_table = table
                        if final_table is not None:
                            try:
                                already_rendered = bool(
                                    getattr(final_table, "_rendered_by_cmdlet", False)
                                )
                            except Exception:
                                already_rendered = False
                            if not already_rendered:
                                stdout_console().print()
                                stdout_console().print(final_table)
                    # (Fallback handled above by synthesizing an overlay ResultTable.)
                    if isinstance(ret_code, int) and ret_code != 0:
                        stage_status = "failed"
                        stage_error = f"exit code {ret_code}"
                        print(f"[{stage_label} exit code: {ret_code}]\n")
                        pipeline_status = "failed"
                        pipeline_error = f"{stage_label} failed ({stage_error})"
                        return
                except Exception as exc:
                    stage_status = "failed"
                    stage_error = f"{type(exc).__name__}: {exc}"
                    print(f"[error in {stage_label}]: {stage_error}\n")
                    pipeline_status = "failed"
                    pipeline_error = f"{stage_label} error: {exc}"
                    return
                finally:
                    # Per-stage bookkeeping: close progress, clear ctx names,
                    # and close/log the worker stage session.
                    if progress_ui is not None and pipe_idx is not None:
                        try:
                            progress_ui.finish_pipe(
                                int(pipe_idx),
                                force_complete=(stage_status == "completed")
                            )
                        except Exception:
                            pass
                    try:
                        if hasattr(ctx, "clear_current_cmdlet_name"):
                            ctx.clear_current_cmdlet_name()
                    except Exception:
                        pass
                    try:
                        if hasattr(ctx, "clear_current_stage_text"):
                            ctx.clear_current_stage_text()
                    except Exception:
                        pass
                    if stage_session:
                        stage_session.close(
                            status=stage_status,
                            error_msg=stage_error
                        )
                    elif pipeline_session and worker_manager:
                        try:
                            worker_manager.log_step(
                                pipeline_session.worker_id,
                                f"{stage_label} {'completed' if stage_status == 'completed' else 'failed'}",
                            )
                        except Exception:
                            pass
            # Selection-only invocation (no remaining stages): render the picks.
            if not stages and piped_result is not None:
                table = ResultTable("Selection Result")
                items = piped_result if isinstance(piped_result, list) else [piped_result]
                for item in items:
                    table.add_result(item)
                ctx.set_last_result_items_only(items)
                stdout_console().print()
                stdout_console().print(table)
        except Exception as exc:
            pipeline_status = "failed"
            pipeline_error = str(exc)
            print(f"[error] Failed to execute pipeline: {exc}\n")
        finally:
            if progress_ui is not None:
                try:
                    progress_ui.stop()
                except Exception:
                    pass
                try:
                    from SYS import pipeline as _pipeline_ctx
                    if hasattr(_pipeline_ctx, "set_live_progress"):
                        _pipeline_ctx.set_live_progress(None)
                except Exception:
                    pass
            # End-of-command cleanup: avoid leaking current stage tables into
            # the next REPL command (causes stale @ selection sources).
            try:
                if hasattr(ctx, "set_current_stage_table"):
                    ctx.set_current_stage_table(None)
            except Exception:
                pass
            if pipeline_session:
                pipeline_session.close(
                    status=pipeline_status,
                    error_msg=pipeline_error
                )
    except Exception as exc:
        print(f"[error] Failed to execute pipeline: {exc}\n")
# NOTE(review): Markdown and Console are already imported at the top of this
# file; these module-level re-imports are redundant (but harmless).
from rich.markdown import Markdown
from rich.console import Console
# Shared console instance used by run_repl's startup banner rendering.
console = Console()
class MedeiaCLI:
"""Main CLI application object."""
ROOT = Path(__file__).resolve().parent
def __init__(self) -> None:
    """Wire up config loading plus the cmdlet and pipeline executors."""
    loader = ConfigLoader(root=self.ROOT)
    self._config_loader = loader
    # Best-effort: auto-install optional tools named in config; never fatal.
    try:
        from SYS.optional_deps import maybe_auto_install_configured_tools
        maybe_auto_install_configured_tools(loader.load())
    except Exception:
        pass
    self._cmdlet_executor = CmdletExecutor(config_loader=loader)
    self._pipeline_executor = PipelineExecutor(config_loader=loader)
@staticmethod
def parse_selection_syntax(token: str) -> Optional[Set[int]]:
    """Parse an @-selection token via SelectionSyntax; None when invalid."""
    parsed = SelectionSyntax.parse(token)
    return parsed
@classmethod
def get_store_choices(cls) -> List[str]:
    """Load config from the class root and return the known store names."""
    cfg = ConfigLoader(root=cls.ROOT).load()
    return CmdletIntrospection.store_choices(cfg)
def build_app(self) -> typer.Typer:
    """Construct the Typer application: `pipeline`, `repl`, a default
    callback that opens the REPL, and one pass-through command per cmdlet.

    Returns:
        The fully configured Typer app (call it to run the CLI).
    """
    app = typer.Typer(help="Medeia-Macina CLI")

    def _validate_pipeline_option(
        ctx: typer.Context,
        param: typer.CallbackParam,
        value: str
    ):
        # Option callback: reject syntactically invalid pipeline strings early;
        # any non-Typer validation failure is ignored (best-effort check).
        try:
            from SYS.cli_syntax import validate_pipeline_text
            syntax_error = validate_pipeline_text(value)
            if syntax_error:
                raise typer.BadParameter(syntax_error.message)
        except typer.BadParameter:
            raise
        except Exception:
            pass
        return value

    @app.command("pipeline")
    def pipeline(
        command: str = typer.Option(
            ...,
            "--pipeline",
            "-p",
            help="Pipeline command string to execute",
            callback=_validate_pipeline_option,
        ),
        seeds_json: Optional[str] = typer.Option(
            None,
            "--seeds-json",
            "-s",
            help="JSON string of seed items"
        ),
    ) -> None:
        # Run a single pipeline string non-interactively, optionally seeding
        # the result context from --seeds-json.
        from SYS import pipeline as ctx
        config = self._config_loader.load()
        debug_enabled = bool(config.get("debug", False))
        set_debug(debug_enabled)
        if seeds_json:
            try:
                seeds = json.loads(seeds_json)
                if not isinstance(seeds, list):
                    seeds = [seeds]
                ctx.set_last_result_items_only(seeds)
            except Exception as exc:
                print(f"Error parsing seeds JSON: {exc}")
                return
        # Re-validate here as well: the callback above is best-effort only.
        try:
            from SYS.cli_syntax import validate_pipeline_text
            syntax_error = validate_pipeline_text(command)
            if syntax_error:
                print(syntax_error.message, file=sys.stderr)
                return
        except Exception:
            pass
        try:
            tokens = shlex.split(command)
        except ValueError as exc:
            print(f"Syntax error: {exc}", file=sys.stderr)
            return
        if not tokens:
            return
        self._pipeline_executor.execute_tokens(tokens)

    @app.command("repl")
    def repl() -> None:
        self.run_repl()

    @app.callback(invoke_without_command=True)
    def main_callback(ctx: typer.Context) -> None:
        # No subcommand -> drop into the interactive REPL.
        if ctx.invoked_subcommand is None:
            self.run_repl()

    # Keep references so linters don't flag the decorated closures as unused.
    _ = (pipeline, repl, main_callback)
    # Dynamically register all cmdlets as top-level Typer commands so users can
    # invoke `mm <cmdlet> [args]` directly from the shell. We use Click/Typer
    # context settings to allow arbitrary flags and options to pass through to
    # the cmdlet system without Typer trying to parse them.
    try:
        names = list_cmdlet_names()
        skip = {"pipeline", "repl"}
        for nm in names:
            if not nm or nm in skip:
                continue
            # create a scoped handler to capture the command name
            def _make_handler(cmd_name: str):
                @app.command(
                    cmd_name,
                    context_settings={
                        "ignore_unknown_options": True,
                        "allow_extra_args": True,
                    },
                )
                def _handler(ctx: typer.Context):
                    try:
                        args = list(ctx.args or [])
                    except Exception:
                        args = []
                    self._cmdlet_executor.execute(cmd_name, args)
                return _handler
            _make_handler(nm)
    except Exception:
        # Don't let failure to register dynamic commands break startup
        pass
    return app
def run(self) -> None:
    """Configure debug/traceback handling from config, then run the Typer app."""
    # Ensure Rich tracebacks are active even when invoking subcommands.
    debug_enabled = False
    try:
        config = self._config_loader.load()
        if isinstance(config, dict):
            debug_enabled = bool(config.get("debug", False))
    except Exception:
        debug_enabled = False
    set_debug(debug_enabled)
    # Show locals in tracebacks only while debugging.
    _install_rich_traceback(show_locals=debug_enabled)
    app = self.build_app()
    app()
def run_repl(self) -> None:
# console = Console(width=100)
# Valid Rich rainbow colors
RAINBOW = [
"red",
"dark_orange",
"yellow",
"green",
"blue",
"purple",
"magenta",
]
def rainbow_pillar(colors, height=21, bar_width=36):
    """Build a one-column grid of colored bars, cycling through *colors*."""
    pillar = Table.grid(padding=0)
    pillar.add_column(no_wrap=True)
    palette_size = len(colors)
    for row_idx in range(height):
        stripe = Bar(
            size=1,
            begin=0,
            end=1,
            width=bar_width,
            color=colors[row_idx % palette_size],
        )
        pillar.add_row(stripe)
    return pillar
# Build root layout
root = Layout(name="root")
root.split_row(
Layout(name="left",
ratio=2),
Layout(name="center",
ratio=8),
Layout(name="right",
ratio=2),
)
# Left pillar → forward rainbow
root["left"].update(
Panel(rainbow_pillar(RAINBOW,
height=21,
bar_width=36),
title="DELTA")
)
# Right pillar → reverse rainbow
root["right"].update(
Panel(
rainbow_pillar(list(reversed(RAINBOW)),
height=21,
bar_width=36),
title="LAMBDA"
)
)
# Center content
center_md = Markdown(
"""
# ****************** Medios Macina ******************
take what you want | keep what you like | share what you love
_____________________________________________________________
_____________________________________________________________
_____________________________________________________________
For suddenly you may be let loose from the net, and thrown out to sea.
Waving around clutching at gnats, unable to lift the heavy anchor. Lost
and without a map, forgotten things from the past by distracting wind storms.
_____________________________________________________________
_____________________________________________________________
_____________________________________________________________
Light shines a straight path to the golden shores.
Come to love it when others take what you share, as there is no greater joy
"""
)
root["center"].update(Panel(center_md, title="KAPPA", height=21))
console.print(root)
prompt_text = "<🜂🜄|🜁🜃>"
startup_table = ResultTable(
"*********<IGNITIO>*********<NOUSEMPEH>*********<RUGRAPOG>*********<OMEGHAU>*********"
)
startup_table.set_no_choice(True).set_preserve_order(True)
startup_table.set_value_case("upper")
def _upper(value: Any) -> str:
text = "" if value is None else str(value)
return text.upper()
def _add_startup_check(
    status: str,
    name: str,
    *,
    provider: str = "",
    store: str = "",
    files: int | str | None = None,
    detail: str = "",
) -> None:
    """Append one uppercased status row to the startup check table."""
    row = startup_table.add_row()
    columns = (
        ("STATUS", _upper(status)),
        ("NAME", _upper(name)),
        ("PROVIDER", _upper(provider or "")),
        ("STORE", _upper(store or "")),
        ("FILES", "" if files is None else str(files)),
        ("DETAIL", _upper(detail or "")),
    )
    for label, rendered in columns:
        row.add_column(label, rendered)
def _has_store_subtype(cfg: dict, subtype: str) -> bool:
store_cfg = cfg.get("store")
if not isinstance(store_cfg, dict):
return False
bucket = store_cfg.get(subtype)
if not isinstance(bucket, dict):
return False
return any(isinstance(v, dict) and bool(v) for v in bucket.values())
def _has_provider(cfg: dict, name: str) -> bool:
provider_cfg = cfg.get("provider")
if not isinstance(provider_cfg, dict):
return False
block = provider_cfg.get(str(name).strip().lower())
return isinstance(block, dict) and bool(block)
def _has_tool(cfg: dict, name: str) -> bool:
tool_cfg = cfg.get("tool")
if not isinstance(tool_cfg, dict):
return False
block = tool_cfg.get(str(name).strip().lower())
return isinstance(block, dict) and bool(block)
def _ping_url(url: str, timeout: float = 3.0) -> tuple[bool, str]:
    """Probe *url* with a single GET.

    Any HTTP status below 500 counts as reachable; exceptions yield
    (False, "<url> (<ExceptionName>)").
    """
    try:
        from API.HTTP import HTTPClient
        with HTTPClient(timeout=timeout, retries=1) as client:
            response = client.get(url, allow_redirects=True)
            status = int(getattr(response, "status_code", 0) or 0)
            reachable = 200 <= status < 500
            return reachable, f"{url} (HTTP {status})"
    except Exception as exc:
        return False, f"{url} ({type(exc).__name__})"
config = self._config_loader.load()
debug_enabled = bool(config.get("debug", False))
set_debug(debug_enabled)
_install_rich_traceback(show_locals=debug_enabled)
_add_startup_check("ENABLED" if debug_enabled else "DISABLED", "DEBUGGING")
try:
try:
from MPV.mpv_ipc import MPV
import shutil
MPV()
mpv_path = shutil.which("mpv")
_add_startup_check("ENABLED", "MPV", detail=mpv_path or "Available")
except Exception as exc:
_add_startup_check("DISABLED", "MPV", detail=str(exc))
store_registry = None
if config:
try:
from Store import Store as StoreRegistry
store_registry = StoreRegistry(config=config, suppress_debug=True)
except Exception:
store_registry = None
if _has_store_subtype(config, "hydrusnetwork"):
store_cfg = config.get("store")
hydrus_cfg = (
store_cfg.get("hydrusnetwork",
{}) if isinstance(store_cfg,
dict) else {}
)
if isinstance(hydrus_cfg, dict):
for instance_name, instance_cfg in hydrus_cfg.items():
if not isinstance(instance_cfg, dict):
continue
name_key = str(instance_cfg.get("NAME") or instance_name)
url_val = str(instance_cfg.get("URL") or "").strip()
ok = bool(
store_registry
and store_registry.is_available(name_key)
)
status = "ENABLED" if ok else "DISABLED"
if ok:
total = None
try:
if store_registry:
backend = store_registry[name_key]
total = getattr(backend, "total_count", None)
if total is None:
getter = getattr(
backend,
"get_total_count",
None
)
if callable(getter):
total = getter()
except Exception:
total = None
detail = url_val
files = total if isinstance(
total,
int
) and total >= 0 else None
else:
err = None
if store_registry:
err = store_registry.get_backend_error(
instance_name
) or store_registry.get_backend_error(name_key)
detail = (url_val + (" - " if url_val else "")
) + (err or "Unavailable")
files = None
_add_startup_check(
status,
name_key,
store="hydrusnetwork",
files=files,
detail=detail
)
provider_cfg = config.get("provider"
) if isinstance(config,
dict) else None
if isinstance(provider_cfg, dict) and provider_cfg:
from Provider.metadata_provider import list_metadata_providers
from ProviderCore.registry import (
list_file_providers,
list_providers,
list_search_providers,
)
provider_availability = list_providers(config) or {}
search_availability = list_search_providers(config) or {}
file_availability = list_file_providers(config) or {}
meta_availability = list_metadata_providers(config) or {}
def _provider_display_name(key: str) -> str:
k = (key or "").strip()
low = k.lower()
if low == "openlibrary":
return "OpenLibrary"
if low == "alldebrid":
return "AllDebrid"
if low == "youtube":
return "YouTube"
return k[:1].upper() + k[1:] if k else "Provider"
already_checked = {"matrix"}
def _default_provider_ping_targets(provider_key: str) -> list[str]:
prov = (provider_key or "").strip().lower()
if prov == "openlibrary":
return ["https://openlibrary.org"]
if prov == "youtube":
return ["https://www.youtube.com"]
if prov == "bandcamp":
return ["https://bandcamp.com"]
if prov == "libgen":
from Provider.libgen import MIRRORS
mirrors = [
str(x).rstrip("/") for x in (MIRRORS or [])
if str(x).strip()
]
return [m + "/json.php" for m in mirrors]
return []
def _ping_first(urls: list[str]) -> tuple[bool, str]:
    """Ping each URL in order and return the first success.

    Returns ``(True, detail)`` for the first reachable URL. When none are
    reachable, returns ``(False, detail)`` using the diagnostics captured
    from the first URL's original attempt — the previous implementation
    re-issued a second request to ``urls[0]`` just to recover the detail
    string, doubling worst-case latency and occasionally flipping the
    result on the retry. An empty list yields ``(False, "No ping target")``.
    """
    first_failure: Optional[str] = None
    for u in urls:
        ok, detail = _ping_url(u)
        if ok:
            return True, detail
        if first_failure is None:
            # Keep the first mirror's diagnostics for the failure report.
            first_failure = detail
    if first_failure is not None:
        return False, first_failure
    return False, "No ping target"
for provider_name in provider_cfg.keys():
prov = str(provider_name or "").strip().lower()
if not prov or prov in already_checked:
continue
display = _provider_display_name(prov)
if prov == "alldebrid":
try:
from Provider.alldebrid import _get_debrid_api_key
from API.alldebrid import AllDebridClient
api_key = _get_debrid_api_key(config)
if not api_key:
_add_startup_check(
"DISABLED",
display,
provider=prov,
detail="Not configured"
)
else:
client = AllDebridClient(api_key)
base_url = str(
getattr(client,
"base_url",
"") or ""
).strip()
_add_startup_check(
"ENABLED",
display,
provider=prov,
detail=base_url or "Connected",
)
except Exception as exc:
_add_startup_check(
"DISABLED",
display,
provider=prov,
detail=str(exc)
)
continue
is_known = False
ok_val: Optional[bool] = None
if prov in provider_availability:
is_known = True
ok_val = bool(provider_availability.get(prov))
elif prov in search_availability:
is_known = True
ok_val = bool(search_availability.get(prov))
elif prov in file_availability:
is_known = True
ok_val = bool(file_availability.get(prov))
elif prov in meta_availability:
is_known = True
ok_val = bool(meta_availability.get(prov))
if not is_known:
_add_startup_check(
"UNKNOWN",
display,
provider=prov,
detail="Not registered"
)
else:
detail = "Configured" if ok_val else "Not configured"
ping_targets = _default_provider_ping_targets(prov)
if ping_targets:
ping_ok, ping_detail = _ping_first(ping_targets)
if ok_val:
detail = ping_detail
else:
detail = (
(detail + " | " +
ping_detail) if ping_detail else detail
)
_add_startup_check(
"ENABLED" if ok_val else "DISABLED",
display,
provider=prov,
detail=detail,
)
already_checked.add(prov)
default_search_providers = [
"openlibrary",
"libgen",
"youtube",
"bandcamp"
]
for prov in default_search_providers:
if prov in already_checked:
continue
display = _provider_display_name(prov)
ok_val = (
bool(search_availability.get(prov))
if prov in search_availability else False
)
ping_targets = _default_provider_ping_targets(prov)
ping_ok, ping_detail = (
_ping_first(ping_targets) if ping_targets else (False, "No ping target")
)
detail = ping_detail or (
"Available" if ok_val else "Unavailable"
)
if not ok_val:
detail = "Unavailable" + (
f" | {ping_detail}" if ping_detail else ""
)
_add_startup_check(
"ENABLED" if (ok_val and ping_ok) else "DISABLED",
display,
provider=prov,
detail=detail,
)
already_checked.add(prov)
if "0x0" not in already_checked:
ok_val = (
bool(file_availability.get("0x0"))
if "0x0" in file_availability else False
)
ping_ok, ping_detail = _ping_url("https://0x0.st")
detail = ping_detail
if not ok_val:
detail = "Unavailable" + (
f" | {ping_detail}" if ping_detail else ""
)
_add_startup_check(
"ENABLED" if (ok_val and ping_ok) else "DISABLED",
"0x0",
provider="0x0",
detail=detail,
)
if _has_provider(config, "matrix"):
try:
from Provider.matrix import Matrix
provider = Matrix(config)
matrix_conf = (
config.get("provider",
{}).get("matrix",
{}) if isinstance(config,
dict) else {}
)
homeserver = str(matrix_conf.get("homeserver") or "").strip()
room_id = str(matrix_conf.get("room_id") or "").strip()
if homeserver and not homeserver.startswith("http"):
homeserver = f"https://{homeserver}"
target = homeserver.rstrip("/")
if room_id:
target = (
target + (" " if target else "")
) + f"room:{room_id}"
_add_startup_check(
"ENABLED" if provider.validate() else "DISABLED",
"Matrix",
provider="matrix",
detail=target or
("Connected" if provider.validate() else "Not configured"),
)
except Exception as exc:
_add_startup_check(
"DISABLED",
"Matrix",
provider="matrix",
detail=str(exc)
)
if _has_store_subtype(config, "folder"):
store_cfg = config.get("store")
folder_cfg = store_cfg.get("folder",
{}) if isinstance(store_cfg,
dict) else {}
if isinstance(folder_cfg, dict) and folder_cfg:
for instance_name, instance_cfg in folder_cfg.items():
if not isinstance(instance_cfg, dict):
continue
name_key = str(instance_cfg.get("NAME") or instance_name)
path_val = str(
instance_cfg.get("PATH") or instance_cfg.get("path")
or ""
).strip()
ok = bool(
store_registry
and store_registry.is_available(name_key)
)
if ok and store_registry:
backend = store_registry[name_key]
scan_ok = bool(getattr(backend, "scan_ok", True))
scan_detail = str(
getattr(backend,
"scan_detail",
"") or ""
)
stats = getattr(backend, "scan_stats", None)
files = None
if isinstance(stats, dict):
total_db = stats.get("files_total_db")
if isinstance(total_db, (int, float)):
files = int(total_db)
status = "SCANNED" if scan_ok else "ERROR"
detail = (path_val + (" - " if path_val else "")
) + (scan_detail or "Up to date")
_add_startup_check(
status,
name_key,
store="folder",
files=files,
detail=detail
)
else:
err = None
if store_registry:
err = store_registry.get_backend_error(
instance_name
) or store_registry.get_backend_error(name_key)
detail = (path_val + (" - " if path_val else "")
) + (err or "Unavailable")
_add_startup_check(
"ERROR",
name_key,
store="folder",
detail=detail
)
if _has_store_subtype(config, "debrid"):
try:
from SYS.config import get_debrid_api_key
from API.alldebrid import AllDebridClient
api_key = get_debrid_api_key(config)
if not api_key:
_add_startup_check(
"DISABLED",
"Debrid",
store="debrid",
detail="Not configured"
)
else:
client = AllDebridClient(api_key)
base_url = str(getattr(client,
"base_url",
"") or "").strip()
_add_startup_check(
"ENABLED",
"Debrid",
store="debrid",
detail=base_url or "Connected"
)
except Exception as exc:
_add_startup_check(
"DISABLED",
"Debrid",
store="debrid",
detail=str(exc)
)
try:
from tool.ytdlp import YtDlpTool
cookiefile = YtDlpTool(config).resolve_cookiefile()
if cookiefile is not None:
_add_startup_check("FOUND", "Cookies", detail=str(cookiefile))
else:
_add_startup_check("MISSING", "Cookies", detail="Not found")
except Exception as exc:
_add_startup_check("ERROR", "Cookies", detail=str(exc))
# Tool checks (configured via [tool=...])
if _has_tool(config, "florencevision"):
try:
tool_cfg = config.get("tool")
fv_cfg = tool_cfg.get("florencevision") if isinstance(tool_cfg, dict) else None
enabled = bool(fv_cfg.get("enabled")) if isinstance(fv_cfg, dict) else False
if not enabled:
_add_startup_check(
"DISABLED",
"FlorenceVision",
provider="tool",
detail="Not enabled",
)
else:
from SYS.optional_deps import florencevision_missing_modules
missing = florencevision_missing_modules()
if missing:
_add_startup_check(
"DISABLED",
"FlorenceVision",
provider="tool",
detail="Missing: " + ", ".join(missing),
)
else:
_add_startup_check(
"ENABLED",
"FlorenceVision",
provider="tool",
detail="Ready",
)
except Exception as exc:
_add_startup_check(
"DISABLED",
"FlorenceVision",
provider="tool",
detail=str(exc),
)
if startup_table.rows:
stdout_console().print()
stdout_console().print(startup_table)
except Exception as exc:
if debug_enabled:
debug(f"⚠ Could not check service availability: {exc}")
style = Style.from_dict(
{
"cmdlet": "#ffffff",
"argument": "#3b8eea",
"value": "#9a3209",
"string": "#6d0d93",
"pipe": "#4caf50",
"selection_at": "#f1c40f",
"selection_range": "#4caf50",
"bottom-toolbar": "noreverse",
}
)
class ToolbarState:
    """Shared mutable state for the prompt's bottom-toolbar message."""
    # Current toolbar text; an empty string hides the toolbar.
    text: str = ""
    # time.time() stamp of the most recent update (used for the 3s expiry).
    last_update_time: float = 0.0
    # Pending auto-clear timer, cancelled/replaced on every new message.
    clear_timer: Optional[threading.Timer] = None
toolbar_state = ToolbarState()
session: Optional[PromptSession] = None
def get_toolbar() -> Optional[str]:
    """Return the current toolbar text, or None when blank or older than 3s.

    Expired text is also cleared so subsequent redraws stay empty.
    """
    message = toolbar_state.text
    if not message or not message.strip():
        return None
    age = time.time() - toolbar_state.last_update_time
    if age > 3:
        toolbar_state.text = ""
        return None
    return message
def update_toolbar(text: str) -> None:
nonlocal session
text = text.strip()
toolbar_state.text = text
toolbar_state.last_update_time = time.time()
if toolbar_state.clear_timer:
toolbar_state.clear_timer.cancel()
toolbar_state.clear_timer = None
if text:
def clear_toolbar() -> None:
toolbar_state.text = ""
toolbar_state.clear_timer = None
if session is not None and hasattr(
session,
"app") and session.app.is_running:
session.app.invalidate()
toolbar_state.clear_timer = threading.Timer(3.0, clear_toolbar)
toolbar_state.clear_timer.daemon = True
toolbar_state.clear_timer.start()
if session is not None and hasattr(session,
"app") and session.app.is_running:
session.app.invalidate()
self._pipeline_executor.set_toolbar_output(update_toolbar)
completer = CmdletCompleter(config_loader=self._config_loader)
session = PromptSession(
completer=cast(Any,
completer),
lexer=MedeiaLexer(),
style=style,
bottom_toolbar=get_toolbar,
refresh_interval=0.5,
)
while True:
try:
user_input = session.prompt(prompt_text).strip()
except (EOFError, KeyboardInterrupt):
print("He who is victorious through deceit is defeated by the truth.")
break
if not user_input:
continue
low = user_input.lower()
if low in {"exit",
"quit",
"q"}:
print("He who is victorious through deceit is defeated by the truth.")
break
if low in {"help",
"?"}:
CmdletHelp.show_cmdlet_list()
continue
pipeline_ctx_ref = None
try:
from SYS import pipeline as ctx
ctx.set_current_command_text(user_input)
pipeline_ctx_ref = ctx
except Exception:
pipeline_ctx_ref = None
try:
from SYS.cli_syntax import validate_pipeline_text
syntax_error = validate_pipeline_text(user_input)
if syntax_error:
print(syntax_error.message, file=sys.stderr)
continue
except Exception:
pass
try:
tokens = shlex.split(user_input)
except ValueError as exc:
print(f"Syntax error: {exc}", file=sys.stderr)
continue
if not tokens:
continue
if len(tokens) == 1 and tokens[0] == "@,,":
try:
from SYS import pipeline as ctx
if ctx.restore_next_result_table():
last_table = (
ctx.get_display_table()
if hasattr(ctx,
"get_display_table") else None
)
if last_table is None:
last_table = ctx.get_last_result_table()
if last_table:
stdout_console().print()
ctx.set_current_stage_table(last_table)
stdout_console().print(last_table)
else:
items = ctx.get_last_result_items()
if items:
ctx.set_current_stage_table(None)
print(
f"Restored {len(items)} items (no table format available)"
)
else:
print("No forward history available", file=sys.stderr)
except Exception as exc:
print(f"Error restoring next table: {exc}", file=sys.stderr)
continue
if len(tokens) == 1 and tokens[0] == "@..":
try:
from SYS import pipeline as ctx
if ctx.restore_previous_result_table():
last_table = (
ctx.get_display_table()
if hasattr(ctx,
"get_display_table") else None
)
if last_table is None:
last_table = ctx.get_last_result_table()
# Auto-refresh search-file tables when navigating back,
# so row payloads (titles/tags) reflect latest store state.
try:
src_cmd = (
getattr(last_table,
"source_command",
None) if last_table else None
)
if (isinstance(src_cmd,
str)
and src_cmd.lower().replace("_",
"-") == "search-file"):
src_args = (
getattr(last_table,
"source_args",
None) if last_table else None
)
base_args = list(src_args
) if isinstance(src_args,
list) else []
cleaned_args = [
str(a) for a in base_args if str(a).strip().lower()
not in {"--refresh", "-refresh"}
]
if hasattr(ctx, "set_current_command_text"):
try:
title_text = (
getattr(last_table,
"title",
None) if last_table else None
)
if isinstance(title_text,
str) and title_text.strip():
ctx.set_current_command_text(
title_text.strip()
)
else:
ctx.set_current_command_text(
" ".join(
["search-file",
*cleaned_args]
).strip()
)
except Exception:
pass
try:
self._cmdlet_executor.execute(
"search-file",
cleaned_args + ["--refresh"]
)
finally:
if hasattr(ctx, "clear_current_command_text"):
try:
ctx.clear_current_command_text()
except Exception:
pass
continue
except Exception as exc:
print(
f"Error refreshing search-file table: {exc}",
file=sys.stderr
)
if last_table:
stdout_console().print()
ctx.set_current_stage_table(last_table)
stdout_console().print(last_table)
else:
items = ctx.get_last_result_items()
if items:
ctx.set_current_stage_table(None)
print(
f"Restored {len(items)} items (no table format available)"
)
else:
print("No previous result table in history")
else:
print("Result table history is empty")
except Exception as exc:
print(f"Error restoring previous result table: {exc}")
continue
try:
if "|" in tokens or (tokens and tokens[0].startswith("@")):
self._pipeline_executor.execute_tokens(tokens)
else:
cmd_name = tokens[0].replace("_", "-").lower()
is_help = any(
arg in {"-help",
"--help",
"-h"} for arg in tokens[1:]
)
if is_help:
CmdletHelp.show_cmdlet_help(cmd_name)
else:
self._cmdlet_executor.execute(cmd_name, tokens[1:])
finally:
if pipeline_ctx_ref:
pipeline_ctx_ref.clear_current_command_text()
if __name__ == "__main__":
    # Script entry point: construct the CLI object and start the interactive REPL.
    MedeiaCLI().run()