This commit is contained in:
2026-01-22 01:53:13 -08:00
parent b3e7f3e277
commit 33406a6ecf
17 changed files with 857 additions and 877 deletions

View File

@@ -4,6 +4,7 @@ import sys
import json
import socket
import re
from datetime import datetime
from urllib.parse import urlparse, parse_qs
from pathlib import Path
from cmdlet._shared import Cmdlet, CmdletArg, parse_cmdlet_args, resolve_tidal_manifest_path
@@ -13,8 +14,7 @@ from MPV.mpv_ipc import MPV
from SYS import pipeline as ctx
from SYS.models import PipeObject
from API.folder import LocalLibrarySearchOptimizer
from SYS.config import get_local_storage_path, get_hydrus_access_key, get_hydrus_url
from SYS.config import get_hydrus_access_key, get_hydrus_url
_ALLDEBRID_UNLOCK_CACHE: Dict[str,
str] = {}
@@ -27,6 +27,94 @@ def _repo_root() -> Path:
return Path(os.getcwd())
def _playlist_store_path() -> Path:
    """Location of the JSON file that persists saved MPV playlists."""
    store_file = "mpv_playlists.json"
    return _repo_root() / store_file
def _load_playlist_store(path: Path) -> Dict[str, Any]:
if not path.exists():
return {"next_id": 1, "playlists": []}
try:
data = json.loads(path.read_text(encoding="utf-8"))
if not isinstance(data, dict):
return {"next_id": 1, "playlists": []}
data.setdefault("next_id", 1)
data.setdefault("playlists", [])
if not isinstance(data["playlists"], list):
data["playlists"] = []
return data
except Exception:
return {"next_id": 1, "playlists": []}
def _save_playlist_store(path: Path, data: Dict[str, Any]) -> bool:
try:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(json.dumps(data, indent=2), encoding="utf-8")
return True
except Exception:
return False
def _save_playlist(name: str, items: List[Any]) -> bool:
    """Create or update the saved playlist called *name*.

    Matching is case-insensitive on the trimmed name. An existing entry has
    its items and ``updated_at`` replaced in place; otherwise a new entry is
    appended with the next sequential id. Returns True when the store file
    was written successfully.
    """
    # Local import: keeps the fix self-contained without touching module imports.
    from datetime import timezone

    path = _playlist_store_path()
    data = _load_playlist_store(path)
    playlists = data.get("playlists", [])
    # datetime.utcnow() is deprecated (Python 3.12+) and returns a naive value;
    # use an aware UTC clock while preserving the exact legacy
    # second-resolution "...Z" format stored in the JSON file.
    now = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    wanted = str(name).strip().lower()
    for pl in playlists:
        if str(pl.get("name")).strip().lower() == wanted:
            pl["items"] = list(items)
            pl["updated_at"] = now
            return _save_playlist_store(path, data)
    new_id = int(data.get("next_id") or 1)
    data["next_id"] = new_id + 1
    playlists.append({
        "id": new_id,
        "name": name,
        "items": list(items),
        "updated_at": now,
    })
    data["playlists"] = playlists
    return _save_playlist_store(path, data)
def _get_playlist_by_id(playlist_id: int) -> Optional[tuple[str, List[Any]]]:
    """Look up a saved playlist by numeric id.

    Returns ``(name, items)`` for the first matching entry, or None when no
    entry's id matches. Entries with non-numeric ids are skipped silently.
    """
    store = _load_playlist_store(_playlist_store_path())
    for entry in store.get("playlists", []):
        try:
            matched = int(entry.get("id")) == int(playlist_id)
        except Exception:
            continue
        if matched:
            return str(entry.get("name") or ""), list(entry.get("items") or [])
    return None
def _delete_playlist(playlist_id: int) -> bool:
    """Remove every playlist whose id equals *playlist_id*.

    Returns True only when at least one entry was removed AND the store was
    written back successfully; nothing is written when no entry matched.
    """
    store_path = _playlist_store_path()
    store = _load_playlist_store(store_path)
    remaining = []
    found = False
    for entry in store.get("playlists", []):
        try:
            is_target = int(entry.get("id")) == int(playlist_id)
        except Exception:
            # Unparseable id: keep the entry untouched.
            is_target = False
        if is_target:
            found = True
        else:
            remaining.append(entry)
    store["playlists"] = remaining
    if not found:
        return False
    return _save_playlist_store(store_path, store)
def _get_playlists() -> List[Dict[str, Any]]:
    """Shallow copies of every dict-shaped playlist record in the store."""
    result: List[Dict[str, Any]] = []
    for entry in _load_playlist_store(_playlist_store_path()).get("playlists", []):
        if isinstance(entry, dict):
            result.append(dict(entry))
    return result
def _repo_log_dir() -> Path:
d = _repo_root() / "Log"
try:
@@ -828,23 +916,8 @@ def _get_playable_path(
backend_class = type(backend).__name__
backend_target_resolved = True
# Folder stores: resolve to an on-disk file path.
if (hasattr(backend, "get_file") and callable(getattr(backend, "get_file"))
and backend_class == "Folder"):
try:
resolved = backend.get_file(file_hash)
if isinstance(resolved, Path):
path = str(resolved)
elif resolved is not None:
path = str(resolved)
except Exception as e:
debug(
f"Error resolving file path from store '{store}': {e}",
file=sys.stderr
)
# HydrusNetwork: build a playable API file URL without browser side-effects.
elif backend_class == "HydrusNetwork":
if backend_class == "HydrusNetwork":
try:
client = getattr(backend, "_client", None)
base_url = getattr(client, "url", None)
@@ -1367,58 +1440,38 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
# If we save 'memory://...', it will work when loaded back.
clean_items.append(item)
# Use config from context or load it
config_data = config if config else {}
storage_path = get_local_storage_path(config_data) or _default_state_dir()
try:
Path(storage_path).mkdir(parents=True, exist_ok=True)
except Exception:
pass
with LocalLibrarySearchOptimizer(storage_path) as db:
if db.save_playlist(playlist_name, clean_items):
debug(f"Playlist saved as '{playlist_name}'")
return 0
else:
debug(f"Failed to save playlist '{playlist_name}'")
return 1
if _save_playlist(playlist_name, clean_items):
debug(f"Playlist saved as '{playlist_name}'")
return 0
debug(f"Failed to save playlist '{playlist_name}'")
return 1
# Handle Load Playlist
current_playlist_name = None
if load_mode:
# Use config from context or load it
config_data = config if config else {}
if index_arg:
try:
pl_id = int(index_arg)
storage_path = get_local_storage_path(config_data)
if not storage_path:
debug("Local storage path not configured.")
return 1
with LocalLibrarySearchOptimizer(storage_path) as db:
if index_arg:
try:
pl_id = int(index_arg)
# Handle Delete Playlist (if -clear is also passed)
if clear_mode:
if db.delete_playlist(pl_id):
debug(f"Playlist ID {pl_id} deleted.")
# Clear index_arg so we fall through to list mode and show updated list
index_arg = None
# Don't return, let it list the remaining playlists
else:
debug(f"Failed to delete playlist ID {pl_id}.")
return 1
# Handle Delete Playlist (if -clear is also passed)
if clear_mode:
if _delete_playlist(pl_id):
debug(f"Playlist ID {pl_id} deleted.")
# Clear index_arg so we fall through to list mode and show updated list
index_arg = None
# Don't return, let it list the remaining playlists
else:
# Handle Load Playlist
result = db.get_playlist_by_id(pl_id)
if result is None:
debug(f"Playlist ID {pl_id} not found.")
return 1
debug(f"Failed to delete playlist ID {pl_id}.")
return 1
else:
# Handle Load Playlist
result = _get_playlist_by_id(pl_id)
if result is None:
debug(f"Playlist ID {pl_id} not found.")
return 1
name, items = result
current_playlist_name = name
name, items = result
current_playlist_name = name
# Queue items (replacing current playlist)
if items:
@@ -1446,42 +1499,42 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
debug(f"Invalid playlist ID: {index_arg}")
return 1
# If we deleted or didn't have an index, list playlists
if not index_arg:
playlists = db.get_playlists()
# If we deleted or didn't have an index, list playlists
if not index_arg:
playlists = _get_playlists()
if not playlists:
debug("No saved playlists found.")
return 0
table = Table("Saved Playlists")
for i, pl in enumerate(playlists):
item_count = len(pl.get("items", []))
row = table.add_row()
# row.add_column("ID", str(pl['id'])) # Hidden as per user request
row.add_column("Name", pl["name"])
row.add_column("Items", str(item_count))
row.add_column("Updated", pl["updated_at"])
# Set the playlist items as the result object for this row
# When user selects @N, they get the list of items
# We also set the source command to .pipe -load <ID> so it loads it
table.set_row_selection_args(i, ["-load", str(pl["id"])])
table.set_source_command(".mpv")
# Register results
ctx.set_last_result_table_overlay(
table,
[p["items"] for p in playlists]
)
ctx.set_current_stage_table(table)
# Do not print directly here.
# Both CmdletExecutor and PipelineExecutor render the current-stage/overlay table,
# so printing here would duplicate output.
if not playlists:
debug("No saved playlists found.")
return 0
table = Table("Saved Playlists")
for i, pl in enumerate(playlists):
item_count = len(pl.get("items", []))
row = table.add_row()
# row.add_column("ID", str(pl['id'])) # Hidden as per user request
row.add_column("Name", pl["name"])
row.add_column("Items", str(item_count))
row.add_column("Updated", pl.get("updated_at") or "")
# Set the playlist items as the result object for this row
# When user selects @N, they get the list of items
# We also set the source command to .pipe -load <ID> so it loads it
table.set_row_selection_args(i, ["-load", str(pl["id"])])
table.set_source_command(".mpv")
# Register results
ctx.set_last_result_table_overlay(
table,
[p["items"] for p in playlists]
)
ctx.set_current_stage_table(table)
# Do not print directly here.
# Both CmdletExecutor and PipelineExecutor render the current-stage/overlay table,
# so printing here would duplicate output.
return 0
# Everything below was originally outside a try block; keep it inside so `start_opts` is in scope.
# Handle Play/Pause commands (but skip if we have index_arg to play a specific item)
@@ -1850,20 +1903,6 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
if len(stem) == 64 and all(c in "0123456789abcdef"
for c in stem.lower()):
file_hash = stem.lower()
# Find which folder store has this file
if file_storage:
for backend_name in file_storage.list_backends():
backend = file_storage[backend_name]
if type(backend).__name__ == "Folder":
# Check if this backend has the file
try:
result_path = backend.get_file(file_hash)
if isinstance(result_path,
Path) and result_path.exists():
store_name = backend_name
break
except Exception:
pass
# Fallback to inferred store if we couldn't find it
if not store_name:

View File

@@ -242,28 +242,6 @@ def _run(result: Any, args: List[str], config: Dict[str, Any]) -> int:
_add_startup_check(startup_table, "DISABLED", "Matrix", provider="matrix", detail=str(exc))
debug(f"Matrix instantiation failed: {exc}")
# Folders
if _has_store_subtype(config, "folder"):
fcfg = config.get("store", {}).get("folder", {})
for iname, icfg in fcfg.items():
if not isinstance(icfg, dict): continue
nkey = str(icfg.get("NAME") or iname)
pval = str(icfg.get("PATH") or icfg.get("path") or "").strip()
debug(f"Folder store check: name={nkey}, path={pval}")
ok = bool(store_registry and store_registry.is_available(nkey))
if ok and store_registry:
backend = store_registry[nkey]
scan_ok = getattr(backend, "scan_ok", True)
sdet = getattr(backend, "scan_detail", "Up to date")
stats = getattr(backend, "scan_stats", {})
files = int(stats.get("files_total_db", 0)) if stats else None
debug(f"Folder backend '{nkey}': scan_ok={scan_ok}, scan_detail={sdet}, stats={stats}")
_add_startup_check(startup_table, "SCANNED" if scan_ok else "ERROR", nkey, store="folder", files=files, detail=f"{pval} - {sdet}")
else:
err = store_registry.get_backend_error(iname) if store_registry else None
debug(f"Folder backend '{nkey}' error: {err}")
_add_startup_check(startup_table, "ERROR", nkey, store="folder", detail=f"{pval} - {err or 'Unavailable'}")
# Cookies
try:
from tool.ytdlp import YtDlpTool

View File

@@ -12,7 +12,7 @@ from cmdlet import register
from cmdlet._shared import Cmdlet, CmdletArg
from SYS import pipeline as ctx
from SYS.logger import log
from SYS.config import get_local_storage_path
from SYS.database import db as _db, get_worker_stdout
DEFAULT_LIMIT = 100
WORKER_STATUS_FILTERS = {"running",
@@ -74,6 +74,69 @@ CMDLET = Cmdlet(
)
def _normalize_worker_row(row: Dict[str, Any]) -> Dict[str, Any]:
worker_id = row.get("id")
created = row.get("created_at") or ""
updated = row.get("updated_at") or ""
payload = dict(row)
payload["worker_id"] = worker_id
payload["started_at"] = created
payload["last_updated"] = updated
payload["completed_at"] = updated
payload["pipe"] = row.get("details") or row.get("title") or ""
return payload
class _WorkerDB:
    """Thin adapter over the shared ``_db`` connection exposing the worker
    queries this cmdlet needs: cleanup, single-worker detail, event log,
    and the recent-workers listing. All methods are best-effort and return
    neutral values instead of raising on DB errors."""

    def clear_finished_workers(self) -> int:
        """Delete every non-running worker row; returns rows removed (0 on error)."""
        try:
            cursor = _db.execute("DELETE FROM workers WHERE status != 'running'")
            return int(getattr(cursor, "rowcount", 0) or 0)
        except Exception:
            return 0

    def get_worker(self, worker_id: str) -> Dict[str, Any] | None:
        """Fetch one worker by id, normalized, with captured stdout attached.

        Returns None when no row matches; ``stdout`` degrades to "" if the
        capture lookup fails.
        """
        row = _db.fetchone("SELECT * FROM workers WHERE id = ?", (worker_id,))
        if not row:
            return None
        record = _normalize_worker_row(dict(row))
        try:
            record["stdout"] = get_worker_stdout(worker_id)
        except Exception:
            record["stdout"] = ""
        return record

    def get_worker_events(self, worker_id: str) -> List[Dict[str, Any]]:
        """Chronological output events for *worker_id* (empty list on error)."""
        try:
            rows = _db.fetchall(
                "SELECT content, channel, timestamp FROM worker_stdout WHERE worker_id = ? ORDER BY timestamp ASC",
                (worker_id,),
            )
        except Exception:
            return []
        collected: List[Dict[str, Any]] = []
        for record in rows:
            try:
                collected.append({
                    "message": record.get("content"),
                    "channel": record.get("channel") or "stdout",
                    "created_at": record.get("timestamp"),
                })
            except Exception:
                # Skip malformed rows rather than abort the whole listing.
                continue
        return collected

    def get_all_workers(self, limit: int = 100) -> List[Dict[str, Any]]:
        """Most recent workers first, capped at *limit* (falls back to 100)."""
        try:
            rows = _db.fetchall(
                "SELECT * FROM workers ORDER BY created_at DESC LIMIT ?",
                (int(limit or 100),),
            )
        except Exception:
            rows = []
        return [_normalize_worker_row(dict(record)) for record in rows]
def _has_help_flag(args_list: Sequence[str]) -> bool:
    """True when any argument, lowercased, is one of the recognized help flags."""
    for arg in args_list:
        if str(arg).lower() in HELP_FLAGS:
            return True
    return False
@@ -101,39 +164,33 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
options = _parse_worker_args(args_list)
library_root = get_local_storage_path(config or {})
if not library_root:
log("No library root configured. Use the .config command to set up storage.", file=sys.stderr)
return 1
try:
from API.folder import API_folder_store
db = _WorkerDB()
with API_folder_store(library_root) as db:
if options.clear:
count = db.clear_finished_workers()
log(f"Cleared {count} finished workers.")
if options.clear:
count = db.clear_finished_workers()
log(f"Cleared {count} finished workers.")
return 0
if options.worker_id:
worker = db.get_worker(options.worker_id)
if worker:
events: List[Dict[str, Any]] = []
try:
wid = worker.get("worker_id")
if wid:
events = db.get_worker_events(wid)
except Exception:
pass
_emit_worker_detail(worker, events)
return 0
log(f"Worker not found: {options.worker_id}", file=sys.stderr)
return 1
if options.worker_id:
worker = db.get_worker(options.worker_id)
if worker:
events: List[Dict[str, Any]] = []
try:
wid = worker.get("worker_id")
if wid and hasattr(db, "get_worker_events"):
events = db.get_worker_events(wid)
except Exception:
pass
_emit_worker_detail(worker, events)
return 0
log(f"Worker not found: {options.worker_id}", file=sys.stderr)
return 1
if selection_requested:
return _render_worker_selection(db, result)
if selection_requested:
return _render_worker_selection(db, result)
return _render_worker_list(db, options.status, options.limit)
return _render_worker_list(db, options.status, options.limit)
except Exception as exc:
log(f"Workers query failed: {exc}", file=sys.stderr)
import traceback