dsf
This commit is contained in:
@@ -1392,7 +1392,12 @@ class API_folder_store:
|
||||
return 0
|
||||
|
||||
def delete_file(self, file_path: Path) -> bool:
|
||||
"""Delete a file from the database by path. Cascades to metadata, tags, notes, etc."""
|
||||
"""Delete a file from the database by path.
|
||||
|
||||
Cascades to metadata, tags, notes, etc, and also cleans up relationship
|
||||
backlinks in other files so no file retains dangling references to the
|
||||
deleted hash.
|
||||
"""
|
||||
try:
|
||||
str_path = str(file_path.resolve())
|
||||
cursor = self.connection.cursor()
|
||||
@@ -1405,6 +1410,67 @@ class API_folder_store:
|
||||
return False
|
||||
|
||||
file_hash = row[0]
|
||||
|
||||
# Remove backlinks from other files that reference this hash.
|
||||
try:
|
||||
target_hash = str(file_hash or "").strip().lower()
|
||||
backlinks = self.find_files_pointing_to_hash(target_hash)
|
||||
by_src: Dict[str, set[str]] = {}
|
||||
for b in backlinks:
|
||||
src = str((b or {}).get("hash") or "").strip().lower()
|
||||
rt = str((b or {}).get("type") or "").strip()
|
||||
if not src or src == target_hash or not rt:
|
||||
continue
|
||||
by_src.setdefault(src, set()).add(rt)
|
||||
|
||||
for src_hash, rel_types in by_src.items():
|
||||
meta = self.get_metadata(src_hash) or {}
|
||||
rels = meta.get("relationships") if isinstance(meta, dict) else None
|
||||
if not isinstance(rels, dict) or not rels:
|
||||
continue
|
||||
|
||||
changed = False
|
||||
for rt in rel_types:
|
||||
key_to_edit = None
|
||||
for k in list(rels.keys()):
|
||||
if str(k).lower() == str(rt).lower():
|
||||
key_to_edit = str(k)
|
||||
break
|
||||
if not key_to_edit:
|
||||
continue
|
||||
|
||||
bucket = rels.get(key_to_edit)
|
||||
if not isinstance(bucket, list) or not bucket:
|
||||
continue
|
||||
|
||||
new_bucket = [h for h in bucket if str(h or "").strip().lower() != target_hash]
|
||||
if len(new_bucket) == len(bucket):
|
||||
continue
|
||||
|
||||
changed = True
|
||||
if new_bucket:
|
||||
rels[key_to_edit] = new_bucket
|
||||
else:
|
||||
try:
|
||||
del rels[key_to_edit]
|
||||
except Exception:
|
||||
rels[key_to_edit] = []
|
||||
|
||||
if changed:
|
||||
cursor.execute(
|
||||
"""
|
||||
INSERT INTO metadata (hash, relationships)
|
||||
VALUES (?, ?)
|
||||
ON CONFLICT(hash) DO UPDATE SET
|
||||
relationships = excluded.relationships,
|
||||
time_modified = CURRENT_TIMESTAMP,
|
||||
updated_at = CURRENT_TIMESTAMP
|
||||
""",
|
||||
(src_hash, json.dumps(rels if rels else {})),
|
||||
)
|
||||
except Exception:
|
||||
# Best-effort cleanup; deletion should still proceed.
|
||||
pass
|
||||
|
||||
# Delete the file entry (cascades to metadata, tags, notes, etc via foreign keys)
|
||||
cursor.execute("DELETE FROM files WHERE file_path = ?", (str_path,))
|
||||
|
||||
329
CLI.py
329
CLI.py
@@ -1024,6 +1024,94 @@ class CmdletExecutor:
|
||||
|
||||
config = self._config_loader.load()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Single-command Live pipeline progress (match REPL behavior)
|
||||
# ------------------------------------------------------------------
|
||||
progress_ui = None
|
||||
pipe_idx: Optional[int] = None
|
||||
|
||||
def _maybe_start_single_live_progress(
|
||||
*,
|
||||
cmd_name_norm: str,
|
||||
filtered_args: List[str],
|
||||
piped_input: Any,
|
||||
config: Any,
|
||||
) -> None:
|
||||
nonlocal progress_ui, pipe_idx
|
||||
|
||||
# Keep behavior consistent with pipeline runner exclusions.
|
||||
if cmd_name_norm in {"get-relationship", "get-rel", ".pipe", ".matrix"}:
|
||||
return
|
||||
|
||||
try:
|
||||
quiet_mode = bool(config.get("_quiet_background_output")) if isinstance(config, dict) else False
|
||||
except Exception:
|
||||
quiet_mode = False
|
||||
if quiet_mode:
|
||||
return
|
||||
|
||||
try:
|
||||
import sys as _sys
|
||||
|
||||
if not bool(getattr(_sys.stderr, "isatty", lambda: False)()):
|
||||
return
|
||||
except Exception:
|
||||
return
|
||||
|
||||
try:
|
||||
from models import PipelineLiveProgress
|
||||
|
||||
progress_ui = PipelineLiveProgress([cmd_name_norm], enabled=True)
|
||||
progress_ui.start()
|
||||
try:
|
||||
if hasattr(ctx, "set_live_progress"):
|
||||
ctx.set_live_progress(progress_ui)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
pipe_idx = 0
|
||||
|
||||
# Estimate per-item task count for the single pipe.
|
||||
total_items = 1
|
||||
preview_items: Optional[List[Any]] = None
|
||||
try:
|
||||
if isinstance(piped_input, list):
|
||||
total_items = max(1, int(len(piped_input)))
|
||||
preview_items = list(piped_input)
|
||||
elif piped_input is not None:
|
||||
total_items = 1
|
||||
preview_items = [piped_input]
|
||||
else:
|
||||
preview: List[Any] = []
|
||||
toks = list(filtered_args or [])
|
||||
i = 0
|
||||
while i < len(toks):
|
||||
t = str(toks[i])
|
||||
low = t.lower().strip()
|
||||
if low in {"-url", "--url"} and i + 1 < len(toks):
|
||||
nxt = str(toks[i + 1])
|
||||
if nxt and not nxt.startswith("-"):
|
||||
preview.append(nxt)
|
||||
i += 2
|
||||
continue
|
||||
if (not t.startswith("-")) and ("://" in low or low.startswith(("magnet:", "torrent:"))):
|
||||
preview.append(t)
|
||||
i += 1
|
||||
preview_items = preview if preview else None
|
||||
total_items = max(1, int(len(preview)) if preview else 1)
|
||||
except Exception:
|
||||
total_items = 1
|
||||
preview_items = None
|
||||
|
||||
try:
|
||||
progress_ui.begin_pipe(0, total_items=int(total_items), items_preview=preview_items)
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
progress_ui = None
|
||||
pipe_idx = None
|
||||
|
||||
|
||||
filtered_args: List[str] = []
|
||||
selected_indices: List[int] = []
|
||||
select_all = False
|
||||
@@ -1099,7 +1187,35 @@ class CmdletExecutor:
|
||||
)
|
||||
|
||||
stage_worker_id = stage_session.worker_id if stage_session else None
|
||||
pipeline_ctx = ctx.PipelineStageContext(stage_index=0, total_stages=1, pipe_index=0, worker_id=stage_worker_id)
|
||||
|
||||
# Start live progress after we know the effective cmd + args + piped input.
|
||||
cmd_norm = str(cmd_name or "").replace("_", "-").strip().lower()
|
||||
_maybe_start_single_live_progress(
|
||||
cmd_name_norm=cmd_norm or str(cmd_name or "").strip().lower(),
|
||||
filtered_args=filtered_args,
|
||||
piped_input=result,
|
||||
config=config,
|
||||
)
|
||||
|
||||
on_emit = None
|
||||
if progress_ui is not None and pipe_idx is not None:
|
||||
_ui = progress_ui
|
||||
|
||||
def _on_emit(obj: Any, _progress=_ui) -> None:
|
||||
try:
|
||||
_progress.on_emit(0, obj)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
on_emit = _on_emit
|
||||
|
||||
pipeline_ctx = ctx.PipelineStageContext(
|
||||
stage_index=0,
|
||||
total_stages=1,
|
||||
pipe_index=pipe_idx if pipe_idx is not None else 0,
|
||||
worker_id=stage_worker_id,
|
||||
on_emit=on_emit,
|
||||
)
|
||||
ctx.set_stage_context(pipeline_ctx)
|
||||
stage_status = "completed"
|
||||
stage_error = ""
|
||||
@@ -1131,6 +1247,19 @@ class CmdletExecutor:
|
||||
if getattr(pipeline_ctx, "emits", None):
|
||||
emits = list(pipeline_ctx.emits)
|
||||
|
||||
# Shared `-path` behavior: if the cmdlet emitted temp/PATH file artifacts,
|
||||
# move them to the user-specified destination and update emitted paths.
|
||||
try:
|
||||
from cmdlet import _shared as sh
|
||||
|
||||
emits = sh.apply_output_path_from_pipeobjects(cmd_name=cmd_name, args=filtered_args, emits=emits)
|
||||
try:
|
||||
pipeline_ctx.emits = list(emits)
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Detect format-selection emits and skip printing (user selects with @N).
|
||||
is_format_selection = False
|
||||
if emits:
|
||||
@@ -1195,9 +1324,64 @@ class CmdletExecutor:
|
||||
else:
|
||||
ctx.set_last_result_items_only(emits)
|
||||
|
||||
# Stop Live progress before printing tables.
|
||||
if progress_ui is not None:
|
||||
try:
|
||||
if pipe_idx is not None:
|
||||
progress_ui.finish_pipe(int(pipe_idx), force_complete=(stage_status == "completed"))
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
progress_ui.stop()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
if hasattr(ctx, "set_live_progress"):
|
||||
ctx.set_live_progress(None)
|
||||
except Exception:
|
||||
pass
|
||||
progress_ui = None
|
||||
pipe_idx = None
|
||||
|
||||
stdout_console().print()
|
||||
stdout_console().print(table)
|
||||
|
||||
# If the cmdlet produced a current-stage table without emits (e.g. format selection),
|
||||
# render it here for parity with REPL pipeline runner.
|
||||
if (not getattr(pipeline_ctx, "emits", None)) and hasattr(ctx, "get_current_stage_table"):
|
||||
try:
|
||||
stage_table = ctx.get_current_stage_table()
|
||||
except Exception:
|
||||
stage_table = None
|
||||
if stage_table is not None:
|
||||
try:
|
||||
already_rendered = bool(getattr(stage_table, "_rendered_by_cmdlet", False))
|
||||
except Exception:
|
||||
already_rendered = False
|
||||
|
||||
if already_rendered:
|
||||
return
|
||||
|
||||
if progress_ui is not None:
|
||||
try:
|
||||
if pipe_idx is not None:
|
||||
progress_ui.finish_pipe(int(pipe_idx), force_complete=(stage_status == "completed"))
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
progress_ui.stop()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
if hasattr(ctx, "set_live_progress"):
|
||||
ctx.set_live_progress(None)
|
||||
except Exception:
|
||||
pass
|
||||
progress_ui = None
|
||||
pipe_idx = None
|
||||
stdout_console().print()
|
||||
stdout_console().print(stage_table)
|
||||
|
||||
if ret_code != 0:
|
||||
stage_status = "failed"
|
||||
stage_error = f"exit code {ret_code}"
|
||||
@@ -1207,6 +1391,21 @@ class CmdletExecutor:
|
||||
stage_error = f"{type(exc).__name__}: {exc}"
|
||||
print(f"[error] {type(exc).__name__}: {exc}\n")
|
||||
finally:
|
||||
if progress_ui is not None:
|
||||
try:
|
||||
if pipe_idx is not None:
|
||||
progress_ui.finish_pipe(int(pipe_idx), force_complete=(stage_status == "completed"))
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
progress_ui.stop()
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
if hasattr(ctx, "set_live_progress"):
|
||||
ctx.set_live_progress(None)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
if hasattr(ctx, "clear_current_cmdlet_name"):
|
||||
ctx.clear_current_cmdlet_name()
|
||||
@@ -1245,6 +1444,50 @@ class PipelineExecutor:
|
||||
stages.append(current)
|
||||
return stages
|
||||
|
||||
@staticmethod
|
||||
def _validate_download_media_relationship_order(stages: List[List[str]]) -> bool:
|
||||
"""Guard against running add-relationship on unstored download-media results.
|
||||
|
||||
Intended UX:
|
||||
download-media ... | add-file -store <store> | add-relationship
|
||||
|
||||
Rationale:
|
||||
download-media outputs items that may not yet have a stable store+hash.
|
||||
add-relationship is designed to operate in store/hash mode.
|
||||
"""
|
||||
|
||||
def _norm(name: str) -> str:
|
||||
return str(name or "").replace("_", "-").strip().lower()
|
||||
|
||||
names: List[str] = []
|
||||
for stage in stages or []:
|
||||
if not stage:
|
||||
continue
|
||||
names.append(_norm(stage[0]))
|
||||
|
||||
dl_idxs = [i for i, n in enumerate(names) if n == "download-media"]
|
||||
rel_idxs = [i for i, n in enumerate(names) if n == "add-relationship"]
|
||||
add_file_idxs = [i for i, n in enumerate(names) if n == "add-file"]
|
||||
|
||||
if not dl_idxs or not rel_idxs:
|
||||
return True
|
||||
|
||||
# If download-media is upstream of add-relationship, require an add-file in between.
|
||||
for rel_i in rel_idxs:
|
||||
dl_before = [d for d in dl_idxs if d < rel_i]
|
||||
if not dl_before:
|
||||
continue
|
||||
dl_i = max(dl_before)
|
||||
if not any(dl_i < a < rel_i for a in add_file_idxs):
|
||||
print(
|
||||
"Pipeline order error: when using download-media with add-relationship, "
|
||||
"add-relationship must come after add-file (so items are stored and have store+hash).\n"
|
||||
"Example: download-media <...> | add-file -store <store> | add-relationship\n"
|
||||
)
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def _try_clear_pipeline_stop(ctx: Any) -> None:
|
||||
try:
|
||||
@@ -1714,6 +1957,11 @@ class PipelineExecutor:
|
||||
name = str(stage_tokens[0]).replace("_", "-").lower()
|
||||
if name == "@" or name.startswith("@"):
|
||||
continue
|
||||
# Display-only: avoid Live progress for relationship viewing.
|
||||
# This keeps `@1 | get-relationship` clean and prevents progress UI
|
||||
# from interfering with Rich tables/panels.
|
||||
if name in {"get-relationship", "get-rel"}:
|
||||
continue
|
||||
# `.pipe` (MPV) is an interactive launcher; disable pipeline Live progress
|
||||
# for it because it doesn't meaningfully "complete" (mpv may keep running)
|
||||
# and Live output interferes with MPV playlist UI.
|
||||
@@ -1792,6 +2040,12 @@ class PipelineExecutor:
|
||||
if initial_piped is not None:
|
||||
piped_result = initial_piped
|
||||
|
||||
# REPL guard: prevent add-relationship before add-file for download-media pipelines.
|
||||
if not self._validate_download_media_relationship_order(stages):
|
||||
pipeline_status = "failed"
|
||||
pipeline_error = "Invalid pipeline order"
|
||||
return
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Multi-level pipeline progress (pipes = stages, tasks = items)
|
||||
# ------------------------------------------------------------------
|
||||
@@ -2069,6 +2323,23 @@ class PipelineExecutor:
|
||||
emits: List[Any] = []
|
||||
if getattr(pipeline_ctx, "emits", None) is not None:
|
||||
emits = list(pipeline_ctx.emits or [])
|
||||
|
||||
# Shared `-path` behavior: persist temp/PATH artifacts to destination.
|
||||
if emits:
|
||||
try:
|
||||
from cmdlet import _shared as sh
|
||||
|
||||
emits = sh.apply_output_path_from_pipeobjects(
|
||||
cmd_name=cmd_name,
|
||||
args=list(stage_args),
|
||||
emits=emits,
|
||||
)
|
||||
try:
|
||||
pipeline_ctx.emits = list(emits)
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
if emits:
|
||||
# If the cmdlet already installed an overlay table (e.g. get-tag),
|
||||
# don't overwrite it: set_last_result_items_only() would clear the
|
||||
@@ -2286,25 +2557,12 @@ class PipelineExecutor:
|
||||
except Exception as exc:
|
||||
print(f"[error] Failed to execute pipeline: {exc}\n")
|
||||
|
||||
Welcome = """
|
||||
# MEDIOS-MACINA
|
||||
|
||||
[red]Romans 1:22[/red] Professing themselves to be wise, they became fools,
|
||||
|
||||
|
||||
dfd
|
||||
==
|
||||
Rich can do a pretty *decent* job of rendering markdown.
|
||||
|
||||
1. This is a list item
|
||||
2. This is another list item
|
||||
"""
|
||||
from rich.markdown import Markdown
|
||||
from rich.console import Console
|
||||
|
||||
console = Console()
|
||||
md = Markdown(Welcome)
|
||||
console.print(md)
|
||||
|
||||
class MedeiaCLI:
|
||||
"""Main CLI application object."""
|
||||
|
||||
@@ -2422,6 +2680,32 @@ class MedeiaCLI:
|
||||
|
||||
_ = (search_provider, pipeline, repl, main_callback)
|
||||
|
||||
# Dynamically register all cmdlets as top-level Typer commands so users can
|
||||
# invoke `mm <cmdlet> [args]` directly from the shell. We use Click/Typer
|
||||
# context settings to allow arbitrary flags and options to pass through to
|
||||
# the cmdlet system without Typer trying to parse them.
|
||||
try:
|
||||
names = list_cmdlet_names()
|
||||
skip = {"search-provider", "pipeline", "repl"}
|
||||
for nm in names:
|
||||
if not nm or nm in skip:
|
||||
continue
|
||||
# create a scoped handler to capture the command name
|
||||
def _make_handler(cmd_name: str):
|
||||
@app.command(cmd_name, context_settings={"ignore_unknown_options": True, "allow_extra_args": True})
|
||||
def _handler(ctx: typer.Context):
|
||||
try:
|
||||
args = list(ctx.args or [])
|
||||
except Exception:
|
||||
args = []
|
||||
self._cmdlet_executor.execute(cmd_name, args)
|
||||
return _handler
|
||||
|
||||
_make_handler(nm)
|
||||
except Exception:
|
||||
# Don't let failure to register dynamic commands break startup
|
||||
pass
|
||||
|
||||
return app
|
||||
|
||||
def run(self) -> None:
|
||||
@@ -2438,8 +2722,21 @@ class MedeiaCLI:
|
||||
self.build_app()()
|
||||
|
||||
def run_repl(self) -> None:
|
||||
# (Startup banner is optional; keep the REPL quiet by default.)
|
||||
Welcome = """
|
||||
# MEDIOS-MACINA
|
||||
|
||||
[red]Romans 1:22[/red] Professing themselves to be wise, they became fools,
|
||||
|
||||
|
||||
dfd
|
||||
==
|
||||
Rich can do a pretty *decent* job of rendering markdown.
|
||||
|
||||
1. This is a list item
|
||||
2. This is another list item
|
||||
"""
|
||||
md = Markdown(Welcome)
|
||||
console.print(md)
|
||||
prompt_text = "<🜂🜄|🜁🜃>"
|
||||
|
||||
startup_table = ResultTable(
|
||||
|
||||
520
MPV/LUA/main.lua
520
MPV/LUA/main.lua
@@ -40,6 +40,10 @@ local LOAD_URL_MENU_TYPE = 'medios_load_url'
|
||||
local DOWNLOAD_FORMAT_MENU_TYPE = 'medios_download_pick_format'
|
||||
local DOWNLOAD_STORE_MENU_TYPE = 'medios_download_pick_store'
|
||||
|
||||
-- Menu types for the command submenu and trim prompt
|
||||
local CMD_MENU_TYPE = 'medios_cmd_menu'
|
||||
local TRIM_PROMPT_MENU_TYPE = 'medios_trim_prompt'
|
||||
|
||||
local PIPELINE_REQ_PROP = 'user-data/medeia-pipeline-request'
|
||||
local PIPELINE_RESP_PROP = 'user-data/medeia-pipeline-response'
|
||||
local PIPELINE_READY_PROP = 'user-data/medeia-pipeline-ready'
|
||||
@@ -399,6 +403,373 @@ local function _current_target()
|
||||
return path
|
||||
end
|
||||
|
||||
local ImageControl = {
|
||||
enabled = false,
|
||||
binding_names = {},
|
||||
pan_step = 0.05,
|
||||
pan_step_slow = 0.02,
|
||||
zoom_step = 0.45,
|
||||
zoom_step_slow = 0.15,
|
||||
}
|
||||
|
||||
local MAX_IMAGE_ZOOM = 4.5
|
||||
|
||||
local function _install_q_block()
|
||||
pcall(mp.commandv, 'keybind', 'q', 'script-message', 'medeia-image-quit-block')
|
||||
end
|
||||
|
||||
local function _restore_q_default()
|
||||
pcall(mp.commandv, 'keybind', 'q', 'quit')
|
||||
end
|
||||
|
||||
local function _enable_image_section()
|
||||
pcall(mp.commandv, 'enable-section', 'image', 'allow-hide-cursor')
|
||||
end
|
||||
|
||||
local function _disable_image_section()
|
||||
pcall(mp.commandv, 'disable-section', 'image')
|
||||
end
|
||||
|
||||
mp.register_script_message('medeia-image-quit-block', function()
|
||||
if ImageControl.enabled then
|
||||
mp.osd_message('Press ESC if you really want to quit', 0.7)
|
||||
return
|
||||
end
|
||||
mp.commandv('quit')
|
||||
end)
|
||||
|
||||
local ImageExtensions = {
|
||||
jpg = true,
|
||||
jpeg = true,
|
||||
png = true,
|
||||
gif = true,
|
||||
webp = true,
|
||||
bmp = true,
|
||||
tif = true,
|
||||
tiff = true,
|
||||
heic = true,
|
||||
heif = true,
|
||||
avif = true,
|
||||
ico = true,
|
||||
}
|
||||
|
||||
local function _clean_path_for_extension(path)
|
||||
if type(path) ~= 'string' then
|
||||
return nil
|
||||
end
|
||||
local clean = path:match('([^?]+)') or path
|
||||
clean = clean:match('([^#]+)') or clean
|
||||
local last = clean:match('([^/\\]+)$') or ''
|
||||
local ext = last:match('%.([A-Za-z0-9]+)$')
|
||||
if not ext then
|
||||
return nil
|
||||
end
|
||||
return ext:lower()
|
||||
end
|
||||
|
||||
local function _is_image_path(path)
|
||||
local ext = _clean_path_for_extension(path)
|
||||
return ext and ImageExtensions[ext]
|
||||
end
|
||||
|
||||
local function _get_current_item_is_image()
|
||||
local video_info = mp.get_property_native('current-tracks/video')
|
||||
if type(video_info) == 'table' then
|
||||
if video_info.image and not video_info.albumart then
|
||||
return true
|
||||
end
|
||||
if video_info.image == false and video_info.albumart == true then
|
||||
return false
|
||||
end
|
||||
end
|
||||
local target = _current_target()
|
||||
if target then
|
||||
return _is_image_path(target)
|
||||
end
|
||||
return false
|
||||
end
|
||||
|
||||
-- Cover art / splash support disabled (removed per user request)
|
||||
|
||||
|
||||
local function _set_image_property(value)
|
||||
pcall(mp.set_property_native, 'user-data/mpv/image', value and true or false)
|
||||
end
|
||||
|
||||
local function _show_image_status(message)
|
||||
local zoom = mp.get_property_number('video-zoom') or 0
|
||||
local pan_x = mp.get_property_number('video-pan-x') or 0
|
||||
local pan_y = mp.get_property_number('video-pan-y') or 0
|
||||
local zoom_percent = math.floor((1 + zoom) * 100 + 0.5)
|
||||
local text = string.format('Image: zoom %d%% pan %+.2f %+.2f', zoom_percent, pan_x, pan_y)
|
||||
if message and message ~= '' then
|
||||
text = message .. ' | ' .. text
|
||||
end
|
||||
mp.osd_message(text, 0.7)
|
||||
end
|
||||
|
||||
local function _change_pan(dx, dy)
|
||||
local pan_x = mp.get_property_number('video-pan-x') or 0
|
||||
local pan_y = mp.get_property_number('video-pan-y') or 0
|
||||
mp.set_property_number('video-pan-x', pan_x + dx)
|
||||
mp.set_property_number('video-pan-y', pan_y + dy)
|
||||
_show_image_status()
|
||||
end
|
||||
|
||||
local function _change_zoom(delta)
|
||||
local current = mp.get_property_number('video-zoom') or 0
|
||||
local target = current + delta
|
||||
if target > MAX_IMAGE_ZOOM then
|
||||
target = MAX_IMAGE_ZOOM
|
||||
end
|
||||
if target < -1.0 then
|
||||
target = -1.0
|
||||
end
|
||||
mp.set_property_number('video-zoom', target)
|
||||
mp.set_property('video-unscaled', 'no')
|
||||
if target >= MAX_IMAGE_ZOOM then
|
||||
mp.osd_message('Image zoom maxed at 450%', 0.7)
|
||||
else
|
||||
_show_image_status()
|
||||
end
|
||||
end
|
||||
|
||||
local function _reset_pan_zoom()
|
||||
mp.set_property_number('video-pan-x', 0)
|
||||
mp.set_property_number('video-pan-y', 0)
|
||||
mp.set_property_number('video-zoom', 0)
|
||||
mp.set_property('video-align-x', '0')
|
||||
mp.set_property('video-align-y', '0')
|
||||
mp.set_property('panscan', 0)
|
||||
mp.set_property('video-unscaled', 'no')
|
||||
_show_image_status('Zoom reset')
|
||||
end
|
||||
|
||||
local function _capture_screenshot()
|
||||
mp.commandv('screenshot')
|
||||
mp.osd_message('Screenshot captured', 0.7)
|
||||
end
|
||||
|
||||
mp.register_script_message('medeia-image-screenshot', function()
|
||||
_capture_screenshot()
|
||||
end)
|
||||
|
||||
|
||||
local CLIP_MARKER_SLOT_COUNT = 2
|
||||
local clip_markers = {}
|
||||
local initial_chapters = nil
|
||||
|
||||
local function _format_clip_marker_label(time)
|
||||
if type(time) ~= 'number' then
|
||||
return '0s'
|
||||
end
|
||||
local total = math.max(0, math.floor(time))
|
||||
local hours = math.floor(total / 3600)
|
||||
local minutes = math.floor(total / 60) % 60
|
||||
local seconds = total % 60
|
||||
local parts = {}
|
||||
if hours > 0 then
|
||||
table.insert(parts, ('%dh'):format(hours))
|
||||
end
|
||||
if minutes > 0 or hours > 0 then
|
||||
table.insert(parts, ('%dm'):format(minutes))
|
||||
end
|
||||
table.insert(parts, ('%ds'):format(seconds))
|
||||
return table.concat(parts)
|
||||
end
|
||||
|
||||
local function _apply_clip_chapters()
|
||||
local chapters = {}
|
||||
if initial_chapters then
|
||||
for _, chapter in ipairs(initial_chapters) do table.insert(chapters, chapter) end
|
||||
end
|
||||
for idx = 1, CLIP_MARKER_SLOT_COUNT do
|
||||
local time = clip_markers[idx]
|
||||
if time and type(time) == 'number' then
|
||||
table.insert(chapters, {
|
||||
time = time,
|
||||
title = _format_clip_marker_label(time),
|
||||
})
|
||||
end
|
||||
end
|
||||
table.sort(chapters, function(a, b) return (a.time or 0) < (b.time or 0) end)
|
||||
mp.set_property_native('chapter-list', chapters)
|
||||
end
|
||||
|
||||
local function _reset_clip_markers()
|
||||
for idx = 1, CLIP_MARKER_SLOT_COUNT do
|
||||
clip_markers[idx] = nil
|
||||
end
|
||||
_apply_clip_chapters()
|
||||
end
|
||||
|
||||
local function _capture_clip()
|
||||
local time = mp.get_property_number('time-pos') or mp.get_property_number('time')
|
||||
if not time then
|
||||
mp.osd_message('Cannot capture clip; no time available', 0.7)
|
||||
return
|
||||
end
|
||||
local slot = nil
|
||||
for idx = 1, CLIP_MARKER_SLOT_COUNT do
|
||||
if not clip_markers[idx] then
|
||||
slot = idx
|
||||
break
|
||||
end
|
||||
end
|
||||
if not slot then
|
||||
local best = math.huge
|
||||
for idx = 1, CLIP_MARKER_SLOT_COUNT do
|
||||
local existing = clip_markers[idx]
|
||||
local distance = math.abs((existing or 0) - time)
|
||||
if distance < best then
|
||||
best = distance
|
||||
slot = idx
|
||||
end
|
||||
end
|
||||
slot = slot or 1
|
||||
end
|
||||
clip_markers[slot] = time
|
||||
_apply_clip_chapters()
|
||||
mp.commandv('screenshot-to-file', ('clip-%s-%.0f.png'):format(os.date('%Y%m%d-%H%M%S'), time))
|
||||
local label = _format_clip_marker_label(time)
|
||||
mp.osd_message(('Clip marker %d set at %s'):format(slot, label), 0.7)
|
||||
end
|
||||
|
||||
mp.register_event('file-loaded', function()
|
||||
initial_chapters = mp.get_property_native('chapter-list') or {}
|
||||
_reset_clip_markers()
|
||||
end)
|
||||
|
||||
mp.register_script_message('medeia-image-clip', function()
|
||||
_capture_clip()
|
||||
end)
|
||||
|
||||
local function _get_trim_range_from_clip_markers()
|
||||
local times = {}
|
||||
for idx = 1, CLIP_MARKER_SLOT_COUNT do
|
||||
local t = clip_markers[idx]
|
||||
if type(t) == 'number' then
|
||||
table.insert(times, t)
|
||||
end
|
||||
end
|
||||
table.sort(times, function(a, b) return a < b end)
|
||||
if #times < 2 then
|
||||
return nil
|
||||
end
|
||||
local start_t = times[1]
|
||||
local end_t = times[2]
|
||||
if type(start_t) ~= 'number' or type(end_t) ~= 'number' then
|
||||
return nil
|
||||
end
|
||||
if end_t <= start_t then
|
||||
return nil
|
||||
end
|
||||
return _format_clip_marker_label(start_t) .. '-' .. _format_clip_marker_label(end_t)
|
||||
end
|
||||
|
||||
local function _audio_only()
|
||||
mp.commandv('set', 'vid', 'no')
|
||||
mp.osd_message('Audio-only playback enabled', 1)
|
||||
end
|
||||
|
||||
mp.register_script_message('medeia-audio-only', function()
|
||||
_audio_only()
|
||||
end)
|
||||
|
||||
local function _bind_image_key(key, name, fn, opts)
|
||||
opts = opts or {}
|
||||
if ImageControl.binding_names[name] then
|
||||
pcall(mp.remove_key_binding, name)
|
||||
ImageControl.binding_names[name] = nil
|
||||
end
|
||||
local ok, err = pcall(mp.add_forced_key_binding, key, name, fn, opts)
|
||||
if ok then
|
||||
ImageControl.binding_names[name] = true
|
||||
else
|
||||
mp.msg.warn('Failed to add image binding ' .. tostring(key) .. ': ' .. tostring(err))
|
||||
end
|
||||
end
|
||||
|
||||
local function _unbind_image_keys()
|
||||
for name in pairs(ImageControl.binding_names) do
|
||||
pcall(mp.remove_key_binding, name)
|
||||
ImageControl.binding_names[name] = nil
|
||||
end
|
||||
end
|
||||
|
||||
local function _activate_image_controls()
|
||||
if ImageControl.enabled then
|
||||
return
|
||||
end
|
||||
ImageControl.enabled = true
|
||||
_set_image_property(true)
|
||||
_enable_image_section()
|
||||
mp.osd_message('Image viewer controls enabled', 1.2)
|
||||
|
||||
_bind_image_key('LEFT', 'image-pan-left', function() _change_pan(-ImageControl.pan_step, 0) end, {repeatable=true})
|
||||
_bind_image_key('RIGHT', 'image-pan-right', function() _change_pan(ImageControl.pan_step, 0) end, {repeatable=true})
|
||||
_bind_image_key('s', 'image-pan-s', function() _change_pan(0, ImageControl.pan_step) end, {repeatable=true})
|
||||
_bind_image_key('a', 'image-pan-a', function() _change_pan(ImageControl.pan_step, 0) end, {repeatable=true})
|
||||
_bind_image_key('d', 'image-pan-d', function() _change_pan(-ImageControl.pan_step, 0) end, {repeatable=true})
|
||||
_bind_image_key('Shift+RIGHT', 'image-pan-right-fine', function() _change_pan(ImageControl.pan_step_slow, 0) end, {repeatable=true})
|
||||
_bind_image_key('Shift+UP', 'image-pan-up-fine', function() _change_pan(0, -ImageControl.pan_step_slow) end, {repeatable=true})
|
||||
_bind_image_key('Shift+DOWN', 'image-pan-down-fine', function() _change_pan(0, ImageControl.pan_step_slow) end, {repeatable=true})
|
||||
_bind_image_key('h', 'image-pan-h', function() _change_pan(-ImageControl.pan_step, 0) end, {repeatable=true})
|
||||
_bind_image_key('l', 'image-pan-l', function() _change_pan(ImageControl.pan_step, 0) end, {repeatable=true})
|
||||
_bind_image_key('j', 'image-pan-j', function() _change_pan(0, ImageControl.pan_step) end, {repeatable=true})
|
||||
_bind_image_key('k', 'image-pan-k', function() _change_pan(0, -ImageControl.pan_step) end, {repeatable=true})
|
||||
_bind_image_key('w', 'image-pan-w', function() _change_pan(0, -ImageControl.pan_step) end, {repeatable=true})
|
||||
_bind_image_key('s', 'image-pan-s', function() _change_pan(0, ImageControl.pan_step) end, {repeatable=true})
|
||||
_bind_image_key('a', 'image-pan-a', function() _change_pan(ImageControl.pan_step, 0) end, {repeatable=true})
|
||||
_bind_image_key('d', 'image-pan-d', function() _change_pan(-ImageControl.pan_step, 0) end, {repeatable=true})
|
||||
|
||||
_bind_image_key('=', 'image-zoom-in', function() _change_zoom(ImageControl.zoom_step) end, {repeatable=true})
|
||||
_disable_image_section()
|
||||
_bind_image_key('-', 'image-zoom-out', function() _change_zoom(-ImageControl.zoom_step) end, {repeatable=true})
|
||||
_bind_image_key('+', 'image-zoom-in-fine', function() _change_zoom(ImageControl.zoom_step_slow) end, {repeatable=true})
|
||||
_bind_image_key('_', 'image-zoom-out-fine', function() _change_zoom(-ImageControl.zoom_step_slow) end, {repeatable=true})
|
||||
_bind_image_key('0', 'image-zoom-reset', _reset_pan_zoom)
|
||||
_bind_image_key('Space', 'image-status', function() _show_image_status('Image status') end)
|
||||
_bind_image_key('f', 'image-screenshot', _capture_screenshot)
|
||||
_install_q_block()
|
||||
end
|
||||
|
||||
local function _deactivate_image_controls()
|
||||
if not ImageControl.enabled then
|
||||
return
|
||||
end
|
||||
ImageControl.enabled = false
|
||||
_set_image_property(false)
|
||||
_restore_q_default()
|
||||
_unbind_image_keys()
|
||||
mp.osd_message('Image viewer controls disabled', 1.0)
|
||||
mp.set_property('panscan', 0)
|
||||
mp.set_property('video-zoom', 0)
|
||||
mp.set_property_number('video-pan-x', 0)
|
||||
mp.set_property_number('video-pan-y', 0)
|
||||
mp.set_property('video-align-x', '0')
|
||||
mp.set_property('video-align-y', '0')
|
||||
end
|
||||
|
||||
local function _update_image_mode()
|
||||
local should_image = _get_current_item_is_image()
|
||||
if should_image then
|
||||
_activate_image_controls()
|
||||
else
|
||||
_deactivate_image_controls()
|
||||
end
|
||||
end
|
||||
|
||||
mp.register_event('file-loaded', function()
|
||||
_update_image_mode()
|
||||
end)
|
||||
|
||||
mp.register_event('shutdown', function()
|
||||
_restore_q_default()
|
||||
end)
|
||||
|
||||
_update_image_mode()
|
||||
|
||||
local function _extract_store_hash(target)
|
||||
if type(target) ~= 'string' or target == '' then
|
||||
return nil
|
||||
@@ -1554,6 +1925,152 @@ function M.open_load_url_prompt()
|
||||
end
|
||||
end
|
||||
|
||||
-- Open the command submenu with tailored cmdlets (screenshot, clip, trim prompt)
function M.open_cmd_menu()
    local self_script = mp.get_script_name()

    -- All entries route back to this script via the 'medios-cmd-exec' message.
    local function cmd_entry(title, hint, cmd)
        return {
            title = title,
            hint = hint,
            value = { 'script-message-to', self_script, 'medios-cmd-exec', utils.format_json({ cmd = cmd }) },
        }
    end

    local menu_data = {
        type = CMD_MENU_TYPE,
        title = 'Cmd',
        search_style = 'palette',
        search_debounce = 'submit',
        footnote = 'Type to filter or pick a command',
        items = {
            cmd_entry('Screenshot', 'Capture a screenshot', 'screenshot'),
            cmd_entry('Capture clip marker', 'Place a clip marker at current time', 'clip'),
            cmd_entry('Trim file', 'Trim current file (prompt for range)', 'trim'),
        },
    }

    if ensure_uosc_loaded() then
        mp.commandv('script-message-to', 'uosc', 'open-menu', utils.format_json(menu_data))
    else
        _lua_log('menu: uosc not available; cannot open cmd menu')
    end
end
|
||||
|
||||
-- Start a trim operation for the provided time range string.
-- Builds a cmdlet pipeline and launches it: store-backed items carry their
-- tags through and are re-added to the same store; plain targets are trimmed
-- by path (local file) or by resolved input stream (remote).
local function _start_trim_with_range(range)
    range = trim(tostring(range or ''))
    if range == '' then
        mp.osd_message('Trim cancelled (no range provided)', 3)
        return
    end

    local target = _current_target()
    if not target or target == '' then
        mp.osd_message('No file to trim', 3)
        return
    end

    local store_hash = _extract_store_hash(target)

    -- Prefer the resolved stream URL/filename so trimming can avoid full downloads where possible.
    local stream = trim(tostring(mp.get_property('stream-open-filename') or ''))
    if stream == '' then
        stream = tostring(target)
    end

    local pipeline_cmd
    if store_hash then
        -- Store item: emit tags for the hash, trim the stream, re-add to the store.
        pipeline_cmd =
            'get-tag -emit -store ' .. quote_pipeline_arg(store_hash.store) ..
            ' -query ' .. quote_pipeline_arg('hash:' .. store_hash.hash) ..
            ' | trim-file -input ' .. quote_pipeline_arg(stream) ..
            ' -range ' .. quote_pipeline_arg(range) ..
            ' | add-file -store ' .. quote_pipeline_arg(store_hash.store)
    else
        if utils.file_info(tostring(target)) then
            -- Local file on disk: trim by path.
            pipeline_cmd = 'trim-file -path ' .. quote_pipeline_arg(target) .. ' -range ' .. quote_pipeline_arg(range)
        else
            -- Non-local target: trim from the resolved input stream.
            pipeline_cmd = 'trim-file -input ' .. quote_pipeline_arg(stream) .. ' -range ' .. quote_pipeline_arg(range)
        end
    end

    -- Prefer the detached helper; fall back to the in-process pipeline runner.
    if not _run_pipeline_detached(pipeline_cmd) then
        M.run_pipeline(pipeline_cmd)
    end
    mp.osd_message('Trim started', 3)
end
|
||||
|
||||
-- Prompt for a trim range via a uosc input palette, unless clip markers
-- already define one (in which case the trim starts immediately).
function M.open_trim_prompt()
    local marker_range = _get_trim_range_from_clip_markers()
    if marker_range then
        _start_trim_with_range(marker_range)
        return
    end

    if not ensure_uosc_loaded() then
        _lua_log('menu: uosc not available; cannot open trim prompt')
        return
    end

    local self_script = mp.get_script_name()
    local menu_data = {
        type = TRIM_PROMPT_MENU_TYPE,
        title = 'Trim file',
        search_style = 'palette',
        search_debounce = 'submit',
        on_search = 'callback',
        footnote = "Enter time range (e.g. '00:03:45-00:03:55' or '1h3m-1h10m30s') and press Enter",
        callback = { self_script, 'medios-trim-run' },
        items = {
            {
                title = 'Enter range...',
                hint = 'Type range and press Enter',
                value = { 'script-message-to', self_script, 'medios-trim-run' },
            }
        }
    }
    mp.commandv('script-message-to', 'uosc', 'open-menu', utils.format_json(menu_data))
end
|
||||
|
||||
-- Handlers for the command submenu
mp.register_script_message('medios-open-cmd', function()
    M.open_cmd_menu()
end)

-- Execute a cmdlet chosen from the Cmd menu; payload is JSON like {"cmd": "screenshot"}.
mp.register_script_message('medios-cmd-exec', function(json)
    local ok, ev = pcall(utils.parse_json, json)
    if not ok or type(ev) ~= 'table' then
        return
    end
    local cmd = trim(tostring(ev.cmd or ''))
    if cmd == 'screenshot' then
        _capture_screenshot()
    elseif cmd == 'clip' then
        _capture_clip()
    elseif cmd == 'trim' then
        M.open_trim_prompt()
    else
        mp.osd_message('Unknown cmd ' .. tostring(cmd), 2)
    end
end)

-- Run a trim with the range typed into the uosc prompt.
-- Only 'search' events carry a query; anything else falls through with a nil
-- range, which _start_trim_with_range reports as a cancelled trim.
mp.register_script_message('medios-trim-run', function(json)
    local ok, ev = pcall(utils.parse_json, json)
    local range = nil
    if ok and type(ev) == 'table' then
        if ev.type == 'search' then
            range = trim(tostring(ev.query or ''))
        end
    end
    _start_trim_with_range(range)
end)

-- Open the "load URL" prompt from the main menu.
mp.register_script_message('medios-load-url', function()
    M.open_load_url_prompt()
end)
|
||||
@@ -1591,6 +2108,7 @@ function M.show_menu()
|
||||
{ title = "Get Metadata", value = "script-binding medios-info", hint = "Ctrl+i" },
|
||||
{ title = "Delete File", value = "script-binding medios-delete", hint = "Ctrl+Del" },
|
||||
{ title = "Load URL", value = {"script-message-to", mp.get_script_name(), "medios-load-url"} },
|
||||
{ title = "Cmd", value = {"script-message-to", mp.get_script_name(), "medios-open-cmd"}, hint = "Run quick commands (screenshot, trim, etc)" },
|
||||
{ title = "Download", value = {"script-message-to", mp.get_script_name(), "medios-download-current"} },
|
||||
{ title = "Change Format", value = {"script-message-to", mp.get_script_name(), "medios-change-format-current"} },
|
||||
}
|
||||
@@ -1614,6 +2132,8 @@ mp.add_key_binding("ctrl+del", "medios-delete", M.delete_current_file)
|
||||
mp.add_key_binding("l", "medeia-lyric-toggle", lyric_toggle)
|
||||
mp.add_key_binding("L", "medeia-lyric-toggle-shift", lyric_toggle)
|
||||
|
||||
-- Cover art observers removed (disabled per user request)
|
||||
|
||||
-- Start the persistent pipeline helper eagerly at launch.
|
||||
-- This avoids spawning Python per command and works cross-platform via MPV IPC.
|
||||
mp.add_timeout(0, function()
|
||||
|
||||
150
MPV/lyric.py
150
MPV/lyric.py
@@ -467,6 +467,66 @@ def _extract_lrc_from_notes(notes: Dict[str, str]) -> Optional[str]:
|
||||
return text if text.strip() else None
|
||||
|
||||
|
||||
def _extract_sub_from_notes(notes: Dict[str, str]) -> Optional[str]:
|
||||
"""Return raw subtitle text from the note named 'sub'."""
|
||||
if not isinstance(notes, dict) or not notes:
|
||||
return None
|
||||
|
||||
raw = None
|
||||
for k, v in notes.items():
|
||||
if not isinstance(k, str):
|
||||
continue
|
||||
if k.strip() == "sub":
|
||||
raw = v
|
||||
break
|
||||
|
||||
if not isinstance(raw, str):
|
||||
return None
|
||||
|
||||
text = raw.strip("\ufeff\r\n")
|
||||
return text if text.strip() else None
|
||||
|
||||
|
||||
def _infer_sub_extension(text: str) -> str:
|
||||
# Best-effort: mpv generally understands SRT/VTT; choose based on content.
|
||||
t = (text or "").lstrip("\ufeff\r\n").lstrip()
|
||||
if t.upper().startswith("WEBVTT"):
|
||||
return ".vtt"
|
||||
if "-->" in t:
|
||||
# SRT typically uses commas for milliseconds, VTT uses dots.
|
||||
if re.search(r"\d\d:\d\d:\d\d,\d\d\d\s*-->\s*\d\d:\d\d:\d\d,\d\d\d", t):
|
||||
return ".srt"
|
||||
return ".vtt"
|
||||
return ".vtt"
|
||||
|
||||
|
||||
def _write_temp_sub_file(*, key: str, text: str) -> Path:
    """Persist subtitle text to a content-addressed temp file and return its path.

    The filename embeds SHA-1 digests of both the key and the key+content, so
    any content change yields a new path — which forces mpv to reload the
    subtitle instead of reusing a stale track.
    """
    out_dir = Path(tempfile.gettempdir()) / "medeia-mpv-notes"
    out_dir.mkdir(parents=True, exist_ok=True)

    body = text or ""
    content_digest = hashlib.sha1(
        (key + "\n" + body).encode("utf-8", errors="ignore")
    ).hexdigest()[:16]
    key_digest = hashlib.sha1((key or "").encode("utf-8", errors="ignore")).hexdigest()[:12]
    suffix = _infer_sub_extension(text)

    target = (out_dir / f"sub-{key_digest}-{content_digest}{suffix}").resolve()
    target.write_text(body, encoding="utf-8", errors="replace")
    return target
|
||||
|
||||
|
||||
def _try_remove_selected_external_sub(client: "MPVIPCClient") -> None:
    """Best-effort: ask mpv to drop the currently selected external subtitle."""
    payload = {"command": ["sub-remove"]}
    try:
        client.send_command(payload)
    except Exception:
        # IPC failures are non-fatal here; the subtitle simply stays loaded.
        return
|
||||
|
||||
|
||||
def _try_add_external_sub(client: "MPVIPCClient", path: Path) -> None:
    """Best-effort: load *path* into mpv as the selected external subtitle.

    The track is given the title 'medeia-sub' so it can be identified later.
    """
    payload = {"command": ["sub-add", str(path), "select", "medeia-sub"]}
    try:
        client.send_command(payload)
    except Exception:
        # IPC failures are non-fatal; playback continues without the sub.
        return
|
||||
|
||||
|
||||
def _is_stream_target(target: str) -> bool:
|
||||
"""Return True when mpv's 'path' is not a local filesystem file.
|
||||
|
||||
@@ -726,7 +786,7 @@ def _infer_hash_for_target(target: str) -> Optional[str]:
|
||||
|
||||
|
||||
def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] = None) -> int:
|
||||
"""Auto mode: track mpv's current file and render lyrics from store notes (note name: 'lyric')."""
|
||||
"""Auto mode: track mpv's current file and render lyrics (note: 'lyric') or load subtitles (note: 'sub')."""
|
||||
cfg = config or {}
|
||||
|
||||
client = mpv.client()
|
||||
@@ -742,6 +802,8 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
current_key: Optional[str] = None
|
||||
current_backend: Optional[Any] = None
|
||||
last_loaded_key: Optional[str] = None
|
||||
last_loaded_mode: Optional[str] = None # 'lyric' | 'sub'
|
||||
last_loaded_sub_path: Optional[Path] = None
|
||||
last_fetch_attempt_key: Optional[str] = None
|
||||
last_fetch_attempt_at: float = 0.0
|
||||
|
||||
@@ -808,6 +870,9 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
_osd_overlay_clear(client)
|
||||
except Exception:
|
||||
pass
|
||||
if last_loaded_sub_path is not None:
|
||||
_try_remove_selected_external_sub(client)
|
||||
last_loaded_sub_path = None
|
||||
last_target = target
|
||||
current_store_name = None
|
||||
current_file_hash = None
|
||||
@@ -816,6 +881,7 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
entries = []
|
||||
times = []
|
||||
last_loaded_key = None
|
||||
last_loaded_mode = None
|
||||
time.sleep(poll_s)
|
||||
continue
|
||||
|
||||
@@ -833,6 +899,10 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
if last_loaded_key is not None:
|
||||
_osd_overlay_clear(client)
|
||||
last_loaded_key = None
|
||||
last_loaded_mode = None
|
||||
if last_loaded_sub_path is not None:
|
||||
_try_remove_selected_external_sub(client)
|
||||
last_loaded_sub_path = None
|
||||
time.sleep(poll_s)
|
||||
continue
|
||||
|
||||
@@ -850,6 +920,10 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
if last_loaded_key is not None:
|
||||
_osd_overlay_clear(client)
|
||||
last_loaded_key = None
|
||||
last_loaded_mode = None
|
||||
if last_loaded_sub_path is not None:
|
||||
_try_remove_selected_external_sub(client)
|
||||
last_loaded_sub_path = None
|
||||
time.sleep(poll_s)
|
||||
continue
|
||||
|
||||
@@ -869,6 +943,10 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
if last_loaded_key is not None:
|
||||
_osd_overlay_clear(client)
|
||||
last_loaded_key = None
|
||||
last_loaded_mode = None
|
||||
if last_loaded_sub_path is not None:
|
||||
_try_remove_selected_external_sub(client)
|
||||
last_loaded_sub_path = None
|
||||
time.sleep(poll_s)
|
||||
continue
|
||||
|
||||
@@ -887,6 +965,10 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
if last_loaded_key is not None:
|
||||
_osd_overlay_clear(client)
|
||||
last_loaded_key = None
|
||||
last_loaded_mode = None
|
||||
if last_loaded_sub_path is not None:
|
||||
_try_remove_selected_external_sub(client)
|
||||
last_loaded_sub_path = None
|
||||
time.sleep(poll_s)
|
||||
continue
|
||||
|
||||
@@ -913,6 +995,10 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
if last_loaded_key is not None:
|
||||
_osd_overlay_clear(client)
|
||||
last_loaded_key = None
|
||||
last_loaded_mode = None
|
||||
if last_loaded_sub_path is not None:
|
||||
_try_remove_selected_external_sub(client)
|
||||
last_loaded_sub_path = None
|
||||
time.sleep(poll_s)
|
||||
continue
|
||||
|
||||
@@ -930,9 +1016,41 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
except Exception:
|
||||
_log("Loaded notes keys: <error>")
|
||||
|
||||
lrc_text = _extract_lrc_from_notes(notes)
|
||||
if not lrc_text:
|
||||
_log("No lyric note found (note name: 'lyric')")
|
||||
sub_text = _extract_sub_from_notes(notes)
|
||||
if sub_text:
|
||||
# Treat subtitles as an alternative to lyrics; do not show the lyric overlay.
|
||||
try:
|
||||
_osd_overlay_clear(client)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
sub_path = _write_temp_sub_file(key=current_key, text=sub_text)
|
||||
except Exception as exc:
|
||||
_log(f"Failed to write sub note temp file: {exc}")
|
||||
sub_path = None
|
||||
|
||||
if sub_path is not None:
|
||||
# If we previously loaded a sub, remove it first to avoid stacking.
|
||||
if last_loaded_sub_path is not None:
|
||||
_try_remove_selected_external_sub(client)
|
||||
_try_add_external_sub(client, sub_path)
|
||||
last_loaded_sub_path = sub_path
|
||||
|
||||
entries = []
|
||||
times = []
|
||||
last_loaded_key = current_key
|
||||
last_loaded_mode = "sub"
|
||||
|
||||
else:
|
||||
# Switching away from sub-note mode: best-effort unload the selected external subtitle.
|
||||
if last_loaded_mode == "sub" and last_loaded_sub_path is not None:
|
||||
_try_remove_selected_external_sub(client)
|
||||
last_loaded_sub_path = None
|
||||
|
||||
lrc_text = _extract_lrc_from_notes(notes)
|
||||
if not lrc_text:
|
||||
_log("No lyric note found (note name: 'lyric')")
|
||||
|
||||
# Auto-fetch path: fetch and persist lyrics into the note named 'lyric'.
|
||||
# Throttle attempts per key to avoid hammering APIs.
|
||||
@@ -981,18 +1099,20 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
|
||||
else:
|
||||
_log("Autofetch: no lyrics found")
|
||||
|
||||
entries = []
|
||||
times = []
|
||||
if last_loaded_key is not None:
|
||||
_osd_overlay_clear(client)
|
||||
last_loaded_key = None
|
||||
else:
|
||||
_log(f"Loaded lyric note ({len(lrc_text)} chars)")
|
||||
entries = []
|
||||
times = []
|
||||
if last_loaded_key is not None:
|
||||
_osd_overlay_clear(client)
|
||||
last_loaded_key = None
|
||||
last_loaded_mode = None
|
||||
else:
|
||||
_log(f"Loaded lyric note ({len(lrc_text)} chars)")
|
||||
|
||||
parsed = parse_lrc(lrc_text)
|
||||
entries = parsed
|
||||
times = [e.time_s for e in entries]
|
||||
last_loaded_key = current_key
|
||||
parsed = parse_lrc(lrc_text)
|
||||
entries = parsed
|
||||
times = [e.time_s for e in entries]
|
||||
last_loaded_key = current_key
|
||||
last_loaded_mode = "lyric"
|
||||
|
||||
try:
|
||||
# mpv returns None when idle/no file.
|
||||
|
||||
BIN
MPV/splash.png
Normal file
BIN
MPV/splash.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 1.1 MiB |
48
SYS/utils.py
48
SYS/utils.py
@@ -3,7 +3,12 @@ from __future__ import annotations
|
||||
|
||||
import json
|
||||
import hashlib
|
||||
import ffmpeg
|
||||
import subprocess
|
||||
import shutil
|
||||
try:
|
||||
import ffmpeg # type: ignore
|
||||
except Exception:
|
||||
ffmpeg = None # type: ignore
|
||||
import base64
|
||||
import logging
|
||||
import time
|
||||
@@ -130,10 +135,45 @@ def create_tags_sidecar(file_path: Path, tags: set) -> None:
|
||||
|
||||
|
||||
def ffprobe(file_path: str) -> dict:
|
||||
probe = ffmpeg.probe(file_path)
|
||||
metadata = {}
|
||||
"""Probe a media file and return a metadata dictionary.
|
||||
|
||||
# Format-level info
|
||||
This function prefers the python `ffmpeg` module (ffmpeg-python) when available.
|
||||
If that is not present, it will attempt to call the external `ffprobe` binary if found
|
||||
on PATH. If neither is available or probing fails, an empty dict is returned.
|
||||
"""
|
||||
probe = None
|
||||
|
||||
# Try python ffmpeg module first
|
||||
if ffmpeg is not None:
|
||||
try:
|
||||
probe = ffmpeg.probe(file_path)
|
||||
except Exception as exc: # pragma: no cover - environment dependent
|
||||
_format_logger.debug("ffmpeg.probe failed: %s", exc)
|
||||
probe = None
|
||||
|
||||
# Fall back to external ffprobe if available
|
||||
if probe is None:
|
||||
ffprobe_cmd = shutil.which("ffprobe")
|
||||
if ffprobe_cmd:
|
||||
try:
|
||||
proc = subprocess.run(
|
||||
[ffprobe_cmd, "-v", "quiet", "-print_format", "json", "-show_format", "-show_streams", str(file_path)],
|
||||
check=True,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
)
|
||||
probe = json.loads(proc.stdout)
|
||||
except Exception as exc: # pragma: no cover - environment dependent
|
||||
_format_logger.debug("External ffprobe failed: %s", exc)
|
||||
probe = None
|
||||
else:
|
||||
_format_logger.debug("No ffmpeg Python module and no ffprobe binary found")
|
||||
return {}
|
||||
|
||||
if not isinstance(probe, dict):
|
||||
return {}
|
||||
|
||||
metadata = {}
|
||||
fmt = probe.get("format", {})
|
||||
metadata["duration"] = float(fmt.get("duration", 0)) if "duration" in fmt else None
|
||||
metadata["size"] = int(fmt.get("size", 0)) if "size" in fmt else None
|
||||
|
||||
@@ -384,6 +384,8 @@ class Folder(Store):
|
||||
try:
|
||||
shutil.move(str(file_path), str(save_file))
|
||||
debug(f"Local move: {save_file}", file=sys.stderr)
|
||||
# After a move, the original path no longer exists; use destination for subsequent ops.
|
||||
file_path = save_file
|
||||
except Exception:
|
||||
_copy_with_progress(file_path, save_file, label=f"folder:{self._name} move")
|
||||
try:
|
||||
@@ -395,6 +397,7 @@ class Folder(Store):
|
||||
except Exception:
|
||||
pass
|
||||
debug(f"Local move (copy+delete): {save_file}", file=sys.stderr)
|
||||
file_path = save_file
|
||||
else:
|
||||
_copy_with_progress(file_path, save_file, label=f"folder:{self._name} copy")
|
||||
debug(f"Local copy: {save_file}", file=sys.stderr)
|
||||
@@ -418,7 +421,7 @@ class Folder(Store):
|
||||
db.save_metadata(save_file, {
|
||||
'hash': file_hash,
|
||||
'ext': ext_clean,
|
||||
'size': file_path.stat().st_size,
|
||||
'size': save_file.stat().st_size,
|
||||
'duration': duration_value,
|
||||
})
|
||||
|
||||
@@ -441,6 +444,7 @@ class Folder(Store):
|
||||
"""Search local database for files by title tag or filename."""
|
||||
from fnmatch import fnmatch
|
||||
from API.folder import DatabaseAPI
|
||||
import unicodedata
|
||||
|
||||
limit = kwargs.get("limit")
|
||||
try:
|
||||
@@ -453,6 +457,30 @@ class Folder(Store):
|
||||
query = query.lower()
|
||||
query_lower = query # Ensure query_lower is defined for all code paths
|
||||
|
||||
def _normalize_namespace_text(text: str, *, allow_wildcards: bool) -> str:
|
||||
"""Normalize tag namespace values for consistent matching.
|
||||
|
||||
Removes control/format chars (e.g. zero-width spaces) that frequently appear in scraped tags,
|
||||
collapses whitespace, and lowercases.
|
||||
"""
|
||||
s = str(text or "")
|
||||
# Normalize newlines/tabs/etc to spaces early.
|
||||
s = s.replace("\r", " ").replace("\n", " ").replace("\t", " ")
|
||||
# Drop control / format chars (Cc/Cf) while preserving wildcard tokens when requested.
|
||||
cleaned_chars: list[str] = []
|
||||
for ch in s:
|
||||
if allow_wildcards and ch in {"*", "?"}:
|
||||
cleaned_chars.append(ch)
|
||||
continue
|
||||
cat = unicodedata.category(ch)
|
||||
if cat in {"Cc", "Cf"}:
|
||||
continue
|
||||
cleaned_chars.append(ch)
|
||||
s = "".join(cleaned_chars)
|
||||
# Collapse any remaining unicode whitespace runs.
|
||||
s = " ".join(s.split())
|
||||
return s.strip().lower()
|
||||
|
||||
def _normalize_ext_filter(value: str) -> str:
|
||||
v = str(value or "").strip().lower().lstrip('.')
|
||||
v = "".join(ch for ch in v if ch.isalnum())
|
||||
@@ -648,8 +676,9 @@ class Folder(Store):
|
||||
tag_lower = str(tag_val).lower()
|
||||
if not tag_lower.startswith(f"{namespace}:"):
|
||||
continue
|
||||
value = tag_lower[len(namespace)+1:]
|
||||
if fnmatch(value, pattern):
|
||||
value = _normalize_namespace_text(tag_lower[len(namespace) + 1 :], allow_wildcards=False)
|
||||
pat = _normalize_namespace_text(pattern, allow_wildcards=True)
|
||||
if fnmatch(value, pat):
|
||||
matched.add(file_hash)
|
||||
return matched
|
||||
|
||||
@@ -838,8 +867,9 @@ class Folder(Store):
|
||||
for tag in tags:
|
||||
tag_lower = tag.lower()
|
||||
if tag_lower.startswith(f"{namespace}:"):
|
||||
value = tag_lower[len(namespace)+1:]
|
||||
if fnmatch(value, pattern):
|
||||
value = _normalize_namespace_text(tag_lower[len(namespace) + 1 :], allow_wildcards=False)
|
||||
pat = _normalize_namespace_text(pattern, allow_wildcards=True)
|
||||
if fnmatch(value, pat):
|
||||
if ext_hashes is not None and file_hash not in ext_hashes:
|
||||
break
|
||||
file_path = Path(file_path_str)
|
||||
@@ -1636,20 +1666,43 @@ class Folder(Store):
|
||||
"""
|
||||
from API.folder import API_folder_store
|
||||
try:
|
||||
file_path = Path(file_identifier)
|
||||
|
||||
# Delete from database
|
||||
with API_folder_store(Path(self._location)) as db:
|
||||
db.delete_file(file_path)
|
||||
|
||||
# Delete the actual file from disk
|
||||
if file_path.exists():
|
||||
file_path.unlink()
|
||||
debug(f"Deleted file: {file_path}")
|
||||
return True
|
||||
else:
|
||||
debug(f"File not found on disk: {file_path}")
|
||||
return True # Already gone
|
||||
if not self._location:
|
||||
return False
|
||||
|
||||
raw = str(file_identifier or "").strip()
|
||||
if not raw:
|
||||
return False
|
||||
|
||||
store_root = Path(self._location).expanduser()
|
||||
|
||||
# Support deletion by hash (common for store items where `path` is the hash).
|
||||
file_hash = _normalize_hash(raw)
|
||||
resolved_path: Optional[Path] = None
|
||||
with API_folder_store(store_root) as db:
|
||||
if file_hash:
|
||||
resolved_path = db.search_hash(file_hash)
|
||||
else:
|
||||
p = Path(raw)
|
||||
resolved_path = p if p.is_absolute() else (store_root / p)
|
||||
|
||||
if resolved_path is None:
|
||||
debug(f"delete_file: could not resolve identifier: {raw}")
|
||||
return False
|
||||
|
||||
# Delete from database (also cleans up relationship backlinks).
|
||||
db.delete_file(resolved_path)
|
||||
|
||||
# Delete the actual file from disk (best-effort).
|
||||
try:
|
||||
if resolved_path.exists():
|
||||
resolved_path.unlink()
|
||||
debug(f"Deleted file: {resolved_path}")
|
||||
else:
|
||||
debug(f"File not found on disk: {resolved_path}")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return True
|
||||
except Exception as exc:
|
||||
debug(f"delete_file failed: {exc}")
|
||||
return False
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import shutil
|
||||
import sys
|
||||
from collections.abc import Iterable as IterableABC
|
||||
|
||||
@@ -1275,6 +1276,233 @@ def get_pipe_object_path(pipe_object: Any) -> Optional[str]:
|
||||
return None
|
||||
|
||||
|
||||
def _extract_flag_value(args: Sequence[str], *flags: str) -> Optional[str]:
|
||||
"""Return the value for the first matching flag in args.
|
||||
|
||||
This is intentionally lightweight (no cmdlet spec required) so callers in CLI/pipeline
|
||||
can share the same behavior.
|
||||
"""
|
||||
if not args:
|
||||
return None
|
||||
want = {str(f).strip().lower() for f in flags if str(f).strip()}
|
||||
if not want:
|
||||
return None
|
||||
try:
|
||||
tokens = [str(a) for a in args]
|
||||
except Exception:
|
||||
tokens = list(args) # type: ignore[list-item]
|
||||
for i, tok in enumerate(tokens):
|
||||
low = str(tok).strip().lower()
|
||||
if low in want:
|
||||
if i + 1 >= len(tokens):
|
||||
return None
|
||||
nxt = str(tokens[i + 1])
|
||||
# Allow paths like "-"? Treat missing value as None.
|
||||
if not nxt.strip():
|
||||
return None
|
||||
# Don't consume another flag as value.
|
||||
if nxt.startswith("-"):
|
||||
return None
|
||||
return nxt
|
||||
return None
|
||||
|
||||
|
||||
def _unique_destination_path(dest: Path) -> Path:
|
||||
"""Generate a non-colliding destination path by appending " (N)"."""
|
||||
try:
|
||||
if not dest.exists():
|
||||
return dest
|
||||
except Exception:
|
||||
return dest
|
||||
|
||||
parent = dest.parent
|
||||
stem = dest.stem
|
||||
suffix = dest.suffix
|
||||
for i in range(1, 10_000):
|
||||
candidate = parent / f"{stem} ({i}){suffix}"
|
||||
try:
|
||||
if not candidate.exists():
|
||||
return candidate
|
||||
except Exception:
|
||||
return candidate
|
||||
return dest
|
||||
|
||||
|
||||
def apply_output_path_from_pipeobjects(
    *,
    cmd_name: str,
    args: Sequence[str],
    emits: Sequence[Any],
) -> List[Any]:
    """If the user supplied `-path`, move emitted temp/PATH files there.

    This enables a dynamic pattern:
    - Any cmdlet can include `SharedArgs.PATH`.
    - If it emits a file-backed PipeObject (`path` exists on disk) and the item is
      a temp/PATH artifact, then `-path <dest>` will save it to that location.

    Rules:
    - Only affects items whose `action` matches the current cmdlet.
    - Only affects items that look like local artifacts (`is_temp` True or `store` == PATH).
    - Updates the emitted object's `path` (and `target` when it points at the same file).
    """
    # No -path flag means nothing to do; return a shallow copy of the emits.
    dest_raw = _extract_flag_value(args, "-path", "--path")
    if not dest_raw:
        return list(emits or [])

    # Cmdlet names are compared in dash-form, lowercase (e.g. "trim-file").
    cmd_norm = str(cmd_name or "").replace("_", "-").strip().lower()
    if not cmd_norm:
        return list(emits or [])

    # A trailing slash/backslash on the raw value forces directory semantics,
    # even after Path() normalization would have dropped it.
    try:
        dest_hint_dir = str(dest_raw).endswith(("/", "\\"))
    except Exception:
        dest_hint_dir = False

    try:
        dest_path = Path(str(dest_raw)).expanduser()
    except Exception:
        return list(emits or [])

    items = list(emits or [])
    # Identify which emitted items are actually file artifacts produced by this cmdlet.
    artifact_indices: List[int] = []
    artifact_paths: List[Path] = []
    for idx, item in enumerate(items):
        # Only items tagged "cmdlet:<name>" for THIS cmdlet are eligible.
        action = str(get_field(item, "action", "") or "").strip().lower()
        if not action.startswith("cmdlet:"):
            continue
        action_name = action.split(":", 1)[-1].strip().lower()
        if action_name != cmd_norm:
            continue

        # Only local artifacts: marked temp, or stored under the PATH store.
        store = str(get_field(item, "store", "") or "").strip().lower()
        is_temp = bool(get_field(item, "is_temp", False))
        if not (is_temp or store == "path"):
            continue

        # The artifact must resolve to an existing regular file on disk.
        src_str = get_pipe_object_path(item)
        if not src_str:
            continue
        try:
            src = Path(str(src_str)).expanduser()
        except Exception:
            continue
        try:
            if not src.exists() or not src.is_file():
                continue
        except Exception:
            continue

        artifact_indices.append(idx)
        artifact_paths.append(src)

    if not artifact_indices:
        return items

    # Decide whether the destination is a directory or a single file.
    if len(artifact_indices) > 1:
        # Multiple artifacts: always treat destination as a directory.
        # A destination with a file suffix is interpreted as "its parent dir".
        if dest_path.suffix:
            dest_dir = dest_path.parent
        else:
            dest_dir = dest_path
        try:
            dest_dir.mkdir(parents=True, exist_ok=True)
        except Exception as exc:
            log(f"Failed to create destination directory: {dest_dir} ({exc})", file=sys.stderr)
            return items

        for idx, src in zip(artifact_indices, artifact_paths):
            final = dest_dir / src.name
            final = _unique_destination_path(final)
            try:
                # Skip no-op moves (source already at the destination).
                if src.resolve() == final.resolve():
                    continue
            except Exception:
                pass
            try:
                shutil.move(str(src), str(final))
            except Exception as exc:
                # Per-item failure: log and leave this item pointing at its
                # original path; continue with the remaining artifacts.
                log(f"Failed to save output to {final}: {exc}", file=sys.stderr)
                continue
            _apply_saved_path_update(items[idx], old_path=str(src), new_path=str(final))

        return items

    # Single artifact: destination can be a directory or a concrete file path.
    src = artifact_paths[0]
    idx = artifact_indices[0]
    final: Path
    try:
        if dest_hint_dir or (dest_path.exists() and dest_path.is_dir()):
            final = dest_path / src.name
        else:
            final = dest_path
    except Exception:
        final = dest_path

    try:
        final.parent.mkdir(parents=True, exist_ok=True)
    except Exception as exc:
        log(f"Failed to create destination directory: {final.parent} ({exc})", file=sys.stderr)
        return items

    final = _unique_destination_path(final)
    try:
        # Move only when source and destination differ; resolve() failures
        # (e.g. exotic paths) fall through to the error log below.
        if src.resolve() != final.resolve():
            shutil.move(str(src), str(final))
    except Exception as exc:
        log(f"Failed to save output to {final}: {exc}", file=sys.stderr)
        return items

    _apply_saved_path_update(items[idx], old_path=str(src), new_path=str(final))
    return items
|
||||
|
||||
|
||||
def _apply_saved_path_update(item: Any, *, old_path: str, new_path: str) -> None:
|
||||
"""Update a PipeObject-like item after its backing file has moved."""
|
||||
old_str = str(old_path)
|
||||
new_str = str(new_path)
|
||||
if isinstance(item, dict):
|
||||
try:
|
||||
if str(item.get("path") or "") == old_str:
|
||||
item["path"] = new_str
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
if str(item.get("target") or "") == old_str:
|
||||
item["target"] = new_str
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
extra = item.get("extra")
|
||||
if isinstance(extra, dict):
|
||||
if str(extra.get("target") or "") == old_str:
|
||||
extra["target"] = new_str
|
||||
if str(extra.get("path") or "") == old_str:
|
||||
extra["path"] = new_str
|
||||
except Exception:
|
||||
pass
|
||||
return
|
||||
|
||||
# models.PipeObject or PipeObject-ish
|
||||
try:
|
||||
if getattr(item, "path", None) == old_str:
|
||||
setattr(item, "path", new_str)
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
extra = getattr(item, "extra", None)
|
||||
if isinstance(extra, dict):
|
||||
if str(extra.get("target") or "") == old_str:
|
||||
extra["target"] = new_str
|
||||
if str(extra.get("path") or "") == old_str:
|
||||
extra["path"] = new_str
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def get_pipe_object_hash(pipe_object: Any) -> Optional[str]:
|
||||
"""Extract file hash from PipeObject, dict, or pipeline-friendly object."""
|
||||
if pipe_object is None:
|
||||
|
||||
@@ -123,6 +123,70 @@ class Delete_File(sh.Cmdlet):
|
||||
local_deleted = False
|
||||
local_target = isinstance(target, str) and target.strip() and not str(target).lower().startswith(("http://", "https://"))
|
||||
deleted_rows: List[Dict[str, Any]] = []
|
||||
|
||||
# If this item references a configured non-Hydrus store backend, prefer deleting
|
||||
# via the backend API. This supports store items where `path`/`target` is the hash.
|
||||
if conserve != "local" and store and (not is_hydrus_store):
|
||||
try:
|
||||
registry = Store(config)
|
||||
if registry.is_available(str(store)):
|
||||
backend = registry[str(store)]
|
||||
|
||||
# Prefer hash when available.
|
||||
hash_candidate = sh.normalize_hash(hash_hex_raw) if hash_hex_raw else None
|
||||
if not hash_candidate and isinstance(target, str):
|
||||
hash_candidate = sh.normalize_hash(target)
|
||||
|
||||
resolved_path = None
|
||||
try:
|
||||
if hash_candidate and hasattr(backend, "get_file"):
|
||||
resolved_path = backend.get_file(hash_candidate)
|
||||
except Exception:
|
||||
resolved_path = None
|
||||
|
||||
identifier = hash_candidate or (str(target).strip() if isinstance(target, str) else "")
|
||||
if identifier:
|
||||
deleter = getattr(backend, "delete_file", None)
|
||||
if callable(deleter) and bool(deleter(identifier)):
|
||||
local_deleted = True
|
||||
|
||||
size_bytes: int | None = None
|
||||
try:
|
||||
if resolved_path is not None and isinstance(resolved_path, Path) and resolved_path.exists():
|
||||
size_bytes = int(resolved_path.stat().st_size)
|
||||
except Exception:
|
||||
size_bytes = None
|
||||
|
||||
deleted_rows.append(
|
||||
{
|
||||
"title": str(title_val).strip() if title_val else (resolved_path.name if resolved_path else identifier),
|
||||
"store": store_label,
|
||||
"hash": hash_candidate or (hash_hex or ""),
|
||||
"size_bytes": size_bytes,
|
||||
"ext": _get_ext_from_item() or (resolved_path.suffix.lstrip(".") if resolved_path else ""),
|
||||
}
|
||||
)
|
||||
|
||||
# Best-effort remove sidecars if we know the resolved path.
|
||||
try:
|
||||
if resolved_path is not None and isinstance(resolved_path, Path):
|
||||
for sidecar in (
|
||||
resolved_path.with_suffix(".tag"),
|
||||
resolved_path.with_suffix(".metadata"),
|
||||
resolved_path.with_suffix(".notes"),
|
||||
):
|
||||
try:
|
||||
if sidecar.exists() and sidecar.is_file():
|
||||
sidecar.unlink()
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Skip legacy local-path deletion below.
|
||||
local_target = False
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if conserve != "local" and local_target:
|
||||
path = Path(str(target))
|
||||
|
||||
@@ -108,6 +108,21 @@ def _set_pipe_percent(percent: int) -> None:
|
||||
return
|
||||
|
||||
|
||||
def _print_table_suspended(table: Any) -> None:
|
||||
"""Print a Rich table while pausing Live progress if active."""
|
||||
suspend = getattr(pipeline_context, "suspend_live_progress", None)
|
||||
cm: AbstractContextManager[Any] = nullcontext()
|
||||
if callable(suspend):
|
||||
try:
|
||||
maybe_cm = suspend()
|
||||
if maybe_cm is not None:
|
||||
cm = maybe_cm # type: ignore[assignment]
|
||||
except Exception:
|
||||
cm = nullcontext()
|
||||
with cm:
|
||||
get_stderr_console().print(table)
|
||||
|
||||
|
||||
# Minimal inlined helpers from helper/download.py (is_url_supported_by_ytdlp, list_formats)
|
||||
try:
|
||||
import yt_dlp # type: ignore
|
||||
@@ -1231,6 +1246,18 @@ class Download_Media(Cmdlet):
|
||||
return sh.parse_single_hash_query(f"hash:{hash_candidate}")
|
||||
|
||||
# Backwards-compatible: treat a non-keyed query as a hash query.
|
||||
# If the query uses keyed specs (e.g. format:, item:, clip:), do NOT attempt
|
||||
# to interpret the whole string as a hash.
|
||||
try:
|
||||
has_non_hash_keys = bool(
|
||||
query_keyed
|
||||
and isinstance(query_keyed, dict)
|
||||
and any(k for k in query_keyed.keys() if str(k).strip().lower() != "hash")
|
||||
)
|
||||
except Exception:
|
||||
has_non_hash_keys = False
|
||||
if has_non_hash_keys:
|
||||
return None
|
||||
return sh.parse_single_hash_query(str(query_spec)) if query_spec else None
|
||||
except Exception:
|
||||
return None
|
||||
@@ -1315,6 +1342,117 @@ class Download_Media(Cmdlet):
|
||||
formats_cache[key] = fmts
|
||||
return fmts
|
||||
|
||||
def _is_browseable_format(self, fmt: Any) -> bool:
|
||||
"""Return True for formats that are sensible to show in the format table."""
|
||||
if not isinstance(fmt, dict):
|
||||
return False
|
||||
format_id = str(fmt.get("format_id") or "").strip()
|
||||
if not format_id:
|
||||
return False
|
||||
ext = str(fmt.get("ext") or "").strip().lower()
|
||||
if ext in {"mhtml", "json"}:
|
||||
return False
|
||||
note = str(fmt.get("format_note") or "").lower()
|
||||
if "storyboard" in note:
|
||||
return False
|
||||
if format_id.lower().startswith("sb"):
|
||||
return False
|
||||
vcodec = str(fmt.get("vcodec", "none"))
|
||||
acodec = str(fmt.get("acodec", "none"))
|
||||
# Keep anything with at least one stream.
|
||||
return not (vcodec == "none" and acodec == "none")
|
||||
|
||||
def _format_id_for_query_index(
|
||||
self,
|
||||
query_format: str,
|
||||
url: str,
|
||||
formats_cache: Dict[str, Optional[List[Dict[str, Any]]]],
|
||||
ytdlp_tool: YtDlpTool,
|
||||
) -> Optional[str]:
|
||||
"""Resolve a numeric 'format:N' query into an actual yt-dlp format selector.
|
||||
|
||||
Acceptable forms: '7', '#7', ' 7 ' (whitespace allowed). Uses the same
|
||||
browseable filtering rules as the interactive table and selects the
|
||||
1-based index. Returns a yt-dlp format string (possibly with +ba added
|
||||
for video-only formats). Raises ValueError when the index is invalid or
|
||||
formats cannot be listed.
|
||||
"""
|
||||
import re
|
||||
|
||||
if not query_format or not re.match(r"^\s*#?\d+\s*$", str(query_format)):
|
||||
return None
|
||||
|
||||
try:
|
||||
idx = int(str(query_format).lstrip("#").strip())
|
||||
except Exception:
|
||||
raise ValueError(f"Invalid format index: {query_format}")
|
||||
|
||||
fmts = self._list_formats_cached(
|
||||
url,
|
||||
playlist_items_value=None,
|
||||
formats_cache=formats_cache,
|
||||
ytdlp_tool=ytdlp_tool,
|
||||
)
|
||||
if not fmts:
|
||||
raise ValueError("Unable to list formats for the URL; cannot resolve numeric format index")
|
||||
|
||||
candidate_formats = [f for f in fmts if self._is_browseable_format(f)]
|
||||
filtered_formats = candidate_formats if candidate_formats else list(fmts)
|
||||
|
||||
if not filtered_formats:
|
||||
raise ValueError("No formats available for selection")
|
||||
|
||||
if idx <= 0 or idx > len(filtered_formats):
|
||||
raise ValueError(f"Format index {idx} out of range (1..{len(filtered_formats)})")
|
||||
|
||||
chosen = filtered_formats[idx - 1]
|
||||
selection_format_id = str(chosen.get("format_id") or "").strip()
|
||||
if not selection_format_id:
|
||||
raise ValueError("Selected format has no format_id")
|
||||
|
||||
try:
|
||||
vcodec = str(chosen.get("vcodec", "none"))
|
||||
acodec = str(chosen.get("acodec", "none"))
|
||||
if vcodec != "none" and acodec == "none":
|
||||
selection_format_id = f"{selection_format_id}+ba"
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return selection_format_id
|
||||
|
||||
@staticmethod
|
||||
def _format_selector_for_query_height(query_format: str) -> Optional[str]:
|
||||
"""Translate a query value like '720p' into a yt-dlp -f selector.
|
||||
|
||||
Returns a selector that chooses the best video at or under the requested
|
||||
height and always pairs it with audio.
|
||||
|
||||
Example: '640p' -> 'bv*[height<=640]+ba'
|
||||
|
||||
Notes:
|
||||
- Only the '<digits>p' form is treated as a height cap to avoid
|
||||
ambiguity with numeric format IDs and numeric index selection.
|
||||
"""
|
||||
import re
|
||||
|
||||
if query_format is None:
|
||||
return None
|
||||
|
||||
s = str(query_format).strip().lower()
|
||||
m = re.match(r"^(\d{2,5})p$", s)
|
||||
if not m:
|
||||
return None
|
||||
|
||||
try:
|
||||
height = int(m.group(1))
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
if height <= 0:
|
||||
raise ValueError(f"Invalid height selection: {query_format}")
|
||||
|
||||
return f"bv*[height<={height}]+ba"
|
||||
|
||||
@staticmethod
|
||||
def _canonicalize_url_for_storage(*, requested_url: str, ytdlp_tool: YtDlpTool, playlist_items: Optional[str]) -> str:
|
||||
# Prefer yt-dlp's canonical webpage URL (e.g. strips timestamps/redirects).
|
||||
@@ -1778,11 +1916,22 @@ class Download_Media(Cmdlet):
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
get_stderr_console().print(table)
|
||||
_print_table_suspended(table)
|
||||
setattr(table, "_rendered_by_cmdlet", True)
|
||||
|
||||
if not Confirm.ask("Continue anyway?", default=False, console=get_stderr_console()):
|
||||
return False
|
||||
suspend = getattr(pipeline_context, "suspend_live_progress", None)
|
||||
cm: AbstractContextManager[Any] = nullcontext()
|
||||
if callable(suspend):
|
||||
try:
|
||||
maybe_cm = suspend()
|
||||
if maybe_cm is not None:
|
||||
cm = maybe_cm # type: ignore[assignment]
|
||||
except Exception:
|
||||
cm = nullcontext()
|
||||
|
||||
with cm:
|
||||
if not Confirm.ask("Continue anyway?", default=False, console=get_stderr_console()):
|
||||
return False
|
||||
return True
|
||||
|
||||
def _maybe_show_playlist_table(self, *, url: str, ytdlp_tool: YtDlpTool) -> bool:
|
||||
@@ -1869,7 +2018,7 @@ class Download_Media(Cmdlet):
|
||||
pipeline_context.set_current_stage_table(table)
|
||||
pipeline_context.set_last_result_table(table, results_list)
|
||||
|
||||
get_stderr_console().print(table)
|
||||
_print_table_suspended(table)
|
||||
setattr(table, "_rendered_by_cmdlet", True)
|
||||
return True
|
||||
|
||||
@@ -2048,7 +2197,7 @@ class Download_Media(Cmdlet):
|
||||
table.add_result(format_dict)
|
||||
|
||||
try:
|
||||
get_stderr_console().print(table)
|
||||
_print_table_suspended(table)
|
||||
setattr(table, "_rendered_by_cmdlet", True)
|
||||
except Exception:
|
||||
pass
|
||||
@@ -2326,7 +2475,7 @@ class Download_Media(Cmdlet):
|
||||
pipeline_context.set_last_result_table(table, results_list)
|
||||
|
||||
try:
|
||||
get_stderr_console().print(table)
|
||||
_print_table_suspended(table)
|
||||
setattr(table, "_rendered_by_cmdlet", True)
|
||||
except Exception:
|
||||
pass
|
||||
@@ -2499,6 +2648,7 @@ class Download_Media(Cmdlet):
|
||||
# -query "hash:<sha256>"
|
||||
# -query "clip:1m-1m15s,2m1s-2m11s"
|
||||
# -query "hash:<sha256>,clip:1m-1m15s,item:2-3"
|
||||
# -query "format:audio,item:1-3" (audio-only + playlist selection)
|
||||
query_keyed = self._parse_query_keyed_spec(str(query_spec) if query_spec is not None else None)
|
||||
|
||||
# Optional: allow an explicit hash via -query "hash:<sha256>".
|
||||
@@ -2512,7 +2662,27 @@ class Download_Media(Cmdlet):
|
||||
embed_chapters = True
|
||||
write_sub = True
|
||||
|
||||
mode = "audio" if parsed.get("audio") else "video"
|
||||
# QueryArgs:
|
||||
# - format:audio => audio-only (highest quality audio)
|
||||
# - format:<ytdlp-format> => equivalent to -format <ytdlp-format>
|
||||
query_format: Optional[str] = None
|
||||
try:
|
||||
fmt_values = query_keyed.get("format", []) if isinstance(query_keyed, dict) else []
|
||||
fmt_candidate = fmt_values[-1] if fmt_values else None
|
||||
if fmt_candidate is not None:
|
||||
query_format = str(fmt_candidate).strip()
|
||||
except Exception:
|
||||
query_format = None
|
||||
|
||||
query_wants_audio = False
|
||||
if query_format:
|
||||
try:
|
||||
query_wants_audio = str(query_format).strip().lower() == "audio"
|
||||
except Exception:
|
||||
query_wants_audio = False
|
||||
|
||||
# Explicit CLI flag wins; else query format:audio can select audio mode.
|
||||
mode = "audio" if (parsed.get("audio") or query_wants_audio) else "video"
|
||||
|
||||
clip_ranges, clip_invalid, clip_values = self._parse_clip_ranges_and_apply_items(
|
||||
clip_spec=str(clip_spec) if clip_spec is not None else None,
|
||||
@@ -2534,19 +2704,84 @@ class Download_Media(Cmdlet):
|
||||
storage, hydrus_available = self._init_storage(config if isinstance(config, dict) else {})
|
||||
|
||||
# Check if we need to show format selection
|
||||
formats_cache: Dict[str, Optional[List[Dict[str, Any]]]] = {}
|
||||
playlist_items = str(parsed.get("item")) if parsed.get("item") else None
|
||||
ytdl_format = parsed.get("format")
|
||||
# If user didn't pass -format, allow -query "format:<...>" to provide it.
|
||||
# Supported query forms:
|
||||
# - format:audio => audio-only mode (handled above)
|
||||
# - format:720p => pick best video <= 720p and always include audio
|
||||
# - format:<ytdlp -f> => treated as a raw yt-dlp selector (non-numeric)
|
||||
# - format:<N> => treated as a 1-based index into the shown format list (resolved below)
|
||||
if not ytdl_format and query_format and not query_wants_audio:
|
||||
try:
|
||||
height_selector = self._format_selector_for_query_height(query_format)
|
||||
except ValueError as e:
|
||||
log(f"Error parsing format selection: {e}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
if height_selector:
|
||||
ytdl_format = height_selector
|
||||
else:
|
||||
import re
|
||||
|
||||
# Preserve numeric index selection and avoid ambiguity with numeric format IDs.
|
||||
if not re.match(r"^\s*#?\d+\s*$", str(query_format)):
|
||||
ytdl_format = query_format
|
||||
playlist_selection_handled = False
|
||||
|
||||
# Playlist/multi-entry detection: if the URL has multiple items and the user didn't
|
||||
# specify -item or -format, show a normal selectable table and return.
|
||||
if len(supported_url) == 1 and not playlist_items and not ytdl_format:
|
||||
candidate_url = supported_url[0]
|
||||
if self._maybe_show_playlist_table(url=candidate_url, ytdlp_tool=ytdlp_tool):
|
||||
playlist_selection_handled = True
|
||||
# Let the user pick items using the normal REPL prompt:
|
||||
# @* | download-media ...
|
||||
return 0
|
||||
|
||||
# Support numeric index selection via -query "format:<N>" where N is 1-based index
|
||||
# into the filtered format list (e.g., -query "format:7" selects the 7th listed format).
|
||||
# This allows non-interactive invocation from shells (PowerShell treats '@' specially).
|
||||
if query_format and not query_wants_audio:
|
||||
try:
|
||||
idx_fmt = self._format_id_for_query_index(query_format, candidate_url, formats_cache, ytdlp_tool)
|
||||
except ValueError as e:
|
||||
log(f"Error parsing format selection: {e}", file=sys.stderr)
|
||||
return 1
|
||||
if idx_fmt:
|
||||
debug(f"Resolved numeric format selection '{query_format}' -> {idx_fmt}")
|
||||
ytdl_format = idx_fmt
|
||||
|
||||
if not ytdl_format:
|
||||
if self._maybe_show_playlist_table(url=candidate_url, ytdlp_tool=ytdlp_tool):
|
||||
playlist_selection_handled = True
|
||||
# Let the user pick items using the normal REPL prompt:
|
||||
# @* | download-media ...
|
||||
|
||||
# If we printed a format table, give a quick hint for non-interactive selection.
|
||||
try:
|
||||
last_table = pipeline_context.get_last_result_table() if hasattr(pipeline_context, "get_last_result_table") else None
|
||||
if hasattr(last_table, "rows") and getattr(last_table, "rows", None):
|
||||
# Build user-friendly examples using the base command we already constructed
|
||||
sample_index = 1
|
||||
sample_fmt_id = None
|
||||
try:
|
||||
sample_row = last_table.rows[0]
|
||||
sample_fmt_id = sample_row._full_metadata.get("item_selector") if getattr(sample_row, "_full_metadata", None) else None
|
||||
except Exception:
|
||||
sample_fmt_id = None
|
||||
|
||||
try:
|
||||
# Use single quotes inside the outer quotes so PowerShell doesn't interpret the pipe character
|
||||
sample_pipeline = base_cmd.replace(f'"{candidate_url}"', f"'{candidate_url}'")
|
||||
hint = (
|
||||
"To select non-interactively, re-run with an explicit format: "
|
||||
"e.g. mm \"{pipeline} -format {fmt} | add-file -store <store>\" or "
|
||||
"mm \"{pipeline} -query 'format:{index}' | add-file -store <store>\""
|
||||
).format(pipeline=sample_pipeline, fmt=sample_fmt_id or "<format_id>", index=sample_index)
|
||||
log(hint, file=sys.stderr)
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return 0
|
||||
|
||||
# Bulk preflight for playlist selections (per-entry URLs): check all URLs once before downloading.
|
||||
skip_per_url_preflight = False
|
||||
@@ -2563,7 +2798,7 @@ class Download_Media(Cmdlet):
|
||||
# Playlist-level format preflight: if the batch has only one available format,
|
||||
# discover it once and force it for every item. This avoids per-item failures
|
||||
# and per-item --list-formats calls (e.g. Bandcamp albums).
|
||||
formats_cache: Dict[str, Optional[List[Dict[str, Any]]]] = {}
|
||||
|
||||
|
||||
forced_single_format_id: Optional[str] = None
|
||||
forced_single_format_for_batch = False
|
||||
|
||||
@@ -122,6 +122,8 @@ class Get_Note(Cmdlet):
|
||||
for k in sorted(notes.keys(), key=lambda x: str(x).lower()):
|
||||
v = notes.get(k)
|
||||
raw_text = str(v or "")
|
||||
# Keep payload small for IPC/pipes.
|
||||
raw_text = raw_text[:999]
|
||||
preview = " ".join(raw_text.replace("\r", "").split("\n"))
|
||||
ctx.emit(
|
||||
{
|
||||
|
||||
@@ -401,7 +401,13 @@ def _run(result: Any, _args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
log(f"Hydrus relationships fetch failed: {exc}", file=sys.stderr)
|
||||
|
||||
if not found_relationships:
|
||||
log("No relationships found.")
|
||||
try:
|
||||
from rich.panel import Panel
|
||||
from rich_display import stdout_console
|
||||
title = source_title or (hash_hex[:16] + "..." if hash_hex else "Item")
|
||||
stdout_console().print(Panel(f"{title} has no relationships", title="Relationships"))
|
||||
except Exception:
|
||||
log("No relationships found.")
|
||||
return 0
|
||||
|
||||
# Display results
|
||||
|
||||
@@ -1060,6 +1060,7 @@ CMDLET = Cmdlet(
|
||||
SharedArgs.URL,
|
||||
CmdletArg(name="format", type="string", description="Output format: webp, png, jpeg, or pdf"),
|
||||
CmdletArg(name="selector", type="string", description="CSS selector for element capture"),
|
||||
SharedArgs.PATH
|
||||
|
||||
],
|
||||
detail=[
|
||||
|
||||
@@ -387,16 +387,10 @@ class Search_Store(Cmdlet):
|
||||
results = target_backend.search(query, limit=limit)
|
||||
debug(f"[search-store] '{backend_to_search}' -> {len(results or [])} result(s)")
|
||||
else:
|
||||
from API.HydrusNetwork import is_hydrus_available
|
||||
hydrus_available = is_hydrus_available(config or {})
|
||||
from Store.HydrusNetwork import HydrusNetwork
|
||||
|
||||
all_results = []
|
||||
for backend_name in storage.list_searchable_backends():
|
||||
try:
|
||||
backend = storage[backend_name]
|
||||
if isinstance(backend, HydrusNetwork) and not hydrus_available:
|
||||
continue
|
||||
searched_backends.append(backend_name)
|
||||
|
||||
debug(f"[search-store] Searching '{backend_name}'")
|
||||
|
||||
@@ -1,102 +1,202 @@
|
||||
"""Trim a media file using ffmpeg."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Dict, Sequence, List, Optional
|
||||
from typing import Any, Dict, Sequence, Optional
|
||||
from pathlib import Path
|
||||
import sys
|
||||
import json
|
||||
import subprocess
|
||||
import shutil
|
||||
import re
|
||||
import time
|
||||
from urllib.parse import urlparse
|
||||
|
||||
from SYS.logger import log, debug
|
||||
from SYS.utils import sha256_file
|
||||
from . import _shared as sh
|
||||
from Store import Store
|
||||
|
||||
Cmdlet = sh.Cmdlet
|
||||
CmdletArg = sh.CmdletArg
|
||||
parse_cmdlet_args = sh.parse_cmdlet_args
|
||||
normalize_result_input = sh.normalize_result_input
|
||||
extract_tag_from_result = sh.extract_tag_from_result
|
||||
extract_title_from_result = sh.extract_title_from_result
|
||||
extract_url_from_result = sh.extract_url_from_result
|
||||
get_field = sh.get_field
|
||||
import pipeline as ctx
|
||||
|
||||
CMDLET = Cmdlet(
|
||||
name="trim-file",
|
||||
summary="Trim a media file using ffmpeg.",
|
||||
usage="trim-file [-path <path>] -range <start-end> [-delete]",
|
||||
usage="trim-file [-path <path>] [-input <path-or-url>] -range <start-end> [-outdir <dir>] [-delete]",
|
||||
arg=[
|
||||
CmdletArg("-path", description="Path to the file (optional if piped)."),
|
||||
CmdletArg("-range", required=True, description="Time range to trim (e.g. '3:45-3:55' or '00:03:45-00:03:55')."),
|
||||
CmdletArg("-input", description="Override input media source (path or URL). Useful when piping store metadata but trimming from an mpv stream URL."),
|
||||
CmdletArg("-range", required=True, description="Time range to trim (e.g. '3:45-3:55', '00:03:45-00:03:55', or '1h3m-1h10m30s')."),
|
||||
CmdletArg("-outdir", description="Output directory for the clip (defaults to source folder for local files; otherwise uses config temp/videos)."),
|
||||
CmdletArg("-delete", type="flag", description="Delete the original file after trimming."),
|
||||
],
|
||||
detail=[
|
||||
"Creates a new file with 'clip_' prefix in the filename/title.",
|
||||
"Creates a new file with 'clip_' prefix in the filename.",
|
||||
"Adds the trim range to the title as: [1h3m-1h3m10s] - <title>.",
|
||||
"Inherits tag values from the source file.",
|
||||
"Adds a relationship to the source file (if hash is available).",
|
||||
"Output can be piped to add-file.",
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def _format_hms(total_seconds: float) -> str:
|
||||
"""Format seconds as compact h/m/s (no colons), e.g. 1h3m10s, 3m5s, 2s."""
|
||||
try:
|
||||
total = int(round(float(total_seconds)))
|
||||
except Exception:
|
||||
total = 0
|
||||
if total < 0:
|
||||
total = 0
|
||||
|
||||
hours = total // 3600
|
||||
minutes = (total % 3600) // 60
|
||||
seconds = total % 60
|
||||
|
||||
parts: list[str] = []
|
||||
if hours > 0:
|
||||
parts.append(f"{hours}h")
|
||||
if minutes > 0:
|
||||
parts.append(f"{minutes}m")
|
||||
if seconds > 0:
|
||||
parts.append(f"{seconds}s")
|
||||
|
||||
# Ensure we always output something.
|
||||
if not parts:
|
||||
return "0s"
|
||||
return "".join(parts)
|
||||
|
||||
def _is_url(value: str) -> bool:
|
||||
try:
|
||||
p = urlparse(str(value))
|
||||
return bool(p.scheme and p.netloc)
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def _parse_time(time_str: str) -> float:
|
||||
"""Convert time string (HH:MM:SS or MM:SS or SS) to seconds."""
|
||||
parts = time_str.strip().split(':')
|
||||
"""Convert time string into seconds.
|
||||
|
||||
Supports:
|
||||
- HH:MM:SS(.sss)
|
||||
- MM:SS(.sss)
|
||||
- SS(.sss)
|
||||
- 1h3m53s (also 1h3m, 3m53s, 53s)
|
||||
"""
|
||||
raw = str(time_str or '').strip()
|
||||
if not raw:
|
||||
raise ValueError("Empty time")
|
||||
|
||||
# h/m/s format (case-insensitive)
|
||||
hms = re.fullmatch(
|
||||
r"(?i)\s*(?:(?P<h>\d+(?:\.\d+)?)h)?(?:(?P<m>\d+(?:\.\d+)?)m)?(?:(?P<s>\d+(?:\.\d+)?)s)?\s*",
|
||||
raw,
|
||||
)
|
||||
if hms and (hms.group('h') or hms.group('m') or hms.group('s')):
|
||||
hours = float(hms.group('h') or 0)
|
||||
minutes = float(hms.group('m') or 0)
|
||||
seconds = float(hms.group('s') or 0)
|
||||
total = hours * 3600 + minutes * 60 + seconds
|
||||
return float(total)
|
||||
|
||||
# Colon-separated
|
||||
parts = [p.strip() for p in raw.split(':')]
|
||||
if len(parts) == 3:
|
||||
return float(parts[0]) * 3600 + float(parts[1]) * 60 + float(parts[2])
|
||||
elif len(parts) == 2:
|
||||
if len(parts) == 2:
|
||||
return float(parts[0]) * 60 + float(parts[1])
|
||||
elif len(parts) == 1:
|
||||
if len(parts) == 1:
|
||||
return float(parts[0])
|
||||
else:
|
||||
raise ValueError(f"Invalid time format: {time_str}")
|
||||
|
||||
def _trim_media(input_path: Path, output_path: Path, start_time: str, end_time: str) -> bool:
|
||||
"""Trim media file using ffmpeg."""
|
||||
raise ValueError(f"Invalid time format: {time_str}")
|
||||
|
||||
|
||||
def _sanitize_filename(name: str, *, max_len: int = 140) -> str:
|
||||
name = str(name or '').strip()
|
||||
if not name:
|
||||
return 'clip'
|
||||
# Windows-forbidden characters: <>:"/\\|?* plus control chars
|
||||
name = re.sub('[<>:"/\\\\|?*\\x00-\\x1F]', '_', name)
|
||||
name = re.sub(r"\s+", " ", name).strip()
|
||||
name = name.rstrip('.')
|
||||
if not name:
|
||||
return 'clip'
|
||||
if len(name) > max_len:
|
||||
name = name[:max_len].rstrip()
|
||||
return name
|
||||
|
||||
|
||||
def _extract_store_name(item: Any) -> Optional[str]:
|
||||
try:
|
||||
store_val = get_field(item, "store")
|
||||
s = str(store_val or "").strip()
|
||||
return s if s else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
|
||||
def _persist_alt_relationship(*, config: Dict[str, Any], store_name: str, alt_hash: str, king_hash: str) -> None:
|
||||
"""Persist directional alt -> king relationship in the given backend."""
|
||||
try:
|
||||
store = Store(config)
|
||||
backend: Any = store[str(store_name)]
|
||||
except Exception:
|
||||
return
|
||||
|
||||
alt_norm = str(alt_hash or "").strip().lower()
|
||||
king_norm = str(king_hash or "").strip().lower()
|
||||
if len(alt_norm) != 64 or len(king_norm) != 64 or alt_norm == king_norm:
|
||||
return
|
||||
|
||||
# Folder-backed local DB
|
||||
try:
|
||||
if type(backend).__name__ == "Folder" and hasattr(backend, "location") and callable(getattr(backend, "location")):
|
||||
from API.folder import API_folder_store
|
||||
from pathlib import Path
|
||||
|
||||
root = Path(str(backend.location())).expanduser()
|
||||
with API_folder_store(root) as db:
|
||||
db.set_relationship_by_hash(alt_norm, king_norm, "alt", bidirectional=False)
|
||||
return
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Hydrus-like backend
|
||||
try:
|
||||
client = getattr(backend, "_client", None)
|
||||
if client is not None and hasattr(client, "set_relationship"):
|
||||
client.set_relationship(alt_norm, king_norm, "alt")
|
||||
except Exception:
|
||||
return
|
||||
|
||||
def _trim_media(input_source: str, output_path: Path, start_seconds: float, duration_seconds: float) -> bool:
|
||||
"""Trim media using ffmpeg.
|
||||
|
||||
input_source may be a local path or a URL.
|
||||
"""
|
||||
ffmpeg_path = shutil.which('ffmpeg')
|
||||
if not ffmpeg_path:
|
||||
log("ffmpeg not found in PATH", file=sys.stderr)
|
||||
return False
|
||||
|
||||
# Calculate duration to avoid seeking issues if possible, or just use -to
|
||||
# Using -ss before -i is faster (input seeking) but might be less accurate.
|
||||
# Using -ss after -i is slower (output seeking) but accurate.
|
||||
# For trimming, accuracy is usually preferred, but for long files input seeking is better.
|
||||
# We'll use input seeking (-ss before -i) and -to.
|
||||
|
||||
cmd = [
|
||||
ffmpeg_path, '-y',
|
||||
'-ss', start_time,
|
||||
'-i', str(input_path),
|
||||
'-to', end_time,
|
||||
'-c', 'copy', # Stream copy for speed and quality preservation
|
||||
'-map_metadata', '0', # Copy metadata
|
||||
str(output_path)
|
||||
]
|
||||
|
||||
# If stream copy fails (e.g. cutting not on keyframe), we might need re-encoding.
|
||||
# But let's try copy first as it's standard for "trimming" without quality loss.
|
||||
# Note: -to with input seeking (-ss before -i) resets timestamp, so -to refers to duration?
|
||||
# No, -to refers to position in output if used after -ss?
|
||||
# Actually, if -ss is before -i, the timestamps are reset to 0.
|
||||
# So -to should be (end - start).
|
||||
# Alternatively, use -t (duration).
|
||||
|
||||
try:
|
||||
s = _parse_time(start_time)
|
||||
e = _parse_time(end_time)
|
||||
duration = e - s
|
||||
if duration <= 0:
|
||||
log(f"Invalid range: start {start_time} >= end {end_time}", file=sys.stderr)
|
||||
if duration_seconds <= 0:
|
||||
log(f"Invalid range: duration <= 0 ({duration_seconds})", file=sys.stderr)
|
||||
return False
|
||||
|
||||
|
||||
cmd = [
|
||||
ffmpeg_path, '-y',
|
||||
'-ss', start_time,
|
||||
'-i', str(input_path),
|
||||
'-t', str(duration),
|
||||
'-ss', str(float(start_seconds)),
|
||||
'-i', str(input_source),
|
||||
'-t', str(float(duration_seconds)),
|
||||
'-c', 'copy',
|
||||
'-map_metadata', '0',
|
||||
str(output_path)
|
||||
str(output_path),
|
||||
]
|
||||
|
||||
debug(f"Running ffmpeg: {' '.join(cmd)}")
|
||||
@@ -121,10 +221,27 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
log("Error: -range argument required (format: start-end)", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
start_str, end_str = range_arg.split('-', 1)
|
||||
start_str, end_str = [s.strip() for s in range_arg.split('-', 1)]
|
||||
if not start_str or not end_str:
|
||||
log("Error: -range must be start-end", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
try:
|
||||
start_seconds = _parse_time(start_str)
|
||||
end_seconds = _parse_time(end_str)
|
||||
except Exception as exc:
|
||||
log(f"Error parsing -range: {exc}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
duration_seconds = end_seconds - start_seconds
|
||||
if duration_seconds <= 0:
|
||||
log(f"Invalid range: start {start_str} >= end {end_str}", file=sys.stderr)
|
||||
return 1
|
||||
|
||||
delete_original = parsed.get("delete", False)
|
||||
path_arg = parsed.get("path")
|
||||
input_override = parsed.get("input")
|
||||
outdir_arg = parsed.get("outdir")
|
||||
|
||||
# Collect inputs
|
||||
inputs = normalize_result_input(result)
|
||||
@@ -140,8 +257,10 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
success_count = 0
|
||||
|
||||
for item in inputs:
|
||||
store_name = _extract_store_name(item)
|
||||
|
||||
# Resolve file path
|
||||
file_path = None
|
||||
file_path: Optional[str] = None
|
||||
if isinstance(item, dict):
|
||||
file_path = item.get("path") or item.get("target")
|
||||
elif hasattr(item, "path"):
|
||||
@@ -149,22 +268,84 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
elif isinstance(item, str):
|
||||
file_path = item
|
||||
|
||||
if not file_path:
|
||||
if not file_path and not input_override:
|
||||
continue
|
||||
|
||||
media_source = str(input_override or file_path)
|
||||
is_url = _is_url(media_source)
|
||||
|
||||
path_obj: Optional[Path] = None
|
||||
if not is_url:
|
||||
try:
|
||||
path_obj = Path(str(media_source))
|
||||
except Exception:
|
||||
path_obj = None
|
||||
if not path_obj or not path_obj.exists():
|
||||
log(f"File not found: {media_source}", file=sys.stderr)
|
||||
continue
|
||||
|
||||
path_obj = Path(file_path)
|
||||
if not path_obj.exists():
|
||||
log(f"File not found: {file_path}", file=sys.stderr)
|
||||
continue
|
||||
|
||||
# Determine output path
|
||||
# Prepend clip_ to filename
|
||||
new_filename = f"clip_{path_obj.name}"
|
||||
output_path = path_obj.parent / new_filename
|
||||
# Determine output directory
|
||||
output_dir: Path
|
||||
if outdir_arg:
|
||||
output_dir = Path(str(outdir_arg)).expanduser()
|
||||
elif store_name:
|
||||
from config import resolve_output_dir
|
||||
output_dir = resolve_output_dir(config or {})
|
||||
elif path_obj is not None:
|
||||
output_dir = path_obj.parent
|
||||
else:
|
||||
from config import resolve_output_dir
|
||||
output_dir = resolve_output_dir(config or {})
|
||||
|
||||
try:
|
||||
output_dir.mkdir(parents=True, exist_ok=True)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Determine output filename
|
||||
output_ext = ''
|
||||
if path_obj is not None:
|
||||
output_ext = path_obj.suffix
|
||||
base_name = path_obj.stem
|
||||
else:
|
||||
# Prefer title from metadata if present
|
||||
title = extract_title_from_result(item)
|
||||
if title:
|
||||
base_name = _sanitize_filename(str(title))
|
||||
else:
|
||||
base_name = time.strftime('%Y%m%d-%H%M%S')
|
||||
|
||||
if base_name.lower().startswith('clip_'):
|
||||
base_name = base_name[5:] or base_name
|
||||
|
||||
try:
|
||||
p = urlparse(str(media_source))
|
||||
last = (p.path or '').split('/')[-1]
|
||||
if last and '.' in last:
|
||||
output_ext = '.' + last.split('.')[-1]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if not output_ext or len(output_ext) > 8:
|
||||
output_ext = '.mkv'
|
||||
|
||||
new_filename = f"clip_{base_name}{output_ext}"
|
||||
output_path = output_dir / new_filename
|
||||
|
||||
# Avoid clobbering existing files
|
||||
if output_path.exists():
|
||||
stem = output_path.stem
|
||||
suffix = output_path.suffix
|
||||
for i in range(1, 1000):
|
||||
candidate = output_dir / f"{stem}_{i}{suffix}"
|
||||
if not candidate.exists():
|
||||
output_path = candidate
|
||||
break
|
||||
|
||||
# Trim
|
||||
log(f"Trimming {path_obj.name} ({start_str} to {end_str})...", file=sys.stderr)
|
||||
if _trim_media(path_obj, output_path, start_str, end_str):
|
||||
source_label = (path_obj.name if path_obj is not None else str(media_source))
|
||||
log(f"Trimming {source_label} ({start_str} to {end_str})...", file=sys.stderr)
|
||||
if _trim_media(str(media_source), output_path, start_seconds, duration_seconds):
|
||||
log(f"Created clip: {output_path}", file=sys.stderr)
|
||||
success_count += 1
|
||||
|
||||
@@ -178,98 +359,104 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
source_hash = item.hash
|
||||
|
||||
if not source_hash:
|
||||
try:
|
||||
source_hash = sha256_file(path_obj)
|
||||
except Exception:
|
||||
pass
|
||||
if path_obj is not None:
|
||||
try:
|
||||
source_hash = sha256_file(path_obj)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# 2. Get tag values
|
||||
tags = extract_tag_from_result(item)
|
||||
# Do not inherit tags from the source (per UX request).
|
||||
new_tags: list[str] = []
|
||||
|
||||
# Copy URL(s) when present.
|
||||
urls: list[str] = []
|
||||
try:
|
||||
urls = extract_url_from_result(item) or []
|
||||
except Exception:
|
||||
urls = []
|
||||
try:
|
||||
src_u = get_field(item, "source_url")
|
||||
if isinstance(src_u, str) and src_u.strip():
|
||||
if src_u.strip() not in urls:
|
||||
urls.append(src_u.strip())
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# 3. Get title and modify it
|
||||
title = extract_title_from_result(item)
|
||||
if not title:
|
||||
title = path_obj.stem
|
||||
title = path_obj.stem if path_obj is not None else base_name
|
||||
|
||||
range_hms = f"{_format_hms(start_seconds)}-{_format_hms(end_seconds)}"
|
||||
new_title = f"[{range_hms}] - {title}"
|
||||
|
||||
new_title = f"clip_{title}"
|
||||
|
||||
# Update title tag if present
|
||||
new_tags = []
|
||||
has_title_tag = False
|
||||
for t in tags:
|
||||
if t.lower().startswith("title:"):
|
||||
new_tags.append(f"title:{new_title}")
|
||||
has_title_tag = True
|
||||
else:
|
||||
new_tags.append(t)
|
||||
|
||||
if not has_title_tag:
|
||||
new_tags.append(f"title:{new_title}")
|
||||
|
||||
# 4. Calculate clip hash and update original file's relationships
|
||||
# 4. Calculate clip hash
|
||||
clip_hash = None
|
||||
try:
|
||||
clip_hash = sha256_file(output_path)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if source_hash and clip_hash:
|
||||
# Update original file in local DB if possible
|
||||
# If this was a store item, ingest the clip into the same store.
|
||||
stored_store: Optional[str] = None
|
||||
stored_hash: Optional[str] = None
|
||||
stored_path: Optional[str] = None
|
||||
|
||||
if store_name:
|
||||
try:
|
||||
from config import get_local_storage_path
|
||||
from API.folder import API_folder_store
|
||||
|
||||
storage_path = get_local_storage_path(config)
|
||||
if storage_path:
|
||||
with API_folder_store(storage_path) as db:
|
||||
# Get original file metadata
|
||||
# We need to find the original file by hash or path
|
||||
# Try path first
|
||||
orig_meta = db.get_metadata(path_obj)
|
||||
if not orig_meta and source_hash:
|
||||
# Try by hash
|
||||
orig_path_resolved = db.search_hash(source_hash)
|
||||
if orig_path_resolved:
|
||||
orig_meta = db.get_metadata(orig_path_resolved)
|
||||
|
||||
if orig_meta:
|
||||
# Update relationships
|
||||
rels = orig_meta.get("relationships", {})
|
||||
if not isinstance(rels, dict):
|
||||
rels = {}
|
||||
|
||||
# Add clip as "derivative" (since original is the source)
|
||||
if "derivative" not in rels:
|
||||
rels["derivative"] = []
|
||||
|
||||
if clip_hash not in rels["derivative"]:
|
||||
rels["derivative"].append(clip_hash)
|
||||
|
||||
# Save back to DB
|
||||
# We need to preserve other metadata
|
||||
orig_meta["relationships"] = rels
|
||||
|
||||
# Ensure hash is set in metadata if we have it
|
||||
if source_hash and not orig_meta.get("hash"):
|
||||
orig_meta["hash"] = source_hash
|
||||
|
||||
# We need the path to save
|
||||
save_path = Path(orig_meta.get("path") or path_obj)
|
||||
db.save_metadata(save_path, orig_meta)
|
||||
log(f"Updated relationship for original file: {save_path.name}", file=sys.stderr)
|
||||
except Exception as e:
|
||||
log(f"Failed to update original file relationships: {e}", file=sys.stderr)
|
||||
store = Store(config)
|
||||
if store.is_available(store_name):
|
||||
backend = store[str(store_name)]
|
||||
move_flag = type(backend).__name__ == "Folder"
|
||||
stored_hash = backend.add_file(
|
||||
Path(str(output_path)),
|
||||
title=new_title,
|
||||
tag=new_tags,
|
||||
url=urls,
|
||||
move=move_flag,
|
||||
)
|
||||
stored_store = store_name
|
||||
|
||||
# Best-effort resolve stored path for folder backends.
|
||||
try:
|
||||
if type(backend).__name__ == "Folder" and hasattr(backend, "get_file"):
|
||||
p = backend.get_file(str(stored_hash))
|
||||
if isinstance(p, Path):
|
||||
stored_path = str(p)
|
||||
elif isinstance(p, str) and p:
|
||||
stored_path = p
|
||||
except Exception:
|
||||
stored_path = None
|
||||
except Exception as exc:
|
||||
log(f"Failed to add clip to store '{store_name}': {exc}", file=sys.stderr)
|
||||
|
||||
# If we stored it, persist relationship alt -> king in that store.
|
||||
if stored_store and stored_hash and source_hash:
|
||||
_persist_alt_relationship(
|
||||
config=config,
|
||||
store_name=stored_store,
|
||||
alt_hash=stored_hash,
|
||||
king_hash=str(source_hash),
|
||||
)
|
||||
|
||||
if stored_hash:
|
||||
clip_hash = stored_hash
|
||||
|
||||
# 5. Construct result
|
||||
result_dict = {
|
||||
"path": str(output_path),
|
||||
"path": stored_path or str(output_path),
|
||||
"title": new_title,
|
||||
"tag": new_tags,
|
||||
"url": urls,
|
||||
"media_kind": "video", # Assumption, or derive
|
||||
"hash": clip_hash, # Pass calculated hash
|
||||
"store": stored_store,
|
||||
"relationships": {
|
||||
# The source is the KING of this clip
|
||||
"king": [source_hash] if source_hash else []
|
||||
# Clip is an ALT of the source; store semantics are directional alt -> king.
|
||||
# Provide both keys so downstream (e.g. add-file) can persist relationships.
|
||||
"king": [source_hash] if source_hash else [],
|
||||
"alt": [clip_hash] if (source_hash and clip_hash) else [],
|
||||
}
|
||||
}
|
||||
|
||||
@@ -279,15 +466,17 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
# Delete original if requested
|
||||
if delete_original:
|
||||
try:
|
||||
path_obj.unlink()
|
||||
log(f"Deleted original file: {path_obj}", file=sys.stderr)
|
||||
if path_obj is not None:
|
||||
path_obj.unlink()
|
||||
log(f"Deleted original file: {path_obj}", file=sys.stderr)
|
||||
# Also try to delete sidecars?
|
||||
# Maybe leave that to user or cleanup cmdlet
|
||||
except Exception as e:
|
||||
log(f"Failed to delete original: {e}", file=sys.stderr)
|
||||
|
||||
else:
|
||||
log(f"Failed to trim {path_obj.name}", file=sys.stderr)
|
||||
failed_label = (path_obj.name if path_obj is not None else str(media_source))
|
||||
log(f"Failed to trim {failed_label}", file=sys.stderr)
|
||||
|
||||
return 0 if success_count > 0 else 1
|
||||
|
||||
|
||||
84
docs/BOOTSTRAP.md
Normal file
84
docs/BOOTSTRAP.md
Normal file
@@ -0,0 +1,84 @@
|
||||
# Bootstrapping the development environment
|
||||
|
||||
This project includes convenience scripts to create a Python virtual environment, install the package, and (optionally) create OS shortcuts.
|
||||
|
||||
Files:
|
||||
- `scripts/bootstrap.ps1` — PowerShell script for Windows (creates venv, installs, optional Desktop/Start Menu shortcuts)
|
||||
- `scripts/bootstrap.sh` — POSIX shell script (Linux/macOS) (creates venv, installs, optional desktop launcher)
|
||||
|
||||
Quick examples
|
||||
|
||||
Windows (PowerShell):
|
||||
|
||||
```powershell
|
||||
# Create a .venv, install in editable mode and add a Desktop shortcut
|
||||
powershell -ExecutionPolicy Bypass -File .\scripts\bootstrap.ps1 -Editable -CreateDesktopShortcut
|
||||
|
||||
# Use a specific python.exe and force overwrite
|
||||
powershell -ExecutionPolicy Bypass -File .\scripts\bootstrap.ps1 -Python "C:\\Python39\\python.exe" -Force
|
||||
```
|
||||
|
||||
Linux/macOS (bash):
|
||||
|
||||
```bash
|
||||
# Create a .venv and install the project in editable mode
|
||||
./scripts/bootstrap.sh --editable
|
||||
|
||||
# Create a desktop entry (GNU/Linux)
|
||||
./scripts/bootstrap.sh --editable --desktop
|
||||
```
|
||||
|
||||
Notes
|
||||
|
||||
- On Windows you may need to run PowerShell with an appropriate ExecutionPolicy (example shows using `-ExecutionPolicy Bypass`).
|
||||
- The scripts default to a venv directory named `.venv` in the repository root. Use `-VenvPath` (PowerShell) or `--venv` (bash) to choose a different directory.
|
||||
- The scripts are intended to make day-to-day developer setup easy; tweak flags for your desired install mode (editable vs normal) and shortcut preferences.
|
||||
|
||||
## Deno — installed by bootstrap
|
||||
|
||||
The bootstrap scripts will automatically install Deno if it is not already present on the system. They use the official installers and attempt to add Deno's bin directory to the PATH for the current session. If the installer completes but `deno` is not available in your shell, restart your shell or add `$HOME/.deno/bin` (Windows: `%USERPROFILE%\\.deno\\bin`) to your PATH.
|
||||
|
||||
Opinionated behavior
|
||||
|
||||
Running `python ./scripts/setup.py` is intentionally opinionated: it will create a local virtual environment at `./.venv` (repo root), install Python dependencies and the project into that venv, install Playwright browsers, install Deno, and write small launcher scripts in the project root:
|
||||
|
||||
- `mm` (POSIX shell)
|
||||
- `mm.ps1` (PowerShell)
|
||||
- `mm.bat` (Windows CMD)
|
||||
|
||||
These launchers prefer the local `./.venv` Python and console scripts so you can run the project with `./mm` or `mm.ps1` directly from the repo root.
|
||||
|
||||
Additionally, the setup helpers install a global `mm` launcher into your user bin so you can run `mm` from any shell session:
|
||||
|
||||
- POSIX: `~/.local/bin/mm` (created if missing; the script attempts to add `~/.local/bin` to `PATH` by updating `~/.profile` / shell RCs if required)
|
||||
- Windows: `%USERPROFILE%\bin\mm.cmd` and `%USERPROFILE%\bin\mm.ps1` (created if missing; the script attempts to add the folder to your **User** PATH)
|
||||
|
||||
The scripts back up any existing `mm` shims before replacing them and will print actionable messages when a shell restart is required.
|
||||
|
||||
PowerShell (Windows):
|
||||
```powershell
|
||||
irm https://deno.land/install.ps1 | iex
|
||||
```
|
||||
|
||||
Linux/macOS:
|
||||
```bash
|
||||
curl -fsSL https://deno.land/install.sh | sh
|
||||
```
|
||||
|
||||
Pinning a Deno version
|
||||
|
||||
You can pin a Deno release by setting the `DENO_VERSION` environment variable before running the bootstrap script. Examples:
|
||||
|
||||
PowerShell (Windows):
|
||||
```powershell
|
||||
$env:DENO_VERSION = 'v1.34.3'; .\scripts\bootstrap.ps1
|
||||
```
|
||||
|
||||
POSIX (Linux/macOS):
|
||||
```bash
|
||||
DENO_VERSION=v1.34.3 ./scripts/bootstrap.sh
|
||||
```
|
||||
|
||||
If you'd like, I can also:
|
||||
- Add a short README section in `readme.md` referencing this doc, or
|
||||
- Add a small icon and polish Linux desktop entries with an icon path.
|
||||
9
medeia_macina/__init__.py
Normal file
9
medeia_macina/__init__.py
Normal file
@@ -0,0 +1,9 @@
|
||||
"""Top-level package for Medeia-Macina.
|
||||
|
||||
This package provides the `cli_entry` module which exposes the `main()` entry
|
||||
point used by command-line launchers.
|
||||
"""
|
||||
|
||||
__all__ = ["cli_entry"]
|
||||
|
||||
__version__ = "0.1.0"
|
||||
245
medeia_macina/cli_entry.py
Normal file
245
medeia_macina/cli_entry.py
Normal file
@@ -0,0 +1,245 @@
|
||||
"""CLI entrypoint module compatible with console scripts.
|
||||
|
||||
This wraps the existing `medeia_entry.py` runner so installers can set
|
||||
entry points to `medeia_macina.cli_entry:main`.
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional, List, Tuple
|
||||
import sys
|
||||
import importlib
|
||||
from pathlib import Path
|
||||
import shlex
|
||||
|
||||
|
||||
def _parse_mode_and_strip_args(args: List[str]) -> Tuple[Optional[str], List[str]]:
|
||||
"""Parse --gui/--cli/--mode flags and return (mode, cleaned_args).
|
||||
|
||||
The function removes any mode flags from the argument list so the selected
|
||||
runner can receive the remaining arguments untouched.
|
||||
|
||||
Supported forms:
|
||||
--gui, -g, --gui=true
|
||||
--cli, -c, --cli=true
|
||||
--mode=gui|cli
|
||||
--mode gui|cli
|
||||
|
||||
Raises ValueError on conflicting or invalid flags.
|
||||
"""
|
||||
mode: Optional[str] = None
|
||||
out: List[str] = []
|
||||
i = 0
|
||||
while i < len(args):
|
||||
a = args[i]
|
||||
la = a.lower()
|
||||
|
||||
# --gui / -g
|
||||
if la in ("--gui", "-g"):
|
||||
if mode and mode != "gui":
|
||||
raise ValueError("Conflicting mode flags: found both 'gui' and 'cli'")
|
||||
mode = "gui"
|
||||
i += 1
|
||||
continue
|
||||
if la.startswith("--gui="):
|
||||
val = la.split("=", 1)[1]
|
||||
if val and val not in ("0", "false", "no", "off"):
|
||||
if mode and mode != "gui":
|
||||
raise ValueError("Conflicting mode flags: found both 'gui' and 'cli'")
|
||||
mode = "gui"
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# --cli / -c
|
||||
if la in ("--cli", "-c"):
|
||||
if mode and mode != "cli":
|
||||
raise ValueError("Conflicting mode flags: found both 'gui' and 'cli'")
|
||||
mode = "cli"
|
||||
i += 1
|
||||
continue
|
||||
if la.startswith("--cli="):
|
||||
val = la.split("=", 1)[1]
|
||||
if val and val not in ("0", "false", "no", "off"):
|
||||
if mode and mode != "cli":
|
||||
raise ValueError("Conflicting mode flags: found both 'gui' and 'cli'")
|
||||
mode = "cli"
|
||||
i += 1
|
||||
continue
|
||||
|
||||
# --mode
|
||||
if la.startswith("--mode="):
|
||||
val = la.split("=", 1)[1]
|
||||
val = val.lower()
|
||||
if val not in ("gui", "cli"):
|
||||
raise ValueError("--mode must be 'gui' or 'cli'")
|
||||
if mode and mode != val:
|
||||
raise ValueError("Conflicting mode flags: found both 'gui' and 'cli'")
|
||||
mode = val
|
||||
i += 1
|
||||
continue
|
||||
if la == "--mode":
|
||||
if i + 1 >= len(args):
|
||||
raise ValueError("--mode requires a value ('gui' or 'cli')")
|
||||
val = args[i + 1].lower()
|
||||
if val not in ("gui", "cli"):
|
||||
raise ValueError("--mode must be 'gui' or 'cli'")
|
||||
if mode and mode != val:
|
||||
raise ValueError("Conflicting mode flags: found both 'gui' and 'cli'")
|
||||
mode = val
|
||||
i += 2
|
||||
continue
|
||||
|
||||
# Not a mode flag; keep it
|
||||
out.append(a)
|
||||
i += 1
|
||||
|
||||
return mode, out
|
||||
|
||||
|
||||
def _import_medeia_entry_module():
|
||||
"""Import and return the top-level 'medeia_entry' module.
|
||||
|
||||
This attempts a regular import first. If that fails with ImportError it will
|
||||
try a few fallbacks useful for editable installs and running directly from
|
||||
the repository (searching for .egg-link, walking parents, or checking CWD).
|
||||
"""
|
||||
try:
|
||||
return importlib.import_module("medeia_entry")
|
||||
except ImportError:
|
||||
# Try to find the project root next to this installed package
|
||||
pkg_dir = Path(__file__).resolve().parent
|
||||
|
||||
# 1) Look for an .egg-link that points to the project root
|
||||
try:
|
||||
for egg in pkg_dir.glob("*.egg-link"):
|
||||
try:
|
||||
project_root = egg.read_text().splitlines()[0].strip()
|
||||
if project_root:
|
||||
candidate = Path(project_root) / "medeia_entry.py"
|
||||
if candidate.exists():
|
||||
if str(Path(project_root)) not in sys.path:
|
||||
sys.path.insert(0, str(Path(project_root)))
|
||||
return importlib.import_module("medeia_entry")
|
||||
except Exception:
|
||||
continue
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# 2) Walk upwards looking for a top-level 'medeia_entry.py'
|
||||
for parent in pkg_dir.parents:
|
||||
candidate = parent / "medeia_entry.py"
|
||||
if candidate.exists():
|
||||
if str(parent) not in sys.path:
|
||||
sys.path.insert(0, str(parent))
|
||||
return importlib.import_module("medeia_entry")
|
||||
|
||||
# 3) Check current working directory
|
||||
candidate = Path.cwd() / "medeia_entry.py"
|
||||
if candidate.exists():
|
||||
if str(Path.cwd()) not in sys.path:
|
||||
sys.path.insert(0, str(Path.cwd()))
|
||||
return importlib.import_module("medeia_entry")
|
||||
|
||||
raise ImportError(
|
||||
"Could not import 'medeia_entry'. Ensure the project was installed properly or run from the repo root."
|
||||
)
|
||||
|
||||
|
||||
def _run_cli(clean_args: List[str]) -> int:
|
||||
"""Run the CLI runner (MedeiaCLI) with cleaned argv list."""
|
||||
try:
|
||||
sys.argv[1:] = list(clean_args)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
mod = _import_medeia_entry_module()
|
||||
try:
|
||||
MedeiaCLI = getattr(mod, "MedeiaCLI")
|
||||
except AttributeError:
|
||||
raise ImportError("Imported module 'medeia_entry' does not define 'MedeiaCLI'")
|
||||
|
||||
try:
|
||||
app = MedeiaCLI()
|
||||
app.run()
|
||||
return 0
|
||||
except SystemExit as exc:
|
||||
return int(getattr(exc, "code", 0) or 0)
|
||||
|
||||
|
||||
def _run_gui(clean_args: List[str]) -> int:
|
||||
"""Run the TUI runner (PipelineHubApp).
|
||||
|
||||
The TUI is imported lazily; if Textual or the TUI code is unavailable we
|
||||
give a helpful error message and exit non‑zero.
|
||||
"""
|
||||
try:
|
||||
tui_mod = importlib.import_module("TUI.tui")
|
||||
except Exception as exc:
|
||||
print(
|
||||
"Error: Unable to import TUI (Textual may not be installed):",
|
||||
exc,
|
||||
file=sys.stderr,
|
||||
)
|
||||
return 2
|
||||
|
||||
try:
|
||||
PipelineHubApp = getattr(tui_mod, "PipelineHubApp")
|
||||
except AttributeError:
|
||||
print("Error: 'TUI.tui' does not expose 'PipelineHubApp'", file=sys.stderr)
|
||||
return 2
|
||||
|
||||
try:
|
||||
app = PipelineHubApp()
|
||||
app.run()
|
||||
return 0
|
||||
except SystemExit as exc:
|
||||
return int(getattr(exc, "code", 0) or 0)
|
||||
|
||||
|
||||
def main(argv: Optional[List[str]] = None) -> int:
|
||||
"""Entry point for console_scripts.
|
||||
|
||||
Accepts an optional argv list (useful for testing). Mode flags are parsed
|
||||
and removed before dispatching to the selected runner.
|
||||
"""
|
||||
args = list(argv) if argv is not None else list(sys.argv[1:])
|
||||
|
||||
try:
|
||||
mode, clean_args = _parse_mode_and_strip_args(args)
|
||||
except ValueError as exc:
|
||||
print(f"Error parsing mode flags: {exc}", file=sys.stderr)
|
||||
return 2
|
||||
|
||||
# If GUI requested, delegate directly (GUI may decide to honor any args itself)
|
||||
if mode == "gui":
|
||||
return _run_gui(clean_args)
|
||||
|
||||
# Support quoting a pipeline (or even a single full command) on the command line.
|
||||
#
|
||||
# - If the user provides a single argument that contains a pipe character,
|
||||
# treat it as a pipeline and rewrite the args to call the internal `pipeline`
|
||||
# subcommand so existing CLI pipeline handling is used.
|
||||
#
|
||||
# - If the user provides a single argument that contains whitespace but no pipe,
|
||||
# expand it into argv tokens (PowerShell commonly encourages quoting strings).
|
||||
#
|
||||
# Examples:
|
||||
# mm "download-media <url> | add-tag 'x' | add-file -store local"
|
||||
# mm "download-media '<url>' -query 'format:720p' -path 'C:\\out'"
|
||||
if len(clean_args) == 1:
|
||||
single = clean_args[0]
|
||||
if "|" in single and not single.startswith("-"):
|
||||
clean_args = ["pipeline", "--pipeline", single]
|
||||
elif (not single.startswith("-")) and any(ch.isspace() for ch in single):
|
||||
try:
|
||||
expanded = shlex.split(single, posix=True)
|
||||
if expanded:
|
||||
clean_args = list(expanded)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Default to CLI if --cli is requested or no explicit mode provided.
|
||||
return _run_cli(clean_args)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
raise SystemExit(main())
|
||||
@@ -39,7 +39,6 @@ dependencies = [
|
||||
"yt-dlp-ejs", # EJS challenge solver scripts for YouTube JavaScript challenges
|
||||
"requests>=2.31.0",
|
||||
"httpx>=0.25.0",
|
||||
"ffmpeg-python>=0.2.0",
|
||||
|
||||
# Document and data handling
|
||||
"pypdf>=3.0.0",
|
||||
@@ -49,13 +48,13 @@ dependencies = [
|
||||
# Image and media support
|
||||
"Pillow>=10.0.0",
|
||||
"python-bidi>=0.4.2",
|
||||
"ffmpeg-python>=0.2.0",
|
||||
|
||||
# Metadata extraction and processing
|
||||
"musicbrainzngs>=0.7.0",
|
||||
"lxml>=4.9.0",
|
||||
|
||||
# Advanced searching and libraries
|
||||
"libgen-api>=1.0.0",
|
||||
"aioslsk>=1.6.0",
|
||||
"imdbinfo>=0.1.10",
|
||||
|
||||
@@ -104,14 +103,18 @@ dev = [
|
||||
mm = "medeia_macina.cli_entry:main"
|
||||
medeia = "medeia_macina.cli_entry:main"
|
||||
|
||||
[project.url]
|
||||
[project.urls]
|
||||
Homepage = "https://github.com/yourusername/medeia-macina"
|
||||
Documentation = "https://medeia-macina.readthedocs.io"
|
||||
Repository = "https://github.com/yourusername/medeia-macina.git"
|
||||
Issues = "https://github.com/yourusername/medeia-macina/issues"
|
||||
|
||||
[tool.setuptools]
|
||||
packages = ["cmdlet", "helper", "TUI", "medeia_macina"]
|
||||
py-modules = ["medeia_entry"]
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
where = ["."]
|
||||
exclude = ["tests*", "docs*"]
|
||||
|
||||
[tool.black]
|
||||
line-length = 100
|
||||
|
||||
33
readme.md
33
readme.md
@@ -2,14 +2,17 @@
|
||||
|
||||
Medios-Macina is a CLI media manager and toolkit focused on downloading, tagging, and media storage (audio, video, images, and text) from a variety of providers and sources. It is designed around a compact, pipeable command language ("cmdlets") so complex workflows can be composed simply and repeatably.
|
||||
|
||||
## Highlights ✅
|
||||
- Flexible pipeline-based CLI: chain cmdlets with `|` and use saved selections with `@N`.
|
||||
- Multi-store support: HydrusNetwork, local folders, and provider-backed stores.
|
||||
- Provider integrations: YouTube, OpenLibrary/Archive.org, Soulseek, LibGen, AllDebrid, and more.
|
||||
- Utility cmdlets: screenshots (Playwright), metadata extraction, merging, and automated tagging.
|
||||
- MPV-friendly: integrate with MPV playback and playlists for quick ingestion.
|
||||
## Features
|
||||
- **Flexible syntax structure:** chain commands with `|` and select options from tables with `@N`.
|
||||
- **Multiple file stores:** *HYDRUSNETWORK, FOLDER*
|
||||
- **Provider plugin integration:** *YOUTUBE, OPENLIBRARY/ARCHIVE.ORG, SOULSEEK, LIBGEN, ALLDEBRID, TELEGRAM, BANDCAMP*
|
||||
- **Module Mixing:** *[Playwright](https://github.com/microsoft/playwright), [yt-dlp](https://github.com/yt-dlp/yt-dlp), [aioslsk](https://github.com/JurgenR/aioslsk), [telethon](https://github.com/LonamiWebs/Telethon),[typer](https://github.com/fastapi/typer)*
|
||||
- **MPV Manager:** Play audio, video, and even images in a custom designed MPV with trimming, screenshotting, and more built right in!
|
||||
|
||||
## Quick start ⚡
|
||||
|
||||
Prefer an automated setup? See `docs/BOOTSTRAP.md` and use `scripts/bootstrap.ps1` (Windows) or `scripts/bootstrap.sh` (Linux/macOS) to create a venv and install the project. Alternatively, run the opinionated helper: `python ./scripts/setup.py` which creates a `.venv` at the repo root, installs dependencies into it, writes convenient `mm` launchers in the project root, and installs a global `mm` shim into your user PATH so you can run `mm` from anywhere.
|
||||
|
||||
1. Install Python requirements:
|
||||
|
||||
```powershell
|
||||
@@ -55,9 +58,21 @@ python cli.py
|
||||
|
||||
## Usage overview 🔧
|
||||
- Pipelines: chain cmdlets with `|`, e.g., `download-media <url> | add-file -storage local`.
|
||||
- From your shell you can pass a fully-quoted pipeline so the shell doesn't interpret `|` as a pipe: e.g.
|
||||
`mm "download-media <url> | add-file -storage local"`
|
||||
- Format selection (non-interactive): When `download-media` shows multiple formats, you can select one non-interactively by re-running the pipeline and specifying the format:
|
||||
- Use a format id: `mm "download-media '<url>' -format 243 | add-file -store local"`
|
||||
- Or use the listed index (1-based): `mm "download-media '<url>' -query 'format:7' | add-file -store local"`
|
||||
Note: The `@N` selection syntax works in the interactive REPL, but shells like PowerShell treat `@` specially — prefer `-query 'format:N'` when running a quoted pipeline from your shell.
|
||||
- Selections: search cmdlets populate a selectable ResultTable; refer to entries with `@<index>`.
|
||||
- Tagging & metadata: `add-tag` mutates piped results (temporary path items) or writes to a configured store when `-store` is provided.
|
||||
|
||||
## Built-in image viewer 🎞️
|
||||
- MPV automatically detects still-image files and flips into an image viewer mode while leaving the IPC helper aware via `user-data/mpv/image`.
|
||||
- Arrow keys, `WASD`, or `h/j/k/l` pan the image (recently tuned to ±0.05 steps), `Shift+arrow` offers finer nudges, `=`/`-` zoom quickly (~45% per press), `+`/`_` zoom slowly, and `0` resets zoom/pan back to default.
|
||||
- Hit `f` while an image is active to take a screenshot (uses MPV's screenshot pipeline) and get an OSD confirmation.
|
||||
- When MPV loads a video again, the script restores the regular video shortcuts automatically.
|
||||
|
||||
## Common examples 💡
|
||||
|
||||
Simple download with metadata (tags and URL registration):
|
||||
@@ -99,10 +114,12 @@ download-media [URL] | add-file -store hydrus
|
||||
- For Playwright screenshots, run `python ./scripts/setup.py` (installs Chromium by default to save download space). To install all engines, run `python ./scripts/setup.py --browsers all`.
|
||||
- Note: the `screen-shot` cmdlet forces the Playwright **Chromium** engine and will not use Firefox or WebKit.
|
||||
- To run tests locally after removing `tests/conftest.py`, install the project in editable mode first so tests can import the package: `python -m pip install -e .` or run `python ./scripts/setup.py --install-editable`.
|
||||
- Deno: this setup script now **installs Deno by default**. To opt out, run `python ./scripts/setup.py --no-deno`. You can still pin a version: `python ./scripts/setup.py --deno-version v1.34.3`.
|
||||
- Deno: The bootstrap scripts will install Deno automatically if it's not already installed (using the official installers). If the installer completes but `deno` is not available in your shell, restart your shell or add `$HOME/.deno/bin` (Windows: `%USERPROFILE%\\.deno\\bin`) to your PATH.
|
||||
|
||||
After installation, restart your shell (or add Deno's bin directory to your PATH) so `deno` is available on the command line.
|
||||
- Use `--debug` to enable verbose logs when tracking down an error.
|
||||
|
||||
## Contributing & docs ✨
|
||||
## Contributing & docs
|
||||
- Developer docs are generated under `docs/` and tests live alongside the code; please run the test suite before submitting changes.
|
||||
- Contributions welcome—open issues or pull requests with clear descriptions and small, focused diffs.
|
||||
|
||||
|
||||
@@ -8,7 +8,6 @@ textual>=0.30.0
|
||||
yt-dlp[default]>=2023.11.0
|
||||
requests>=2.31.0
|
||||
httpx>=0.25.0
|
||||
ffmpeg-python>=0.2.0
|
||||
telethon>=1.36.0
|
||||
|
||||
# Document and data handling
|
||||
@@ -19,13 +18,13 @@ cbor2>=4.0
|
||||
# Image and media support
|
||||
Pillow>=10.0.0
|
||||
python-bidi>=0.4.2
|
||||
ffmpeg-python>=0.2.0
|
||||
|
||||
# Metadata extraction and processing
|
||||
musicbrainzngs>=0.7.0
|
||||
lxml>=4.9.0
|
||||
|
||||
# Advanced searching and libraries
|
||||
libgen-api>=1.0.0
|
||||
aioslsk>=1.6.0
|
||||
imdbinfo>=0.1.10
|
||||
|
||||
|
||||
@@ -15,6 +15,19 @@ from typing import Any, TextIO
|
||||
from rich.console import Console
|
||||
|
||||
|
||||
# Configure Rich pretty-printing to avoid truncating long strings (hashes/paths).
|
||||
# This is version-safe: older Rich versions may not support the max_* arguments.
|
||||
try:
|
||||
from rich.pretty import install as _pretty_install
|
||||
|
||||
try:
|
||||
_pretty_install(max_string=100_000, max_length=100_000)
|
||||
except TypeError:
|
||||
_pretty_install()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
_STDOUT_CONSOLE = Console(file=sys.stdout)
|
||||
_STDERR_CONSOLE = Console(file=sys.stderr)
|
||||
|
||||
|
||||
319
scripts/bootstrap.ps1
Normal file
319
scripts/bootstrap.ps1
Normal file
@@ -0,0 +1,319 @@
|
||||
<#
|
||||
.SYNOPSIS
|
||||
Bootstrap a Python virtualenv and install the project on Windows (PowerShell).
|
||||
|
||||
.DESCRIPTION
|
||||
Creates a Python virtual environment (default: .venv), upgrades pip, installs the project
|
||||
(either editable or normal), and optionally creates Desktop and Start Menu shortcuts.
|
||||
|
||||
.EXAMPLE
|
||||
# Create .venv and install in editable mode, create Desktop shortcut
|
||||
.\scripts\bootstrap.ps1 -Editable -CreateDesktopShortcut
|
||||
|
||||
.EXAMPLE
|
||||
# Use a specific python executable and force overwrite existing venv
|
||||
.\scripts\bootstrap.ps1 -Python "C:\\Python39\\python.exe" -Force
|
||||
# Note: you may need to run PowerShell with ExecutionPolicy Bypass:
|
||||
# powershell -ExecutionPolicy Bypass -File .\scripts\bootstrap.ps1 -Editable
|
||||
#>
|
||||
|
||||
param(
|
||||
[switch]$Editable,
|
||||
[switch]$CreateDesktopShortcut,
|
||||
[switch]$CreateStartMenuShortcut,
|
||||
[string]$VenvPath = ".venv",
|
||||
[string]$Python = "",
|
||||
[switch]$Force,
|
||||
[switch]$NoInstall,
|
||||
[switch]$Quiet
|
||||
)
|
||||
|
||||
function Write-Log {
|
||||
param([string]$msg,[string]$lvl="INFO")
|
||||
if (-not $Quiet) {
|
||||
if ($lvl -eq "ERROR") { Write-Host "[$lvl] $msg" -ForegroundColor Red } else { Write-Host "[$lvl] $msg" }
|
||||
}
|
||||
}
|
||||
|
||||
function Find-Python {
|
||||
param([string]$preferred)
|
||||
$candidates = @()
|
||||
if ($preferred -and $preferred.Trim()) { $candidates += $preferred }
|
||||
$candidates += @("python","python3","py")
|
||||
foreach ($c in $candidates) {
|
||||
try {
|
||||
if ($c -eq "py") {
|
||||
$out = & py -3 -c "import sys, json; print(sys.executable)" 2>$null
|
||||
if ($out) { return $out.Trim() }
|
||||
} else {
|
||||
$out = & $c -c "import sys, json; print(sys.executable)" 2>$null
|
||||
if ($out) { return $out.Trim() }
|
||||
}
|
||||
} catch {}
|
||||
}
|
||||
return $null
|
||||
}
|
||||
|
||||
# Resolve OS detection in a broad-compatible way
|
||||
try { $IsWindowsPlatform = [System.Runtime.InteropServices.RuntimeInformation]::IsOSPlatform([System.Runtime.InteropServices.OSPlatform]::Windows) } catch { $IsWindowsPlatform = $env:OS -match 'Windows' }
|
||||
|
||||
# operate from repo root (parent of scripts dir)
|
||||
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
|
||||
$repoRoot = (Resolve-Path (Join-Path $scriptDir "..")).Path
|
||||
Set-Location $repoRoot
|
||||
|
||||
$pythonExe = Find-Python -preferred $Python
|
||||
if (-not $pythonExe) { Write-Log "No Python interpreter found. Specify -Python <path> or install Python." "ERROR"; exit 2 }
|
||||
Write-Log "Using Python: $pythonExe"
|
||||
|
||||
# Full venv path
|
||||
try { $venvFull = (Resolve-Path -LiteralPath $VenvPath -ErrorAction SilentlyContinue).Path } catch { $venvFull = $null }
|
||||
if (-not $venvFull) { $venvFull = (Join-Path $repoRoot $VenvPath) }
|
||||
|
||||
# Handle existing venv
|
||||
$venvExists = Test-Path $venvFull
|
||||
if ($venvExists) {
|
||||
if ($Force) {
|
||||
Write-Log "Removing existing venv at $venvFull"
|
||||
Remove-Item -Recurse -Force $venvFull
|
||||
$venvExists = $false
|
||||
} else {
|
||||
# Quick health check: does the existing venv have a python executable?
|
||||
$venvPy1 = Join-Path $venvFull "Scripts\python.exe"
|
||||
$venvPy2 = Join-Path $venvFull "bin/python"
|
||||
$venvHasPython = $false
|
||||
try {
|
||||
if (Test-Path $venvPy1 -PathType Leaf -ErrorAction SilentlyContinue) { $venvHasPython = $true }
|
||||
elseif (Test-Path $venvPy2 -PathType Leaf -ErrorAction SilentlyContinue) { $venvHasPython = $true }
|
||||
} catch {}
|
||||
|
||||
if (-not $venvHasPython) {
|
||||
if ($Quiet) {
|
||||
Write-Log "Existing venv appears incomplete or broken and quiet mode prevents prompting. Use -Force to recreate." "ERROR"
|
||||
exit 4
|
||||
}
|
||||
$ans = Read-Host "$venvFull exists but appears invalid (no python executable). Overwrite to recreate? (y/N)"
|
||||
if ($ans -eq 'y' -or $ans -eq 'Y') {
|
||||
Write-Log "Removing broken venv at $venvFull"
|
||||
Remove-Item -Recurse -Force $venvFull
|
||||
$venvExists = $false
|
||||
} else {
|
||||
Write-Log "Aborted due to broken venv." "ERROR"; exit 4
|
||||
}
|
||||
} else {
|
||||
if ($Quiet) {
|
||||
Write-Log "Using existing venv at $venvFull (quiet mode)" "INFO"
|
||||
} else {
|
||||
$ans = Read-Host "$venvFull already exists. Overwrite? (y/N) (default: use existing venv)"
|
||||
if ($ans -eq 'y' -or $ans -eq 'Y') {
|
||||
Write-Log "Removing existing venv at $venvFull"
|
||||
Remove-Item -Recurse -Force $venvFull
|
||||
$venvExists = $false
|
||||
} else {
|
||||
Write-Log "Continuing using existing venv at $venvFull" "INFO"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (-not (Test-Path $venvFull)) {
|
||||
Write-Log "Creating venv at $venvFull"
|
||||
try {
|
||||
& $pythonExe -m venv $venvFull
|
||||
} catch {
|
||||
Write-Log "Failed to create venv: $_" "ERROR"; exit 3
|
||||
}
|
||||
} else {
|
||||
Write-Log "Using existing venv at $venvFull" "INFO"
|
||||
}
|
||||
|
||||
# Determine venv python executable
|
||||
$venvPython = Join-Path $venvFull "Scripts\python.exe"
|
||||
if (-not (Test-Path $venvPython)) { $venvPython = Join-Path $venvFull "bin/python" }
|
||||
if (-not (Test-Path $venvPython)) { Write-Log "Created venv but could not find python inside it." "ERROR"; exit 4 }
|
||||
|
||||
Write-Log "Using venv python: $venvPython"
|
||||
|
||||
if (-not $NoInstall) {
|
||||
Write-Log "Upgrading pip, setuptools, wheel"
|
||||
try { & $venvPython -m pip install -U pip setuptools wheel } catch { Write-Log "pip upgrade failed: $_" "ERROR"; exit 5 }
|
||||
|
||||
if ($Editable) { $editable_label = "(editable)" } else { $editable_label = "" }
|
||||
Write-Log ("Installing project {0}" -f $editable_label)
|
||||
try {
|
||||
if ($Editable) { & $venvPython -m pip install -e . } else { & $venvPython -m pip install . }
|
||||
} catch {
|
||||
Write-Log "pip install failed: $_" "ERROR"; exit 6
|
||||
}
|
||||
} else {
|
||||
Write-Log "Skipping install (--NoInstall set)"
|
||||
}
|
||||
|
||||
# Install Deno (official installer) - installed automatically
|
||||
try {
|
||||
$denoCmd = Get-Command 'deno' -ErrorAction SilentlyContinue
|
||||
} catch {
|
||||
$denoCmd = $null
|
||||
}
|
||||
if ($denoCmd) {
|
||||
Write-Log "Deno is already installed: $($denoCmd.Path)"
|
||||
} else {
|
||||
Write-Log "Installing Deno via official installer (https://deno.land)"
|
||||
try {
|
||||
try {
|
||||
irm https://deno.land/install.ps1 | iex
|
||||
} catch {
|
||||
iwr https://deno.land/install.ps1 -UseBasicParsing | iex
|
||||
}
|
||||
# Ensure common install locations are on PATH for this session
|
||||
$denoCandidatePaths = @(
|
||||
Join-Path $env:USERPROFILE ".deno\bin",
|
||||
Join-Path $env:LOCALAPPDATA "deno\bin"
|
||||
)
|
||||
foreach ($p in $denoCandidatePaths) {
|
||||
if (Test-Path $p) {
|
||||
if ($env:PATH -notmatch [regex]::Escape($p)) {
|
||||
$env:PATH = $env:PATH + ";" + $p
|
||||
}
|
||||
}
|
||||
}
|
||||
$v = & deno --version 2>$null
|
||||
if ($v) {
|
||||
Write-Log "Deno installed: $v"
|
||||
} else {
|
||||
Write-Log "Deno installer completed but 'deno' not found on PATH; you may need to restart your shell or add the Deno bin folder to PATH." "ERROR"
|
||||
}
|
||||
} catch {
|
||||
Write-Log "Deno install failed: $_" "ERROR"
|
||||
}
|
||||
}
|
||||
|
||||
# Shortcuts (Windows only)
|
||||
if ($IsWindowsPlatform) {
|
||||
if ($CreateDesktopShortcut -or $CreateStartMenuShortcut) {
|
||||
$wsh = New-Object -ComObject WScript.Shell
|
||||
$mmExe = Join-Path $venvFull "Scripts\mm.exe"
|
||||
$target = $null
|
||||
$args = ""
|
||||
if (Test-Path $mmExe) {
|
||||
$target = $mmExe
|
||||
} else {
|
||||
$target = $venvPython
|
||||
$args = "-m medeia_macina.cli_entry"
|
||||
}
|
||||
if ($CreateDesktopShortcut) {
|
||||
$desk = [Environment]::GetFolderPath('Desktop')
|
||||
$link = Join-Path $desk "Medeia-Macina.lnk"
|
||||
Write-Log "Creating Desktop shortcut: $link"
|
||||
$sc = $wsh.CreateShortcut($link)
|
||||
$sc.TargetPath = $target
|
||||
$sc.Arguments = $args
|
||||
$sc.WorkingDirectory = $repoRoot
|
||||
$sc.IconLocation = "$target,0"
|
||||
$sc.Save()
|
||||
}
|
||||
if ($CreateStartMenuShortcut) {
|
||||
$start = Join-Path ([Environment]::GetFolderPath('ApplicationData')) 'Microsoft\Windows\Start Menu\Programs'
|
||||
$dir = Join-Path $start "Medeia-Macina"
|
||||
New-Item -ItemType Directory -Path $dir -Force | Out-Null
|
||||
$link2 = Join-Path $dir "Medeia-Macina.lnk"
|
||||
Write-Log "Creating Start Menu shortcut: $link2"
|
||||
$sc2 = $wsh.CreateShortcut($link2)
|
||||
$sc2.TargetPath = $target
|
||||
$sc2.Arguments = $args
|
||||
$sc2.WorkingDirectory = $repoRoot
|
||||
$sc2.IconLocation = "$target,0"
|
||||
$sc2.Save()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Install global 'mm' launcher into the user's bin directory so it can be invoked from any shell.
|
||||
try {
|
||||
$globalBin = Join-Path $env:USERPROFILE 'bin'
|
||||
New-Item -ItemType Directory -Path $globalBin -Force | Out-Null
|
||||
|
||||
$mmCmd = Join-Path $globalBin 'mm.cmd'
|
||||
$mmPs1 = Join-Path $globalBin 'mm.ps1'
|
||||
|
||||
$repo = $repoRoot
|
||||
|
||||
$cmdText = @"
|
||||
@echo off
|
||||
set "REPO=__REPO__"
|
||||
if exist "%REPO%\.venv\Scripts\mm.exe" (
|
||||
"%REPO%\.venv\Scripts\mm.exe" %*
|
||||
exit /b %ERRORLEVEL%
|
||||
)
|
||||
if exist "%REPO%\.venv\Scripts\python.exe" (
|
||||
"%REPO%\.venv\Scripts\python.exe" -m medeia_macina.cli_entry %*
|
||||
exit /b %ERRORLEVEL%
|
||||
)
|
||||
python -m medeia_macina.cli_entry %*
|
||||
"@
|
||||
# Inject actual repo path safely (escape double-quotes if any)
|
||||
$cmdText = $cmdText.Replace('__REPO__', $repo.Replace('"', '""'))
|
||||
if (Test-Path $mmCmd) {
|
||||
$bak = "$mmCmd.bak$(Get-Date -UFormat %s)"
|
||||
Move-Item -Path $mmCmd -Destination $bak -Force
|
||||
}
|
||||
Set-Content -LiteralPath $mmCmd -Value $cmdText -Encoding UTF8
|
||||
|
||||
# PowerShell shim: use single-quoted here-string so literal PowerShell variables
|
||||
# (like $args) are not expanded by this script when writing the file.
|
||||
$ps1Text = @'
|
||||
Param([Parameter(ValueFromRemainingArguments=$true)] $args)
|
||||
$repo = "__REPO__"
|
||||
$venv = Join-Path $repo '.venv'
|
||||
$exe = Join-Path $venv 'Scripts\mm.exe'
|
||||
if (Test-Path $exe) { & $exe @args; exit $LASTEXITCODE }
|
||||
$py = Join-Path $venv 'Scripts\python.exe'
|
||||
if (Test-Path $py) { & $py -m medeia_entry @args; exit $LASTEXITCODE }
|
||||
# fallback
|
||||
python -m medeia_entry @args
|
||||
'@
|
||||
# Inject the actual repo path safely (escape embedded double-quotes if any)
|
||||
$ps1Text = $ps1Text.Replace('__REPO__', $repo.Replace('"', '""'))
|
||||
# Ensure the PowerShell shim falls back to the correct module when the venv isn't present
|
||||
$ps1Text = $ps1Text.Replace(' -m medeia_entry ', ' -m medeia_macina.cli_entry ')
|
||||
$ps1Text = $ps1Text.Replace('python -m medeia_entry', 'python -m medeia_macina.cli_entry')
|
||||
if (Test-Path $mmPs1) {
|
||||
$bak = "$mmPs1.bak$(Get-Date -UFormat %s)"
|
||||
Move-Item -Path $mmPs1 -Destination $bak -Force
|
||||
}
|
||||
Set-Content -LiteralPath $mmPs1 -Value $ps1Text -Encoding UTF8
|
||||
|
||||
# Ensure user's bin is on PATH (User env var)
|
||||
try {
|
||||
$cur = [Environment]::GetEnvironmentVariable('PATH', 'User')
|
||||
if ($cur -notlike "*$globalBin*") {
|
||||
if ($cur) { $new = ($globalBin + ';' + $cur) } else { $new = $globalBin }
|
||||
[Environment]::SetEnvironmentVariable('PATH', $new, 'User')
|
||||
# Update current session PATH for immediate use
|
||||
$env:PATH = $globalBin + ';' + $env:PATH
|
||||
Write-Log "Added $globalBin to User PATH. Restart your shell to pick this up." "INFO"
|
||||
} else {
|
||||
Write-Log "$globalBin is already on the User PATH" "INFO"
|
||||
}
|
||||
} catch {
|
||||
Write-Log "Failed to update user PATH: $_" "ERROR"
|
||||
}
|
||||
} catch {
|
||||
Write-Log "Failed to install global launcher: $_" "ERROR"
|
||||
}
|
||||
|
||||
Write-Log "Bootstrap complete." "INFO"
|
||||
Write-Host ""
|
||||
Write-Host "To activate the venv:"
|
||||
if ($IsWindowsPlatform) {
|
||||
Write-Host " PS> .\$VenvPath\Scripts\Activate.ps1"
|
||||
Write-Host " CMD> .\$VenvPath\Scripts\activate.bat"
|
||||
} else {
|
||||
Write-Host " $ source ./$VenvPath/bin/activate"
|
||||
}
|
||||
Write-Host ""
|
||||
Write-Host "To run the app:"
|
||||
Write-Host " $ .\$VenvPath\Scripts\mm.exe (Windows) or"
|
||||
Write-Host " $ ./$VenvPath/bin/mm (Linux) or"
|
||||
Write-Host " $ $venvPython -m medeia_macina.cli_entry"
|
||||
220
scripts/bootstrap.sh
Normal file
220
scripts/bootstrap.sh
Normal file
@@ -0,0 +1,220 @@
|
||||
#!/usr/bin/env bash
|
||||
# Bootstrap script for POSIX (Linux/macOS) to create a Python venv and install the project.
|
||||
# Usage: scripts/bootstrap.sh [--editable] [--venv <path>] [--python <python>] [--desktop] [--no-install]
|
||||
set -euo pipefail
|
||||
|
||||
VENV_PATH=".venv"
|
||||
EDITABLE=false
|
||||
DESKTOP=false
|
||||
PYTHON_CMD=""
|
||||
NOINSTALL=false
|
||||
FORCE=false
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
Usage: $0 [options]
|
||||
Options:
|
||||
-e, --editable Install project in editable mode (pip -e .)
|
||||
-p, --venv <path> Venv path (default: .venv)
|
||||
--python <python> Python executable to use (e.g. python3)
|
||||
-d, --desktop Create a desktop launcher (~/.local/share/applications and ~/Desktop)
|
||||
-n, --no-install Skip pip install
|
||||
-f, --force Overwrite existing venv without prompting
|
||||
-h, --help Show this help
|
||||
EOF
|
||||
}
|
||||
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case "$1" in
|
||||
-e|--editable) EDITABLE=true; shift;;
|
||||
-p|--venv) VENV_PATH="$2"; shift 2;;
|
||||
--python) PYTHON_CMD="$2"; shift 2;;
|
||||
-d|--desktop) DESKTOP=true; shift;;
|
||||
-n|--no-install) NOINSTALL=true; shift;;
|
||||
-f|--force) FORCE=true; shift;;
|
||||
-h|--help) usage; exit 0;;
|
||||
*) echo "Unknown option: $1"; usage; exit 1;;
|
||||
esac
|
||||
done
|
||||
|
||||
if [[ -n "$PYTHON_CMD" ]]; then
|
||||
PY="$PYTHON_CMD"
|
||||
elif command -v python3 >/dev/null 2>&1; then
|
||||
PY=python3
|
||||
elif command -v python >/dev/null 2>&1; then
|
||||
PY=python
|
||||
else
|
||||
echo "ERROR: No python executable found; install Python or pass --python <path>" >&2
|
||||
exit 2
|
||||
fi
|
||||
|
||||
echo "Using Python: $PY"
|
||||
|
||||
if [[ -d "$VENV_PATH" ]]; then
|
||||
if [[ "$FORCE" == "true" ]]; then
|
||||
echo "Removing existing venv $VENV_PATH"
|
||||
rm -rf "$VENV_PATH"
|
||||
else
|
||||
read -p "$VENV_PATH already exists. Overwrite? [y/N] " REPLY
|
||||
if [[ "$REPLY" != "y" && "$REPLY" != "Y" ]]; then
|
||||
echo "Aborted."; exit 0
|
||||
fi
|
||||
rm -rf "$VENV_PATH"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Creating venv at $VENV_PATH"
|
||||
$PY -m venv "$VENV_PATH"
|
||||
VENV_PY="$VENV_PATH/bin/python"
|
||||
|
||||
if [[ ! -x "$VENV_PY" ]]; then
|
||||
echo "ERROR: venv python not found at $VENV_PY" >&2
|
||||
exit 3
|
||||
fi
|
||||
|
||||
if [[ "$NOINSTALL" != "true" ]]; then
|
||||
echo "Upgrading pip, setuptools, wheel..."
|
||||
"$VENV_PY" -m pip install -U pip setuptools wheel
|
||||
|
||||
if [[ "$EDITABLE" == "true" ]]; then
|
||||
echo "Installing project in editable mode..."
|
||||
"$VENV_PY" -m pip install -e .
|
||||
else
|
||||
echo "Installing project..."
|
||||
"$VENV_PY" -m pip install .
|
||||
fi
|
||||
|
||||
# Verify the installed CLI module can be imported. This helps catch packaging
|
||||
# or installation problems early (e.g., missing modules or mispackaged project).
|
||||
echo "Verifying installed CLI import..."
|
||||
if "$VENV_PY" -c 'import importlib; importlib.import_module("medeia_macina.cli_entry")' >/dev/null 2>&1; then
|
||||
echo "OK: 'medeia_macina.cli_entry' is importable in the venv."
|
||||
else
|
||||
echo "WARNING: Could not import 'medeia_macina.cli_entry' from the venv." >&2
|
||||
# Check if legacy top-level module is present; if so, inform the user to prefer the packaged entrypoint
|
||||
if "$VENV_PY" -c 'import importlib; importlib.import_module("medeia_entry")' >/dev/null 2>&1; then
|
||||
echo "Note: 'medeia_entry' top-level module is present. It's recommended to install the project so 'medeia_macina.cli_entry' is available." >&2
|
||||
else
|
||||
echo "Action: Try running: $VENV_PY -m pip install -e . or inspect the venv site-packages to verify the installation." >&2
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "Skipping install (--no-install)"
|
||||
fi
|
||||
|
||||
# Install Deno (official installer) - installed automatically
|
||||
if command -v deno >/dev/null 2>&1; then
|
||||
echo "Deno already installed: $(deno --version | head -n 1)"
|
||||
else
|
||||
echo "Installing Deno via official installer (https://deno.land)..."
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -fsSL https://deno.land/install.sh | sh
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
wget -qO- https://deno.land/install.sh | sh
|
||||
else
|
||||
echo "ERROR: curl or wget is required to install Deno automatically; please install Deno manually." >&2
|
||||
fi
|
||||
export DENO_INSTALL="${DENO_INSTALL:-$HOME/.deno}"
|
||||
export PATH="$DENO_INSTALL/bin:$PATH"
|
||||
if command -v deno >/dev/null 2>&1; then
|
||||
echo "Deno installed: $(deno --version | head -n 1)"
|
||||
else
|
||||
echo "Warning: Deno installer completed but 'deno' not found on PATH; add $HOME/.deno/bin to your PATH or restart your shell." >&2
|
||||
fi
|
||||
fi
|
||||
|
||||
if [[ "$DESKTOP" == "true" ]]; then
|
||||
echo "Creating desktop launcher..."
|
||||
EXEC_PATH="$VENV_PATH/bin/mm"
|
||||
if [[ ! -x "$EXEC_PATH" ]]; then
|
||||
# fallback to python -m
|
||||
EXEC_PATH="$VENV_PY -m medeia_macina.cli_entry"
|
||||
fi
|
||||
|
||||
APPDIR="$HOME/.local/share/applications"
|
||||
mkdir -p "$APPDIR"
|
||||
DESKTOP_FILE="$APPDIR/medeia-macina.desktop"
|
||||
cat > "$DESKTOP_FILE" <<EOF
|
||||
[Desktop Entry]
|
||||
Name=Medeia-Macina
|
||||
Comment=Launch Medeia-Macina
|
||||
Exec=$EXEC_PATH
|
||||
Terminal=true
|
||||
Type=Application
|
||||
Categories=Utility;
|
||||
EOF
|
||||
chmod +x "$DESKTOP_FILE" || true
|
||||
if [[ -d "$HOME/Desktop" ]]; then
|
||||
cp "$DESKTOP_FILE" "$HOME/Desktop/"
|
||||
chmod +x "$HOME/Desktop/$(basename "$DESKTOP_FILE")" || true
|
||||
fi
|
||||
echo "Desktop launcher created: $DESKTOP_FILE"
|
||||
fi
|
||||
|
||||
# Install a global 'mm' launcher so it can be invoked from any shell.
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
REPO="$(cd "$SCRIPT_DIR/.." && pwd)"
|
||||
USER_BIN="${XDG_BIN_HOME:-$HOME/.local/bin}"
|
||||
mkdir -p "$USER_BIN"
|
||||
if [[ -f "$USER_BIN/mm" ]]; then
|
||||
echo "Backing up existing $USER_BIN/mm to $USER_BIN/mm.bak.$(date +%s)"
|
||||
mv "$USER_BIN/mm" "$USER_BIN/mm.bak.$(date +%s)"
|
||||
fi
|
||||
cat > "$USER_BIN/mm" <<EOF
|
||||
#!/usr/bin/env bash
|
||||
REPO="$REPO"
|
||||
VENV="$REPO/.venv"
|
||||
if [ -x "$VENV/bin/mm" ]; then
|
||||
exec "$VENV/bin/mm" "$@"
|
||||
elif [ -x "$VENV/bin/python" ]; then
|
||||
exec "$VENV/bin/python" -m medeia_macina.cli_entry "$@"
|
||||
else
|
||||
exec python -m medeia_macina.cli_entry "$@"
|
||||
fi
|
||||
EOF
|
||||
chmod +x "$USER_BIN/mm"
|
||||
|
||||
# Quick verification of the global launcher; helps catch packaging issues early.
|
||||
if "$USER_BIN/mm" --help >/dev/null 2>&1; then
|
||||
echo "Global 'mm' launcher verified: $USER_BIN/mm runs correctly."
|
||||
else
|
||||
echo "Warning: Global 'mm' launcher failed to run in this shell. Ensure $USER_BIN is on your PATH and the venv is installed; try: $VENV/bin/python -m medeia_macina.cli_entry --help" >&2
|
||||
fi
|
||||
|
||||
# Ensure the user's bin is on PATH for future sessions by adding to ~/.profile if needed
|
||||
if ! echo ":$PATH:" | grep -q ":$USER_BIN:"; then
|
||||
PROFILE="$HOME/.profile"
|
||||
if [ ! -f "$PROFILE" ]; then
|
||||
if [ -f "$HOME/.bash_profile" ]; then
|
||||
PROFILE="$HOME/.bash_profile"
|
||||
elif [ -f "$HOME/.bashrc" ]; then
|
||||
PROFILE="$HOME/.bashrc"
|
||||
elif [ -f "$HOME/.zshrc" ]; then
|
||||
PROFILE="$HOME/.zshrc"
|
||||
else
|
||||
PROFILE="$HOME/.profile"
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! grep -q "ensure user local bin is on PATH" "$PROFILE" 2>/dev/null; then
|
||||
cat >> "$PROFILE" <<PROFILE_SNIPPET
|
||||
# Added by Medeia-Macina setup: ensure user local bin is on PATH
|
||||
if [ -d "$HOME/.local/bin" ] && [[ ":$PATH:" != *":$HOME/.local/bin:"* ]]; then
|
||||
PATH="$HOME/.local/bin:$PATH"
|
||||
fi
|
||||
PROFILE_SNIPPET
|
||||
echo "Added $USER_BIN export to $PROFILE; restart your shell or source $PROFILE to use 'mm' from anywhere"
|
||||
fi
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
|
||||
Bootstrap complete.
|
||||
To activate the virtualenv:
|
||||
source $VENV_PATH/bin/activate
|
||||
To run the app:
|
||||
$VENV_PATH/bin/mm # if installed as a console script
|
||||
$VENV_PY -m medeia_macina.cli_entry # alternative
|
||||
|
||||
Global launcher installed: $USER_BIN/mm
|
||||
EOF
|
||||
227
scripts/setup.py
227
scripts/setup.py
@@ -32,6 +32,8 @@ import sys
|
||||
from pathlib import Path
|
||||
import platform
|
||||
import shutil
|
||||
import os
|
||||
import time
|
||||
|
||||
|
||||
def run(cmd: list[str]) -> None:
|
||||
@@ -132,6 +134,50 @@ def main() -> int:
|
||||
if sys.version_info < (3, 8):
|
||||
print("Warning: Python 3.8+ is recommended.", file=sys.stderr)
|
||||
|
||||
# Opinionated: always create or use a local venv at the project root (.venv)
|
||||
venv_dir = repo_root / ".venv"
|
||||
|
||||
def _venv_python(p: Path) -> Path:
|
||||
if platform.system().lower() == "windows":
|
||||
return p / "Scripts" / "python.exe"
|
||||
return p / "bin" / "python"
|
||||
|
||||
def _ensure_local_venv() -> Path:
|
||||
"""Create (if missing) and return the path to the venv's python executable.
|
||||
|
||||
This is intentionally opinionated: we keep a venv at `./.venv` in the repo root
|
||||
and use that for all package operations to keep developer environments reproducible.
|
||||
"""
|
||||
try:
|
||||
if not venv_dir.exists():
|
||||
print(f"Creating local virtualenv at: {venv_dir}")
|
||||
run([sys.executable, "-m", "venv", str(venv_dir)])
|
||||
else:
|
||||
print(f"Using existing virtualenv at: {venv_dir}")
|
||||
|
||||
py = _venv_python(venv_dir)
|
||||
if not py.exists():
|
||||
# Try recreating venv if python is missing
|
||||
print(f"Local venv python not found at {py}; recreating venv")
|
||||
run([sys.executable, "-m", "venv", str(venv_dir)])
|
||||
py = _venv_python(venv_dir)
|
||||
if not py.exists():
|
||||
raise RuntimeError(f"Unable to locate venv python at {py}")
|
||||
return py
|
||||
except subprocess.CalledProcessError as exc:
|
||||
print(f"Failed to create or prepare local venv: {exc}", file=sys.stderr)
|
||||
raise
|
||||
|
||||
# Ensure a local venv is present and use it for subsequent installs.
|
||||
venv_python = _ensure_local_venv()
|
||||
print(f"Using venv python: {venv_python}")
|
||||
|
||||
# Enforce opinionated behavior: install deps, playwright, deno, and install project in editable mode.
|
||||
# Ignore `--skip-deps` and `--install-editable` flags to keep the setup deterministic.
|
||||
args.skip_deps = False
|
||||
args.install_editable = True
|
||||
args.no_playwright = False
|
||||
|
||||
try:
|
||||
if args.playwright_only:
|
||||
if not playwright_package_installed():
|
||||
@@ -150,21 +196,21 @@ def main() -> int:
|
||||
return 0
|
||||
|
||||
if args.upgrade_pip:
|
||||
print("Upgrading pip, setuptools, and wheel...")
|
||||
run([sys.executable, "-m", "pip", "install", "--upgrade", "pip", "setuptools", "wheel"])
|
||||
print("Upgrading pip, setuptools, and wheel in local venv...")
|
||||
run([str(venv_python), "-m", "pip", "install", "--upgrade", "pip", "setuptools", "wheel"])
|
||||
|
||||
if not args.skip_deps:
|
||||
req_file = repo_root / "requirements.txt"
|
||||
if not req_file.exists():
|
||||
print(f"requirements.txt not found at {req_file}; skipping dependency installation.", file=sys.stderr)
|
||||
else:
|
||||
print(f"Installing Python dependencies from {req_file}...")
|
||||
run([sys.executable, "-m", "pip", "install", "-r", str(req_file)])
|
||||
print(f"Installing Python dependencies into local venv from {req_file}...")
|
||||
run([str(venv_python), "-m", "pip", "install", "-r", str(req_file)])
|
||||
|
||||
if not args.no_playwright:
|
||||
if not playwright_package_installed():
|
||||
print("'playwright' package not installed; installing it...")
|
||||
run([sys.executable, "-m", "pip", "install", "playwright"])
|
||||
print("'playwright' package not installed in venv; installing it...")
|
||||
run([str(venv_python), "-m", "pip", "install", "playwright"])
|
||||
|
||||
print("Installing Playwright browsers (this may download several hundred MB)...")
|
||||
try:
|
||||
@@ -173,12 +219,14 @@ def main() -> int:
|
||||
print(f"Error: {exc}", file=sys.stderr)
|
||||
return 2
|
||||
|
||||
# Run Playwright install using the venv's python so binaries are available in venv
|
||||
cmd[0] = str(venv_python)
|
||||
run(cmd)
|
||||
|
||||
# Optional: install the project in editable mode so tests can import the package
|
||||
if args.install_editable:
|
||||
print("Installing project in editable mode (pip install -e .) ...")
|
||||
run([sys.executable, "-m", "pip", "install", "-e", "."])
|
||||
# Install the project into the local venv (editable mode is the default, opinionated)
|
||||
print("Installing project into local venv (editable mode)")
|
||||
run([str(venv_python), "-m", "pip", "install", "-e", "."])
|
||||
|
||||
# Optional: install Deno runtime (default: install unless --no-deno is passed)
|
||||
install_deno_requested = True
|
||||
@@ -188,12 +236,171 @@ def main() -> int:
|
||||
install_deno_requested = True
|
||||
|
||||
if install_deno_requested:
|
||||
print("Installing Deno runtime...")
|
||||
print("Installing Deno runtime (local/system)...")
|
||||
rc = _install_deno(args.deno_version)
|
||||
if rc != 0:
|
||||
print("Deno installation failed.", file=sys.stderr)
|
||||
return rc
|
||||
|
||||
# Write project-local launcher scripts (project root) that prefer the local .venv
|
||||
def _write_launchers():
|
||||
sh = repo_root / "mm"
|
||||
ps1 = repo_root / "mm.ps1"
|
||||
bat = repo_root / "mm.bat"
|
||||
|
||||
sh_text = """#!/usr/bin/env bash
|
||||
set -e
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
VENV="$SCRIPT_DIR/.venv"
|
||||
if [ -x "$VENV/bin/mm" ]; then
|
||||
exec "$VENV/bin/mm" "$@"
|
||||
elif [ -x "$VENV/bin/python" ]; then
|
||||
exec "$VENV/bin/python" -m medeia_entry "$@"
|
||||
else
|
||||
exec python -m medeia_entry "$@"
|
||||
fi
|
||||
"""
|
||||
try:
|
||||
sh.write_text(sh_text, encoding="utf-8")
|
||||
sh.chmod(sh.stat().st_mode | 0o111)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
ps1_text = r"""Param([Parameter(ValueFromRemainingArguments=$true)] $args)
|
||||
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
|
||||
$venv = Join-Path $scriptDir '.venv'
|
||||
$exe = Join-Path $venv 'Scripts\mm.exe'
|
||||
if (Test-Path $exe) { & $exe @args; exit $LASTEXITCODE }
|
||||
$py = Join-Path $venv 'Scripts\python.exe'
|
||||
if (Test-Path $py) { & $py -m medeia_entry @args; exit $LASTEXITCODE }
|
||||
# fallback
|
||||
python -m medeia_entry @args
|
||||
"""
|
||||
try:
|
||||
ps1.write_text(ps1_text, encoding="utf-8")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
bat_text = (
|
||||
"@echo off\r\n"
|
||||
"set SCRIPT_DIR=%~dp0\r\n"
|
||||
"if exist \"%SCRIPT_DIR%\\.venv\\Scripts\\mm.exe\" \"%SCRIPT_DIR%\\.venv\\Scripts\\mm.exe\" %*\r\n"
|
||||
"if exist \"%SCRIPT_DIR%\\.venv\\Scripts\\python.exe\" \"%SCRIPT_DIR%\\.venv\\Scripts\\python.exe\" -m medeia_entry %*\r\n"
|
||||
"python -m medeia_entry %*\r\n"
|
||||
)
|
||||
try:
|
||||
bat.write_text(bat_text, encoding="utf-8")
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
_write_launchers()
|
||||
|
||||
# Install user-global shims so `mm` can be executed from any shell session.
|
||||
def _install_user_shims(repo: Path) -> None:
|
||||
try:
|
||||
home = Path.home()
|
||||
system = platform.system().lower()
|
||||
|
||||
if system == "windows":
|
||||
user_bin = Path(os.environ.get("USERPROFILE", str(home))) / "bin"
|
||||
user_bin.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Write mm.cmd (CMD shim)
|
||||
mm_cmd = user_bin / "mm.cmd"
|
||||
cmd_text = (
|
||||
f"@echo off\r\n"
|
||||
f"set REPO={repo}\r\n"
|
||||
f"if exist \"%REPO%\\.venv\\Scripts\\mm.exe\" \"%REPO%\\.venv\\Scripts\\mm.exe\" %*\r\n"
|
||||
f"if exist \"%REPO%\\.venv\\Scripts\\python.exe\" \"%REPO%\\.venv\\Scripts\\python.exe\" -m medeia_entry %*\r\n"
|
||||
f"python -m medeia_entry %*\r\n"
|
||||
)
|
||||
if mm_cmd.exists():
|
||||
bak = mm_cmd.with_suffix(f".bak{int(time.time())}")
|
||||
mm_cmd.replace(bak)
|
||||
mm_cmd.write_text(cmd_text, encoding="utf-8")
|
||||
|
||||
# Write mm.ps1 (PowerShell shim)
|
||||
mm_ps1 = user_bin / "mm.ps1"
|
||||
ps1_text = (
|
||||
"Param([Parameter(ValueFromRemainingArguments=$true)] $args)\n"
|
||||
f"$repo = \"{repo}\"\n"
|
||||
"$venv = Join-Path $repo '.venv'\n"
|
||||
"$exe = Join-Path $venv 'Scripts\\mm.exe'\n"
|
||||
"if (Test-Path $exe) { & $exe @args; exit $LASTEXITCODE }\n"
|
||||
"$py = Join-Path $venv 'Scripts\\python.exe'\n"
|
||||
"if (Test-Path $py) { & $py -m medeia_entry @args; exit $LASTEXITCODE }\n"
|
||||
"python -m medeia_entry @args\n"
|
||||
)
|
||||
if mm_ps1.exists():
|
||||
bak = mm_ps1.with_suffix(f".bak{int(time.time())}")
|
||||
mm_ps1.replace(bak)
|
||||
mm_ps1.write_text(ps1_text, encoding="utf-8")
|
||||
|
||||
# Attempt to add user_bin to the user's PATH if it's not present.
|
||||
try:
|
||||
cur = os.environ.get("PATH", "")
|
||||
str_bin = str(user_bin)
|
||||
if str_bin not in cur:
|
||||
ps_cmd = (
|
||||
"$bin = '{bin}';"
|
||||
"$cur = [Environment]::GetEnvironmentVariable('PATH','User');"
|
||||
"if ($cur -notlike \"*$bin*\") {[Environment]::SetEnvironmentVariable('PATH', ($bin + ';' + ($cur -ne $null ? $cur : '')), 'User')}"
|
||||
).format(bin=str_bin.replace('\\','\\\\'))
|
||||
subprocess.run(["powershell","-NoProfile","-Command", ps_cmd], check=False)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
print(f"Installed global launchers to: {user_bin}")
|
||||
|
||||
else:
|
||||
# POSIX
|
||||
user_bin = Path(os.environ.get("XDG_BIN_HOME", str(home / ".local/bin")))
|
||||
user_bin.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
mm_sh = user_bin / "mm"
|
||||
sh_text = (
|
||||
"#!/usr/bin/env bash\n"
|
||||
f"REPO=\"{repo}\"\n"
|
||||
"VENV=\"$REPO/.venv\"\n"
|
||||
"if [ -x \"$VENV/bin/mm\" ]; then\n"
|
||||
" exec \"$VENV/bin/mm\" \"$@\"\n"
|
||||
"elif [ -x \"$VENV/bin/python\" ]; then\n"
|
||||
" exec \"$VENV/bin/python\" -m medeia_entry \"$@\"\n"
|
||||
"else\n"
|
||||
" exec python -m medeia_entry \"$@\"\n"
|
||||
"fi\n"
|
||||
)
|
||||
if mm_sh.exists():
|
||||
bak = mm_sh.with_suffix(f".bak{int(time.time())}")
|
||||
mm_sh.replace(bak)
|
||||
mm_sh.write_text(sh_text, encoding="utf-8")
|
||||
mm_sh.chmod(mm_sh.stat().st_mode | 0o111)
|
||||
|
||||
# Ensure the user's bin is on PATH for future sessions by adding to ~/.profile
|
||||
cur_path = os.environ.get("PATH", "")
|
||||
if str(user_bin) not in cur_path:
|
||||
profile = home / ".profile"
|
||||
snippet = (
|
||||
"# Added by Medeia-Macina setup: ensure user local bin is on PATH\n"
|
||||
"if [ -d \"$HOME/.local/bin\" ] && [[ \":$PATH:\" != *\":$HOME/.local/bin:\"* ]]; then\n"
|
||||
" PATH=\"$HOME/.local/bin:$PATH\"\n"
|
||||
"fi\n"
|
||||
)
|
||||
try:
|
||||
txt = profile.read_text() if profile.exists() else ""
|
||||
if snippet.strip() not in txt:
|
||||
with profile.open("a", encoding="utf-8") as fh:
|
||||
fh.write("\n" + snippet)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
print(f"Installed global launcher to: {mm_sh}")
|
||||
|
||||
except Exception as exc: # pragma: no cover - best effort
|
||||
print(f"Failed to install global shims: {exc}", file=sys.stderr)
|
||||
|
||||
_install_user_shims(repo_root)
|
||||
|
||||
print("Setup complete.")
|
||||
return 0
|
||||
|
||||
|
||||
Reference in New Issue
Block a user