khh
Some checks failed
smoke-mm / Install & smoke test mm --help (push) Has been cancelled
This commit is contained in:
32  .github/workflows/smoke-mm.yml  vendored  Normal file
@@ -0,0 +1,32 @@
name: smoke-mm

on:
  pull_request:
  push:
    branches:
      - main

jobs:
  smoke:
    name: Install & smoke test mm --help
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'

      - name: Create venv and install
        run: |
          python -m venv venv
          . venv/bin/activate
          python -m pip install -U pip
          python -m pip install -e .

      - name: Run smoke test (mm --help)
        run: |
          . venv/bin/activate
          mm --help
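For checking the same thing locally before pushing, a minimal sketch (assumes `python -m pip install -e .` was run in the active environment so the `mm` entry point resolves; the script name is illustrative):

    # smoke_local.py - rough local equivalent of the smoke-mm workflow job.
    import subprocess
    import sys

    def main() -> int:
        try:
            proc = subprocess.run(["mm", "--help"], capture_output=True, text=True, timeout=60)
        except FileNotFoundError:
            print("mm entry point not found; is the venv activated?", file=sys.stderr)
            return 1
        if proc.returncode != 0:
            print(proc.stderr or proc.stdout, file=sys.stderr)
            return proc.returncode
        print("smoke test passed: mm --help exited 0")
        return 0

    if __name__ == "__main__":
        sys.exit(main())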
207  MPV/LUA/main.lua
@@ -4,7 +4,7 @@ local msg = require 'mp.msg'

local M = {}

local MEDEIA_LUA_VERSION = '2025-12-18'
local MEDEIA_LUA_VERSION = '2025-12-24'

-- Track whether uosc is available so menu calls don't fail with
-- "Can't find script 'uosc' to send message to."
@@ -159,6 +159,8 @@ local function write_temp_log(prefix, text)

  local dir = ''
  -- Prefer repo-root Log/ for easier discovery.
  -- NOTE: Avoid spawning cmd.exe/sh just to mkdir on Windows/Linux; console flashes are
  -- highly undesirable. If the directory doesn't exist, we fall back to TEMP.
  do
    local function find_up(start_dir, relative_path, max_levels)
      local d = start_dir
@@ -186,13 +188,6 @@ local function write_temp_log(prefix, text)
|
||||
local parent = cli:match('(.*)[/\\]') or ''
|
||||
if parent ~= '' then
|
||||
dir = utils.join_path(parent, 'Log')
|
||||
-- Best-effort create dir.
|
||||
local sep = package and package.config and package.config:sub(1, 1) or '/'
|
||||
if sep == '\\' then
|
||||
pcall(utils.subprocess, { args = { 'cmd.exe', '/c', 'mkdir "' .. dir .. '" 1>nul 2>nul' } })
|
||||
else
|
||||
pcall(utils.subprocess, { args = { 'sh', '-lc', 'mkdir -p ' .. string.format('%q', dir) .. ' >/dev/null 2>&1' } })
|
||||
end
|
||||
end
|
||||
end
|
||||
end
|
||||
@@ -206,9 +201,17 @@ local function write_temp_log(prefix, text)
|
||||
local name = (prefix or 'medeia-mpv') .. '-' .. tostring(math.floor(mp.get_time() * 1000)) .. '.log'
|
||||
local path = utils.join_path(dir, name)
|
||||
local fh = io.open(path, 'w')
|
||||
if not fh then
|
||||
-- If Log/ wasn't created (or is not writable), fall back to TEMP.
|
||||
local tmp = os.getenv('TEMP') or os.getenv('TMP') or ''
|
||||
if tmp ~= '' and tmp ~= dir then
|
||||
path = utils.join_path(tmp, name)
|
||||
fh = io.open(path, 'w')
|
||||
end
|
||||
if not fh then
|
||||
return nil
|
||||
end
|
||||
end
|
||||
fh:write(text)
|
||||
fh:close()
|
||||
return path
|
||||
@@ -350,6 +353,30 @@ local function _is_windows()
|
||||
return sep == '\\'
|
||||
end
|
||||
|
||||
local function _resolve_python_exe(prefer_no_console)
|
||||
local python = (opts and opts.python_path) and tostring(opts.python_path) or 'python'
|
||||
if (not prefer_no_console) or (not _is_windows()) then
|
||||
return python
|
||||
end
|
||||
|
||||
local low = tostring(python):lower()
|
||||
if low == 'python' then
|
||||
return 'pythonw'
|
||||
end
|
||||
if low == 'python.exe' then
|
||||
return 'pythonw.exe'
|
||||
end
|
||||
if low:sub(-10) == 'python.exe' then
|
||||
local candidate = python:sub(1, #python - 10) .. 'pythonw.exe'
|
||||
if utils.file_info(candidate) then
|
||||
return candidate
|
||||
end
|
||||
return 'pythonw'
|
||||
end
|
||||
-- Already pythonw or some other launcher.
|
||||
return python
|
||||
end
|
||||
|
||||
local function _extract_target_from_memory_uri(text)
|
||||
if type(text) ~= 'string' then
|
||||
return nil
|
||||
@@ -475,10 +502,10 @@ end
|
||||
local function _get_current_item_is_image()
|
||||
local video_info = mp.get_property_native('current-tracks/video')
|
||||
if type(video_info) == 'table' then
|
||||
if video_info.image and not video_info.albumart then
|
||||
if video_info.image == true then
|
||||
return true
|
||||
end
|
||||
if video_info.image == false and video_info.albumart == true then
|
||||
if video_info.image == false then
|
||||
return false
|
||||
end
|
||||
end
|
||||
@@ -489,8 +516,6 @@ local function _get_current_item_is_image()
|
||||
return false
|
||||
end
|
||||
|
||||
-- Cover art / splash support disabled (removed per user request)
|
||||
|
||||
|
||||
local function _set_image_property(value)
|
||||
pcall(mp.set_property_native, 'user-data/mpv/image', value and true or false)
|
||||
@@ -789,7 +814,8 @@ local function _pick_folder_windows()
  -- Native folder picker via PowerShell + WinForms.
  local ps = [[Add-Type -AssemblyName System.Windows.Forms; $d = New-Object System.Windows.Forms.FolderBrowserDialog; $d.Description = 'Select download folder'; $d.ShowNewFolderButton = $true; if ($d.ShowDialog() -eq [System.Windows.Forms.DialogResult]::OK) { $d.SelectedPath }]]
  local res = utils.subprocess({
    args = { 'powershell', '-NoProfile', '-STA', '-ExecutionPolicy', 'Bypass', '-Command', ps },
    -- Hide the PowerShell console window (dialog still shows).
    args = { 'powershell', '-NoProfile', '-WindowStyle', 'Hidden', '-STA', '-ExecutionPolicy', 'Bypass', '-Command', ps },
    cancellable = false,
  })
  if res and res.status == 0 and res.stdout then
@@ -807,8 +833,8 @@ local ensure_pipeline_helper_running
|
||||
local function _run_helper_request_response(req, timeout_seconds)
|
||||
_last_ipc_error = ''
|
||||
if not ensure_pipeline_helper_running() then
|
||||
_lua_log('ipc: helper not running; cannot execute request')
|
||||
_last_ipc_error = 'helper not running'
|
||||
_lua_log('ipc: helper not ready; cannot execute request')
|
||||
_last_ipc_error = 'helper not ready'
|
||||
return nil
|
||||
end
|
||||
|
||||
@@ -824,7 +850,6 @@ local function _run_helper_request_response(req, timeout_seconds)
|
||||
local rv = tostring(mp.get_property_native(PIPELINE_READY_PROP))
|
||||
_lua_log('ipc: helper not ready; ready=' .. rv)
|
||||
_last_ipc_error = 'helper not ready (ready=' .. rv .. ')'
|
||||
_pipeline_helper_started = false
|
||||
return nil
|
||||
end
|
||||
end
|
||||
@@ -875,7 +900,6 @@ local function _run_helper_request_response(req, timeout_seconds)
|
||||
|
||||
_lua_log('ipc: timeout waiting response; ' .. label)
|
||||
_last_ipc_error = 'timeout waiting response (' .. label .. ')'
|
||||
_pipeline_helper_started = false
|
||||
return nil
|
||||
end
|
||||
|
||||
@@ -893,56 +917,6 @@ local function _refresh_store_cache(timeout_seconds)
|
||||
local resp = _run_helper_request_response({ op = 'store-choices' }, timeout_seconds or 1)
|
||||
if not resp or not resp.success or type(resp.choices) ~= 'table' then
|
||||
_lua_log('stores: failed to load store choices via helper; stderr=' .. tostring(resp and resp.stderr or '') .. ' error=' .. tostring(resp and resp.error or ''))
|
||||
|
||||
-- Fallback: directly call Python to import MedeiaCLI.get_store_choices().
|
||||
-- This avoids helper IPC issues and still stays in sync with the REPL.
|
||||
local python = (opts and opts.python_path) and tostring(opts.python_path) or 'python'
|
||||
local cli_path = (opts and opts.cli_path) and tostring(opts.cli_path) or nil
|
||||
if not cli_path or cli_path == '' or not utils.file_info(cli_path) then
|
||||
local base_dir = mp.get_script_directory() or utils.getcwd() or ''
|
||||
if base_dir ~= '' then
|
||||
cli_path = find_file_upwards(base_dir, 'CLI.py', 8)
|
||||
end
|
||||
end
|
||||
|
||||
if cli_path and cli_path ~= '' then
|
||||
local root = tostring(cli_path):match('(.*)[/\\]') or ''
|
||||
if root ~= '' then
|
||||
local code = "import json, sys; sys.path.insert(0, r'" .. root .. "'); from CLI import MedeiaCLI; print(json.dumps(MedeiaCLI.get_store_choices()))"
|
||||
local res = utils.subprocess({
|
||||
args = { python, '-c', code },
|
||||
cancellable = false,
|
||||
})
|
||||
if res and res.status == 0 and res.stdout then
|
||||
local out_text = tostring(res.stdout)
|
||||
local last_line = ''
|
||||
for line in out_text:gmatch('[^\r\n]+') do
|
||||
if trim(line) ~= '' then
|
||||
last_line = line
|
||||
end
|
||||
end
|
||||
local ok, parsed = pcall(utils.parse_json, last_line ~= '' and last_line or out_text)
|
||||
if ok and type(parsed) == 'table' then
|
||||
local out = {}
|
||||
for _, v in ipairs(parsed) do
|
||||
local name = trim(tostring(v or ''))
|
||||
if name ~= '' then
|
||||
out[#out + 1] = name
|
||||
end
|
||||
end
|
||||
if #out > 0 then
|
||||
_cached_store_names = out
|
||||
_store_cache_loaded = true
|
||||
_lua_log('stores: loaded ' .. tostring(#_cached_store_names) .. ' stores via python fallback')
|
||||
return true
|
||||
end
|
||||
end
|
||||
else
|
||||
_lua_log('stores: python fallback failed; status=' .. tostring(res and res.status or 'nil') .. ' stderr=' .. tostring(res and res.stderr or ''))
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
return false
|
||||
end
|
||||
|
||||
@@ -1295,11 +1269,8 @@ local function _run_pipeline_detached(pipeline_cmd)
|
||||
if not pipeline_cmd or pipeline_cmd == '' then
|
||||
return false
|
||||
end
|
||||
local python = (opts and opts.python_path) and tostring(opts.python_path) or 'python'
|
||||
local cli = (opts and opts.cli_path) and tostring(opts.cli_path) or 'CLI.py'
|
||||
local args = { python, cli, 'pipeline', '--pipeline', pipeline_cmd }
|
||||
local ok = utils.subprocess_detached({ args = args })
|
||||
return ok ~= nil
|
||||
local resp = _run_helper_request_response({ op = 'run-detached', data = { pipeline = pipeline_cmd } }, 1.0)
|
||||
return (resp and resp.success) and true or false
|
||||
end
|
||||
|
||||
local function _open_save_location_picker_for_pending_download()
|
||||
@@ -1659,62 +1630,10 @@ mp.register_script_message('medios-download-pick-path', function()
|
||||
end)
|
||||
|
||||
ensure_pipeline_helper_running = function()
|
||||
-- If a helper is already running (e.g. started by the launcher), just use it.
|
||||
if _is_pipeline_helper_ready() then
|
||||
_pipeline_helper_started = true
|
||||
return true
|
||||
end
|
||||
|
||||
-- We tried to start a helper before but it isn't ready anymore; restart.
|
||||
if _pipeline_helper_started then
|
||||
_pipeline_helper_started = false
|
||||
end
|
||||
|
||||
local helper_path = nil
|
||||
|
||||
-- Prefer deriving repo root from located CLI.py if available.
|
||||
if opts and opts.cli_path and utils.file_info(opts.cli_path) then
|
||||
local root = tostring(opts.cli_path):match('(.*)[/\\]') or ''
|
||||
if root ~= '' then
|
||||
local candidate = utils.join_path(root, 'MPV/pipeline_helper.py')
|
||||
if utils.file_info(candidate) then
|
||||
helper_path = candidate
|
||||
end
|
||||
end
|
||||
end
|
||||
|
||||
if not helper_path then
|
||||
local base_dir = mp.get_script_directory() or ""
|
||||
if base_dir == "" then
|
||||
base_dir = utils.getcwd() or ""
|
||||
end
|
||||
helper_path = find_file_upwards(base_dir, 'MPV/pipeline_helper.py', 8)
|
||||
end
|
||||
if not helper_path then
|
||||
_lua_log('ipc: cannot find helper script MPV/pipeline_helper.py (script_dir=' .. tostring(mp.get_script_directory() or '') .. ')')
|
||||
return false
|
||||
end
|
||||
|
||||
-- Ensure mpv actually has a JSON IPC server for the helper to connect to.
|
||||
if not ensure_mpv_ipc_server() then
|
||||
_lua_log('ipc: mpv input-ipc-server is not set; start mpv with --input-ipc-server=\\\\.\\pipe\\mpv-medeia-macina')
|
||||
return false
|
||||
end
|
||||
|
||||
local python = (opts and opts.python_path) and tostring(opts.python_path) or 'python'
|
||||
local ipc = get_mpv_ipc_path()
|
||||
-- Give the helper enough time to connect (Windows pipe can take a moment).
|
||||
local args = {python, helper_path, '--ipc', ipc, '--timeout', '30'}
|
||||
_lua_log('ipc: starting helper: ' .. table.concat(args, ' '))
|
||||
|
||||
local ok = utils.subprocess_detached({ args = args })
|
||||
if ok == nil then
|
||||
_lua_log('ipc: failed to start helper (subprocess_detached returned nil)')
|
||||
return false
|
||||
end
|
||||
|
||||
_pipeline_helper_started = true
|
||||
return true
|
||||
-- IMPORTANT: do NOT spawn Python from inside mpv.
|
||||
-- The Python side (MPV.mpv_ipc) starts pipeline_helper.py using Windows
|
||||
-- no-console flags; spawning here can flash a console window.
|
||||
return _is_pipeline_helper_ready() and true or false
|
||||
end
|
||||
|
||||
local function run_pipeline_via_ipc(pipeline_cmd, seeds, timeout_seconds)
|
||||
@@ -1824,34 +1743,9 @@ function M.run_pipeline(pipeline_cmd, seeds)
|
||||
return nil
|
||||
end
|
||||
|
||||
local args = {opts.python_path, opts.cli_path, "pipeline", "--pipeline", pipeline_cmd}
|
||||
|
||||
if seeds then
|
||||
local seeds_json = utils.format_json(seeds)
|
||||
table.insert(args, "--seeds-json")
|
||||
table.insert(args, seeds_json)
|
||||
end
|
||||
|
||||
_lua_log("Running pipeline: " .. pipeline_cmd)
|
||||
-- If the persistent IPC helper isn't available, fall back to a subprocess.
|
||||
-- Note: mpv's subprocess helper does not support an `env` parameter.
|
||||
local res = utils.subprocess({
|
||||
args = args,
|
||||
cancellable = false,
|
||||
})
|
||||
|
||||
if res.status ~= 0 then
|
||||
local err = (res.stderr and res.stderr ~= "") and res.stderr
|
||||
or (res.error_string and res.error_string ~= "") and res.error_string
|
||||
or "unknown"
|
||||
local log_path = write_temp_log('medeia-cli-pipeline-stderr', tostring(res.stderr or err))
|
||||
local suffix = log_path and (' (log: ' .. log_path .. ')') or ''
|
||||
_lua_log("Pipeline error: " .. err .. suffix)
|
||||
mp.osd_message("Error: pipeline failed" .. suffix, 6)
|
||||
mp.osd_message('Error: pipeline helper not available', 6)
|
||||
_lua_log('ipc: helper not available; refusing to spawn python subprocess')
|
||||
return nil
|
||||
end
|
||||
|
||||
return res.stdout
|
||||
end
|
||||
|
||||
-- Helper to run pipeline and parse JSON output
|
||||
@@ -2132,13 +2026,12 @@ mp.add_key_binding("ctrl+del", "medios-delete", M.delete_current_file)
|
||||
mp.add_key_binding("l", "medeia-lyric-toggle", lyric_toggle)
|
||||
mp.add_key_binding("L", "medeia-lyric-toggle-shift", lyric_toggle)
|
||||
|
||||
-- Cover art observers removed (disabled per user request)
|
||||
|
||||
|
||||
-- Start the persistent pipeline helper eagerly at launch.
|
||||
-- This avoids spawning Python per command and works cross-platform via MPV IPC.
|
||||
mp.add_timeout(0, function()
|
||||
pcall(ensure_mpv_ipc_server)
|
||||
pcall(ensure_pipeline_helper_running)
|
||||
pcall(_lua_log, 'medeia-lua loaded version=' .. MEDEIA_LUA_VERSION)
|
||||
end)
|
||||
|
||||
|
||||
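The Lua changes above deliberately stop spawning Python from inside mpv; instead they require mpv itself to expose a JSON IPC server that pipeline_helper.py can attach to. A minimal sketch of launching mpv with the pipe name from the log message above (`--input-ipc-server` is a standard mpv option; the POSIX socket path and media path are placeholders):

    import subprocess
    import sys

    # Pipe name taken from the Lua log message; the Unix socket path is illustrative.
    IPC_PATH = r"\\.\pipe\mpv-medeia-macina" if sys.platform == "win32" else "/tmp/mpv-medeia-macina.sock"

    # Start mpv with a JSON IPC endpoint so the pipeline helper can connect to it.
    subprocess.Popen([
        "mpv",
        f"--input-ipc-server={IPC_PATH}",
        "path/to/media.mkv",  # placeholder
    ])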
@@ -33,6 +33,29 @@ _LYRIC_LOG_FH: Optional[Any] = None
|
||||
_MPV_AVAILABILITY_CACHE: Optional[Tuple[bool, Optional[str]]] = None
|
||||
|
||||
|
||||
def _windows_pythonw_exe(python_exe: Optional[str]) -> Optional[str]:
|
||||
"""Return a pythonw.exe adjacent to python.exe if available (Windows only)."""
|
||||
if platform.system() != "Windows":
|
||||
return python_exe
|
||||
try:
|
||||
exe = str(python_exe or "").strip()
|
||||
except Exception:
|
||||
exe = ""
|
||||
if not exe:
|
||||
return None
|
||||
low = exe.lower()
|
||||
if low.endswith("pythonw.exe"):
|
||||
return exe
|
||||
if low.endswith("python.exe"):
|
||||
try:
|
||||
candidate = exe[:-10] + "pythonw.exe"
|
||||
if os.path.exists(candidate):
|
||||
return candidate
|
||||
except Exception:
|
||||
pass
|
||||
return exe
|
||||
|
||||
|
||||
def _windows_hidden_subprocess_kwargs() -> Dict[str, Any]:
|
||||
"""Best-effort kwargs to avoid flashing console windows on Windows.
|
||||
|
||||
@@ -413,8 +436,12 @@ class MPV:
|
||||
except Exception:
|
||||
repo_root = Path.cwd()
|
||||
|
||||
py = sys.executable
|
||||
if platform.system() == "Windows":
|
||||
py = _windows_pythonw_exe(py) or py
|
||||
|
||||
cmd: List[str] = [
|
||||
sys.executable,
|
||||
py or "python",
|
||||
"-m",
|
||||
"MPV.lyric",
|
||||
"--ipc",
|
||||
@@ -448,7 +475,18 @@ class MPV:
|
||||
# Make the current directory the repo root so `-m MPV.lyric` resolves reliably.
|
||||
kwargs["cwd"] = str(repo_root)
|
||||
if platform.system() == "Windows":
|
||||
kwargs["creationflags"] = 0x00000008 # DETACHED_PROCESS
|
||||
# Ensure we don't flash a console window when spawning the helper.
|
||||
flags = 0
|
||||
try:
|
||||
flags |= int(getattr(subprocess, "DETACHED_PROCESS", 0x00000008))
|
||||
except Exception:
|
||||
flags |= 0x00000008
|
||||
try:
|
||||
flags |= int(getattr(subprocess, "CREATE_NO_WINDOW", 0x08000000))
|
||||
except Exception:
|
||||
flags |= 0x08000000
|
||||
kwargs["creationflags"] = flags
|
||||
kwargs.update({k: v for k, v in _windows_hidden_subprocess_kwargs().items() if k != "creationflags"})
|
||||
|
||||
_LYRIC_PROCESS = subprocess.Popen(cmd, **kwargs)
|
||||
debug(f"Lyric loader started (log={log_path})")
|
||||
@@ -582,6 +620,8 @@ class MPV:
|
||||
helper_path = (repo_root / "MPV" / "pipeline_helper.py").resolve()
|
||||
if helper_path.exists():
|
||||
py = sys.executable or "python"
|
||||
if platform.system() == "Windows":
|
||||
py = _windows_pythonw_exe(py) or py
|
||||
helper_cmd = [
|
||||
py,
|
||||
str(helper_path),
|
||||
@@ -591,6 +631,13 @@ class MPV:
|
||||
"30",
|
||||
]
|
||||
|
||||
helper_env = os.environ.copy()
|
||||
try:
|
||||
existing_pp = helper_env.get("PYTHONPATH")
|
||||
helper_env["PYTHONPATH"] = str(repo_root) if not existing_pp else (str(repo_root) + os.pathsep + str(existing_pp))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
helper_kwargs: Dict[str, Any] = {}
|
||||
if platform.system() == "Windows":
|
||||
flags = 0
|
||||
@@ -605,6 +652,9 @@ class MPV:
|
||||
helper_kwargs["creationflags"] = flags
|
||||
helper_kwargs.update({k: v for k, v in _windows_hidden_subprocess_kwargs().items() if k != "creationflags"})
|
||||
|
||||
helper_kwargs["cwd"] = str(repo_root)
|
||||
helper_kwargs["env"] = helper_env
|
||||
|
||||
subprocess.Popen(
|
||||
helper_cmd,
|
||||
stdin=subprocess.DEVNULL,
|
||||
|
||||
@@ -30,6 +30,8 @@ import time
|
||||
import logging
|
||||
import re
|
||||
import hashlib
|
||||
import subprocess
|
||||
import platform
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
@@ -134,6 +136,91 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
|
||||
"""
|
||||
op_name = str(op or "").strip().lower()
|
||||
|
||||
if op_name in {"run-detached", "run_detached", "pipeline-detached", "pipeline_detached"}:
|
||||
pipeline_text = ""
|
||||
seeds = None
|
||||
if isinstance(data, dict):
|
||||
pipeline_text = str(data.get("pipeline") or "").strip()
|
||||
seeds = data.get("seeds")
|
||||
if not pipeline_text:
|
||||
return {
|
||||
"success": False,
|
||||
"stdout": "",
|
||||
"stderr": "",
|
||||
"error": "Missing pipeline",
|
||||
"table": None,
|
||||
}
|
||||
|
||||
py = sys.executable or "python"
|
||||
if platform.system() == "Windows":
|
||||
try:
|
||||
exe = str(py or "").strip()
|
||||
except Exception:
|
||||
exe = ""
|
||||
low = exe.lower()
|
||||
if low.endswith("python.exe"):
|
||||
try:
|
||||
candidate = exe[:-10] + "pythonw.exe"
|
||||
if os.path.exists(candidate):
|
||||
py = candidate
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
cmd = [py, str((_repo_root() / "CLI.py").resolve()), "pipeline", "--pipeline", pipeline_text]
|
||||
if seeds is not None:
|
||||
try:
|
||||
cmd.extend(["--seeds-json", json.dumps(seeds, ensure_ascii=False)])
|
||||
except Exception:
|
||||
# Best-effort; seeds are optional.
|
||||
pass
|
||||
|
||||
popen_kwargs: Dict[str, Any] = {
|
||||
"stdin": subprocess.DEVNULL,
|
||||
"stdout": subprocess.DEVNULL,
|
||||
"stderr": subprocess.DEVNULL,
|
||||
"cwd": str(_repo_root()),
|
||||
}
|
||||
if platform.system() == "Windows":
|
||||
flags = 0
|
||||
try:
|
||||
flags |= int(getattr(subprocess, "DETACHED_PROCESS", 0x00000008))
|
||||
except Exception:
|
||||
flags |= 0x00000008
|
||||
try:
|
||||
flags |= int(getattr(subprocess, "CREATE_NO_WINDOW", 0x08000000))
|
||||
except Exception:
|
||||
flags |= 0x08000000
|
||||
popen_kwargs["creationflags"] = int(flags)
|
||||
try:
|
||||
si = subprocess.STARTUPINFO()
|
||||
si.dwFlags |= int(getattr(subprocess, "STARTF_USESHOWWINDOW", 0x00000001))
|
||||
si.wShowWindow = subprocess.SW_HIDE
|
||||
popen_kwargs["startupinfo"] = si
|
||||
except Exception:
|
||||
pass
|
||||
else:
|
||||
popen_kwargs["start_new_session"] = True
|
||||
|
||||
try:
|
||||
proc = subprocess.Popen(cmd, **popen_kwargs)
|
||||
except Exception as exc:
|
||||
return {
|
||||
"success": False,
|
||||
"stdout": "",
|
||||
"stderr": "",
|
||||
"error": f"Failed to spawn detached pipeline: {type(exc).__name__}: {exc}",
|
||||
"table": None,
|
||||
}
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"stdout": "",
|
||||
"stderr": "",
|
||||
"error": None,
|
||||
"table": None,
|
||||
"pid": int(getattr(proc, "pid", 0) or 0),
|
||||
}
|
||||
|
||||
# Provide store backend choices using the same source as CLI/Typer autocomplete.
|
||||
if op_name in {"store-choices", "store_choices", "get-store-choices", "get_store_choices"}:
|
||||
from CLI import MedeiaCLI # noqa: WPS433
|
||||
|
||||
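The op names and payload keys handled above amount to a small request/response contract between main.lua and the helper; a minimal sketch of the shapes involved for the detached-pipeline case (the pipeline string is a placeholder, the dict keys come from the handler above):

    # Request that main.lua sends over the helper IPC for a detached run.
    request = {
        "op": "run-detached",
        "data": {
            "pipeline": 'download-data "<url>" | add-file -storage local',  # placeholder pipeline
            "seeds": None,  # optional; forwarded as --seeds-json when present
        },
    }

    # Shape of the response returned by _run_op() on success.
    response = {
        "success": True,
        "stdout": "",
        "stderr": "",
        "error": None,
        "table": None,
        "pid": 12345,  # pid of the detached CLI.py process (example value)
    }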
138  SYS/env_check.py  Normal file
@@ -0,0 +1,138 @@
"""Environment compatibility checks for known packaging issues.

This module provides a focused check for `urllib3` correctness and a
helpful, actionable error message when the environment looks broken
(e.g., due to `urllib3-future` installing a site-packages hook).

It is intentionally lightweight and safe to import early at process
startup so the CLI can detect and surface environment problems before
trying to import cmdlets or other modules.
"""
from __future__ import annotations

import importlib
import site
import sys
from pathlib import Path
from typing import Tuple

from SYS.logger import log, debug


def _find_potential_urllib3_pth() -> list[str]:
    """Return a list of path strings that look like interfering .pth files."""
    found: list[str] = []
    try:
        paths = site.getsitepackages() or []
    except Exception:
        paths = []

    for sp in set(paths):
        try:
            candidate = Path(sp) / "urllib3_future.pth"
            if candidate.exists():
                found.append(str(candidate))
        except Exception:
            continue
    return found


def check_urllib3_compat() -> Tuple[bool, str]:
    """Quick check whether `urllib3` looks usable.

    Returns (True, "OK") when everything seems fine. When a problem is
    detected the returned tuple is (False, <actionable message>) where the
    message contains steps the user can run to fix the environment.
    """
    try:
        import urllib3  # type: ignore
    except Exception as exc:  # pragma: no cover - hard to reliably simulate ImportError across envs
        pths = _find_potential_urllib3_pth()
        lines = [
            "Your Python environment appears to have a broken or incomplete 'urllib3' installation.",
            f"ImportError: {exc!s}",
        ]
        if pths:
            lines.append(f"Found potential interfering .pth file(s): {', '.join(pths)}")
        lines.extend(
            [
                "Recommended fixes (activate the project's virtualenv first):",
                "  python -m pip uninstall urllib3-future -y",
                "  python -m pip install --upgrade --force-reinstall urllib3",
                "  python -m pip install niquests -U",
                "You may also re-run the bootstrap script: scripts\\bootstrap.ps1 (Windows) or scripts/bootstrap.sh (POSIX).",
            ]
        )
        return False, "\n".join(lines)

    # Basic sanity checks on the *imported* urllib3 module
    problems: list[str] = []
    if not getattr(urllib3, "__version__", None):
        problems.append("missing urllib3.__version__")
    if not hasattr(urllib3, "exceptions"):
        problems.append("missing urllib3.exceptions")

    try:
        spec = importlib.util.find_spec("urllib3.exceptions")
        if spec is None or not getattr(spec, "origin", None):
            problems.append("urllib3.exceptions not importable")
    except Exception:
        problems.append("urllib3.exceptions not importable (importlib check failed)")

    if problems:
        pths = _find_potential_urllib3_pth()
        lines = [
            "Your Python environment appears to have a broken 'urllib3' package:",
            f"Problems found: {', '.join(problems)}",
        ]
        if pths:
            lines.append(f"Found potential interfering .pth file(s): {', '.join(pths)}")
        lines.extend(
            [
                "Recommended fixes (activate the project's virtualenv first):",
                "  python -m pip uninstall urllib3-future -y",
                "  python -m pip install --upgrade --force-reinstall urllib3",
                "  python -m pip install niquests -U",
                "You may also re-run the bootstrap script: scripts\\bootstrap.ps1 (Windows) or scripts/bootstrap.sh (POSIX).",
            ]
        )
        return False, "\n".join(lines)

    # Looks good
    debug("urllib3 appears usable: version=%s, exceptions=%s", getattr(urllib3, "__version__", "<unknown>"), hasattr(urllib3, "exceptions"))
    return True, "OK"


def ensure_urllib3_ok(exit_on_error: bool = True) -> bool:
    """Ensure urllib3 is usable and print an actionable message if not.

    - If `exit_on_error` is True (default) this will call `sys.exit(2)` when
      a problem is detected so callers that call this early in process
      startup won't continue with a partially-broken environment.
    - If `exit_on_error` is False the function will print the message and
      return False so the caller can decide how to proceed.
    """
    ok, message = check_urllib3_compat()
    if ok:
        return True

    # Prominent user-facing output
    border = "=" * 80
    log(border)
    log("ENVIRONMENT PROBLEM DETECTED: Broken 'urllib3' package")
    log(message)
    log(border)

    if exit_on_error:
        log("Please follow the steps above to fix your environment, then re-run this command.")
        try:
            sys.exit(2)
        except SystemExit:
            raise
    return False


if __name__ == "__main__":  # pragma: no cover - manual debugging helper
    ok, message = check_urllib3_compat()
    print(message)
    sys.exit(0 if ok else 2)
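A minimal sketch of how a CLI entry point might call this check before heavier imports (the surrounding main() is illustrative; ensure_urllib3_ok comes from the file above):

    # Illustrative entry-point wiring; only ensure_urllib3_ok() is from SYS/env_check.py.
    from SYS.env_check import ensure_urllib3_ok

    def main() -> int:
        # Exits with status 2 and an actionable message if urllib3 looks broken.
        ensure_urllib3_ok(exit_on_error=True)
        # Safe to import modules that depend on requests/urllib3 from here on.
        ...
        return 0

    if __name__ == "__main__":
        raise SystemExit(main())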
18  SYS/tasks.py
@@ -102,6 +102,23 @@ def _run_task(args, parser) -> int:
        'command': command,
        'cwd': args.cwd or os.getcwd(),
    })

    popen_kwargs = {}
    if os.name == 'nt':
        # Avoid flashing a console window when spawning console-subsystem executables.
        flags = 0
        try:
            flags |= int(getattr(subprocess, 'CREATE_NO_WINDOW', 0x08000000))
        except Exception:
            flags |= 0x08000000
        popen_kwargs['creationflags'] = flags
        try:
            si = subprocess.STARTUPINFO()
            si.dwFlags |= subprocess.STARTF_USESHOWWINDOW
            si.wShowWindow = subprocess.SW_HIDE
            popen_kwargs['startupinfo'] = si
        except Exception:
            pass
    try:
        process = subprocess.Popen(
            command,
@@ -112,6 +129,7 @@ def _run_task(args, parser) -> int:
            text=True,
            bufsize=1,
            universal_newlines=True,
            **popen_kwargs,
        )
    except FileNotFoundError as exc:
        notifier('downlow-task-event', {
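The DETACHED_PROCESS / CREATE_NO_WINDOW / STARTUPINFO combination above now appears in the MPV launcher, pipeline_helper.py, and SYS/tasks.py; a minimal standalone sketch of that pattern (standard library only; the helper name is illustrative):

    import subprocess
    import sys
    from typing import Any, Dict

    def hidden_popen_kwargs() -> Dict[str, Any]:
        """Popen kwargs that avoid flashing a console window on Windows; detach on POSIX."""
        kwargs: Dict[str, Any] = {}
        if sys.platform != "win32":
            kwargs["start_new_session"] = True
            return kwargs
        flags = getattr(subprocess, "DETACHED_PROCESS", 0x00000008)
        flags |= getattr(subprocess, "CREATE_NO_WINDOW", 0x08000000)
        kwargs["creationflags"] = flags
        si = subprocess.STARTUPINFO()
        si.dwFlags |= getattr(subprocess, "STARTF_USESHOWWINDOW", 0x00000001)
        si.wShowWindow = subprocess.SW_HIDE
        kwargs["startupinfo"] = si
        return kwargs

    # Usage (illustrative): subprocess.Popen(["pythonw", "script.py"], stdin=subprocess.DEVNULL, **hidden_popen_kwargs())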
@@ -1,18 +1,16 @@
|
||||
"""Pipeline execution utilities for the Textual UI.
|
||||
|
||||
This module mirrors the CLI pipeline behaviour while exposing a class-based
|
||||
interface that the TUI can call. It keeps all pipeline/cmdlet integration in
|
||||
one place so the interface layer stays focused on presentation.
|
||||
The TUI is a frontend to the CLI, so it must use the same pipeline executor
|
||||
implementation as the CLI (`CLI.PipelineExecutor`).
|
||||
"""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import io
|
||||
import shlex
|
||||
import uuid
|
||||
from dataclasses import dataclass, field
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Callable, Dict, List, Optional, Sequence
|
||||
|
||||
BASE_DIR = Path(__file__).resolve().parent
|
||||
@@ -23,11 +21,10 @@ for path in (ROOT_DIR, BASE_DIR):
|
||||
sys.path.insert(0, str_path)
|
||||
|
||||
import pipeline as ctx
|
||||
from cmdlet import REGISTRY
|
||||
from config import get_local_storage_path, load_config
|
||||
from SYS.worker_manager import WorkerManager
|
||||
|
||||
from CLI import MedeiaCLI
|
||||
from CLI import ConfigLoader, PipelineExecutor as CLIPipelineExecutor, WorkerManagerRegistry
|
||||
from SYS.logger import set_debug
|
||||
from rich_display import capture_rich_output
|
||||
from result_table import ResultTable
|
||||
|
||||
|
||||
@dataclass(slots=True)
|
||||
@@ -73,24 +70,16 @@ class PipelineRunResult:
|
||||
}
|
||||
|
||||
|
||||
class PipelineExecutor:
|
||||
"""Thin wrapper over the cmdlet registry + pipeline context."""
|
||||
class PipelineRunner:
|
||||
"""TUI wrapper that delegates to the canonical CLI pipeline executor."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
config: Optional[Dict[str, Any]] = None,
|
||||
worker_manager: Optional[WorkerManager] = None,
|
||||
) -> None:
|
||||
self._config = config or load_config()
|
||||
self._worker_manager = worker_manager
|
||||
if self._worker_manager is None:
|
||||
self._worker_manager = self._ensure_worker_manager()
|
||||
if self._worker_manager:
|
||||
self._config["_worker_manager"] = self._worker_manager
|
||||
def __init__(self) -> None:
|
||||
self._config_loader = ConfigLoader(root=ROOT_DIR)
|
||||
self._executor = CLIPipelineExecutor(config_loader=self._config_loader)
|
||||
self._worker_manager = None
|
||||
|
||||
@property
|
||||
def worker_manager(self) -> Optional[WorkerManager]:
|
||||
def worker_manager(self):
|
||||
return self._worker_manager
|
||||
|
||||
def run_pipeline(
|
||||
@@ -98,290 +87,214 @@ class PipelineExecutor:
|
||||
pipeline_text: str,
|
||||
*,
|
||||
seeds: Optional[Any] = None,
|
||||
isolate: bool = False,
|
||||
on_log: Optional[Callable[[str], None]] = None,
|
||||
) -> PipelineRunResult:
|
||||
"""Execute a pipeline string and return structured results.
|
||||
snapshot: Optional[Dict[str, Any]] = None
|
||||
if isolate:
|
||||
snapshot = self._snapshot_ctx_state()
|
||||
|
||||
Args:
|
||||
pipeline_text: Raw pipeline text entered by the user.
|
||||
on_log: Optional callback that receives human-readable log lines.
|
||||
"""
|
||||
normalized = pipeline_text.strip()
|
||||
normalized = str(pipeline_text or "").strip()
|
||||
result = PipelineRunResult(pipeline=normalized, success=False)
|
||||
if not normalized:
|
||||
result.error = "Pipeline is empty"
|
||||
return result
|
||||
|
||||
tokens = self._tokenize(normalized)
|
||||
stages = self._split_stages(tokens)
|
||||
if not stages:
|
||||
result.error = "Pipeline contains no stages"
|
||||
try:
|
||||
from cli_syntax import validate_pipeline_text
|
||||
|
||||
syntax_error = validate_pipeline_text(normalized)
|
||||
if syntax_error:
|
||||
result.error = syntax_error.message
|
||||
result.stderr = syntax_error.message
|
||||
return result
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
tokens = shlex.split(normalized)
|
||||
except Exception as exc:
|
||||
result.error = f"Syntax error: {exc}"
|
||||
result.stderr = result.error
|
||||
return result
|
||||
|
||||
if not tokens:
|
||||
result.error = "Pipeline contains no tokens"
|
||||
return result
|
||||
|
||||
config = self._config_loader.load()
|
||||
try:
|
||||
set_debug(bool(config.get("debug", False)))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
self._worker_manager = WorkerManagerRegistry.ensure(config)
|
||||
except Exception:
|
||||
self._worker_manager = None
|
||||
|
||||
ctx.reset()
|
||||
ctx.set_current_command_text(normalized)
|
||||
|
||||
if seeds is not None:
|
||||
try:
|
||||
# Mirror CLI behavior: treat seeds as output of a virtual previous stage.
|
||||
if not isinstance(seeds, list):
|
||||
seeds = [seeds]
|
||||
setter = getattr(ctx, "set_last_result_items_only", None)
|
||||
if callable(setter):
|
||||
setter(seeds)
|
||||
else:
|
||||
ctx.set_last_items(list(seeds))
|
||||
ctx.set_last_result_items_only(list(seeds))
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
stdout_buffer = io.StringIO()
|
||||
stderr_buffer = io.StringIO()
|
||||
piped_result: Any = None
|
||||
worker_session = self._start_worker_session(normalized)
|
||||
|
||||
try:
|
||||
with contextlib.redirect_stdout(stdout_buffer), contextlib.redirect_stderr(
|
||||
stderr_buffer
|
||||
):
|
||||
for index, stage_tokens in enumerate(stages):
|
||||
stage = self._execute_stage(
|
||||
index=index,
|
||||
total=len(stages),
|
||||
stage_tokens=stage_tokens,
|
||||
piped_input=piped_result,
|
||||
on_log=on_log,
|
||||
)
|
||||
result.stages.append(stage)
|
||||
|
||||
if stage.status != "completed":
|
||||
result.error = stage.error or f"Stage {stage.name} failed"
|
||||
return result
|
||||
|
||||
if index == len(stages) - 1:
|
||||
result.emitted = stage.emitted
|
||||
result.result_table = stage.result_table
|
||||
else:
|
||||
piped_result = stage.emitted
|
||||
|
||||
result.success = True
|
||||
return result
|
||||
with capture_rich_output(stdout=stdout_buffer, stderr=stderr_buffer):
|
||||
with contextlib.redirect_stdout(stdout_buffer), contextlib.redirect_stderr(stderr_buffer):
|
||||
if on_log:
|
||||
on_log("Executing pipeline via CLI executor...")
|
||||
self._executor.execute_tokens(list(tokens))
|
||||
except Exception as exc:
|
||||
result.error = f"{type(exc).__name__}: {exc}"
|
||||
finally:
|
||||
result.stdout = stdout_buffer.getvalue()
|
||||
result.stderr = stderr_buffer.getvalue()
|
||||
try:
|
||||
ctx.clear_current_command_text()
|
||||
if worker_session is not None:
|
||||
status = "completed" if result.success else "error"
|
||||
worker_session.finish(status=status, message=result.error or "")
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Stage execution helpers
|
||||
# ------------------------------------------------------------------
|
||||
def _execute_stage(
|
||||
self,
|
||||
*,
|
||||
index: int,
|
||||
total: int,
|
||||
stage_tokens: Sequence[str],
|
||||
piped_input: Any,
|
||||
on_log: Optional[Callable[[str], None]],
|
||||
) -> PipelineStageResult:
|
||||
if not stage_tokens:
|
||||
return PipelineStageResult(name="(empty)", args=[], status="skipped")
|
||||
|
||||
cmd_name = stage_tokens[0].replace("_", "-").lower()
|
||||
stage_args = stage_tokens[1:]
|
||||
stage = PipelineStageResult(name=cmd_name, args=stage_args)
|
||||
|
||||
if cmd_name.startswith("@"):
|
||||
return self._apply_selection_stage(
|
||||
token=cmd_name,
|
||||
stage=stage,
|
||||
piped_input=piped_input,
|
||||
on_log=on_log,
|
||||
)
|
||||
|
||||
cmd_fn = REGISTRY.get(cmd_name)
|
||||
if not cmd_fn:
|
||||
stage.status = "failed"
|
||||
stage.error = f"Unknown command: {cmd_name}"
|
||||
return stage
|
||||
|
||||
pipeline_ctx = ctx.PipelineStageContext(stage_index=index, total_stages=total, pipe_index=index)
|
||||
ctx.set_stage_context(pipeline_ctx)
|
||||
|
||||
try:
|
||||
return_code = cmd_fn(piped_input, list(stage_args), self._config)
|
||||
except Exception as exc: # pragma: no cover - surfaced in UI
|
||||
stage.status = "failed"
|
||||
stage.error = f"{type(exc).__name__}: {exc}"
|
||||
if on_log:
|
||||
on_log(stage.error)
|
||||
return stage
|
||||
finally:
|
||||
ctx.set_stage_context(None)
|
||||
|
||||
emitted = list(getattr(pipeline_ctx, "emits", []) or [])
|
||||
stage.emitted = emitted
|
||||
|
||||
# Capture the ResultTable if the cmdlet set one
|
||||
# Check display table first (overlay), then last result table
|
||||
stage.result_table = ctx.get_display_table() or ctx.get_last_result_table()
|
||||
|
||||
if return_code != 0:
|
||||
stage.status = "failed"
|
||||
stage.error = f"Exit code {return_code}"
|
||||
else:
|
||||
stage.status = "completed"
|
||||
stage.error = None
|
||||
|
||||
worker_id = self._current_worker_id()
|
||||
if self._worker_manager and worker_id:
|
||||
label = f"[Stage {index + 1}/{total}] {cmd_name} {stage.status}"
|
||||
self._worker_manager.log_step(worker_id, label)
|
||||
|
||||
# Don't clear the table if we just captured it, but ensure items are set for next stage
|
||||
# If we have a table, we should probably keep it in ctx for history if needed
|
||||
# But for pipeline execution, we mainly care about passing items to next stage
|
||||
# ctx.set_last_result_table(None, emitted) <-- This was clearing it
|
||||
|
||||
# Ensure items are available for next stage
|
||||
ctx.set_last_items(emitted)
|
||||
return stage
|
||||
|
||||
def _apply_selection_stage(
|
||||
self,
|
||||
*,
|
||||
token: str,
|
||||
stage: PipelineStageResult,
|
||||
piped_input: Any,
|
||||
on_log: Optional[Callable[[str], None]],
|
||||
) -> PipelineStageResult:
|
||||
# Bare '@' means use the subject associated with the current result table (e.g., the file shown in a tag/URL view)
|
||||
if token == "@":
|
||||
subject = ctx.get_last_result_subject()
|
||||
if subject is None:
|
||||
stage.status = "failed"
|
||||
stage.error = "Selection requested (@) but there is no current result context"
|
||||
return stage
|
||||
stage.emitted = subject if isinstance(subject, list) else [subject]
|
||||
ctx.set_last_items(stage.emitted)
|
||||
stage.status = "completed"
|
||||
if on_log:
|
||||
on_log("Selected current table subject via @")
|
||||
return stage
|
||||
|
||||
selection = self._parse_selection(token)
|
||||
items = piped_input or []
|
||||
if not isinstance(items, list):
|
||||
items = list(items if isinstance(items, Sequence) else [items])
|
||||
|
||||
if not items:
|
||||
stage.status = "failed"
|
||||
stage.error = "Selection requested but there is no upstream data"
|
||||
return stage
|
||||
|
||||
if selection is None:
|
||||
stage.emitted = list(items)
|
||||
else:
|
||||
zero_based = sorted(i - 1 for i in selection if i > 0)
|
||||
stage.emitted = [items[i] for i in zero_based if 0 <= i < len(items)]
|
||||
|
||||
if not stage.emitted:
|
||||
stage.status = "failed"
|
||||
stage.error = "Selection matched no rows"
|
||||
return stage
|
||||
|
||||
ctx.set_last_items(stage.emitted)
|
||||
ctx.set_last_result_table(None, stage.emitted)
|
||||
stage.status = "completed"
|
||||
if on_log:
|
||||
on_log(f"Selected {len(stage.emitted)} item(s) via {token}")
|
||||
return stage
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Worker/session helpers
|
||||
# ------------------------------------------------------------------
|
||||
def _start_worker_session(self, pipeline_text: str) -> Optional[_WorkerSession]:
|
||||
manager = self._ensure_worker_manager()
|
||||
if manager is None:
|
||||
return None
|
||||
|
||||
worker_id = f"tui_pipeline_{uuid.uuid4().hex[:8]}"
|
||||
tracked = manager.track_worker(
|
||||
worker_id,
|
||||
worker_type="pipeline",
|
||||
title="Pipeline run",
|
||||
description=pipeline_text,
|
||||
pipe=pipeline_text,
|
||||
)
|
||||
if not tracked:
|
||||
return None
|
||||
|
||||
manager.log_step(worker_id, "Pipeline started")
|
||||
self._config["_current_worker_id"] = worker_id
|
||||
return _WorkerSession(manager=manager, worker_id=worker_id, config=self._config)
|
||||
|
||||
def _ensure_worker_manager(self) -> Optional[WorkerManager]:
|
||||
if self._worker_manager:
|
||||
return self._worker_manager
|
||||
library_root = get_local_storage_path(self._config)
|
||||
if not library_root:
|
||||
return None
|
||||
try:
|
||||
self._worker_manager = WorkerManager(Path(library_root), auto_refresh_interval=0)
|
||||
self._config["_worker_manager"] = self._worker_manager
|
||||
except Exception:
|
||||
self._worker_manager = None
|
||||
return self._worker_manager
|
||||
|
||||
def _current_worker_id(self) -> Optional[str]:
|
||||
worker_id = self._config.get("_current_worker_id")
|
||||
return str(worker_id) if worker_id else None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Parsing helpers
|
||||
# ------------------------------------------------------------------
|
||||
@staticmethod
|
||||
def _tokenize(pipeline_text: str) -> List[str]:
|
||||
try:
|
||||
return shlex.split(pipeline_text)
|
||||
except ValueError:
|
||||
return pipeline_text.split()
|
||||
|
||||
@staticmethod
|
||||
def _split_stages(tokens: Sequence[str]) -> List[List[str]]:
|
||||
stages: List[List[str]] = []
|
||||
current: List[str] = []
|
||||
for token in tokens:
|
||||
if token == "|":
|
||||
if current:
|
||||
stages.append(current)
|
||||
current = []
|
||||
else:
|
||||
current.append(token)
|
||||
if current:
|
||||
stages.append(current)
|
||||
return stages
|
||||
|
||||
@staticmethod
|
||||
def _parse_selection(token: str) -> Optional[Sequence[int]]:
|
||||
parsed = MedeiaCLI.parse_selection_syntax(token)
|
||||
return sorted(parsed) if parsed else None
|
||||
|
||||
|
||||
class _WorkerSession:
|
||||
"""Minimal worker session wrapper for the TUI executor."""
|
||||
|
||||
def __init__(self, *, manager: WorkerManager, worker_id: str, config: Optional[Dict[str, Any]] = None) -> None:
|
||||
self._manager = manager
|
||||
self.worker_id = worker_id
|
||||
self._config = config
|
||||
|
||||
def finish(self, *, status: str, message: str) -> None:
|
||||
try:
|
||||
self._manager.finish_worker(self.worker_id, result=status, error_msg=message)
|
||||
self._manager.log_step(self.worker_id, f"Pipeline {status}")
|
||||
except Exception:
|
||||
pass
|
||||
if self._config and self._config.get("_current_worker_id") == self.worker_id:
|
||||
self._config.pop("_current_worker_id", None)
|
||||
result.stdout = stdout_buffer.getvalue()
|
||||
result.stderr = stderr_buffer.getvalue()
|
||||
|
||||
# Pull the canonical state out of pipeline context.
|
||||
table = None
|
||||
try:
|
||||
table = ctx.get_display_table() or ctx.get_current_stage_table() or ctx.get_last_result_table()
|
||||
except Exception:
|
||||
table = None
|
||||
|
||||
items: List[Any] = []
|
||||
try:
|
||||
items = list(ctx.get_last_result_items() or [])
|
||||
except Exception:
|
||||
items = []
|
||||
|
||||
if table is None and items:
|
||||
try:
|
||||
synth = ResultTable("Results")
|
||||
for item in items:
|
||||
synth.add_result(item)
|
||||
table = synth
|
||||
except Exception:
|
||||
table = None
|
||||
|
||||
result.emitted = items
|
||||
result.result_table = table
|
||||
|
||||
combined = (result.stdout + "\n" + result.stderr).strip().lower()
|
||||
failure_markers = (
|
||||
"unknown command:",
|
||||
"pipeline order error:",
|
||||
"invalid selection:",
|
||||
"invalid pipeline syntax",
|
||||
"failed to execute pipeline",
|
||||
"[error]",
|
||||
)
|
||||
if result.error:
|
||||
result.success = False
|
||||
elif any(m in combined for m in failure_markers):
|
||||
result.success = False
|
||||
if not result.error:
|
||||
result.error = "Pipeline failed"
|
||||
else:
|
||||
result.success = True
|
||||
|
||||
if isolate and snapshot is not None:
|
||||
try:
|
||||
self._restore_ctx_state(snapshot)
|
||||
except Exception:
|
||||
# Best-effort; isolation should never break normal operation.
|
||||
pass
|
||||
|
||||
return result
|
||||
|
||||
@staticmethod
|
||||
def _snapshot_ctx_state() -> Dict[str, Any]:
|
||||
"""Best-effort snapshot of pipeline context so TUI popups don't clobber UI state."""
|
||||
|
||||
def _copy(val: Any) -> Any:
|
||||
if isinstance(val, list):
|
||||
return val.copy()
|
||||
if isinstance(val, dict):
|
||||
return val.copy()
|
||||
return val
|
||||
|
||||
snap: Dict[str, Any] = {}
|
||||
keys = [
|
||||
"_LIVE_PROGRESS",
|
||||
"_CURRENT_CONTEXT",
|
||||
"_LAST_SEARCH_QUERY",
|
||||
"_PIPELINE_REFRESHED",
|
||||
"_PIPELINE_LAST_ITEMS",
|
||||
"_LAST_RESULT_TABLE",
|
||||
"_LAST_RESULT_ITEMS",
|
||||
"_LAST_RESULT_SUBJECT",
|
||||
"_RESULT_TABLE_HISTORY",
|
||||
"_RESULT_TABLE_FORWARD",
|
||||
"_CURRENT_STAGE_TABLE",
|
||||
"_DISPLAY_ITEMS",
|
||||
"_DISPLAY_TABLE",
|
||||
"_DISPLAY_SUBJECT",
|
||||
"_PIPELINE_LAST_SELECTION",
|
||||
"_PIPELINE_COMMAND_TEXT",
|
||||
"_CURRENT_CMDLET_NAME",
|
||||
"_CURRENT_STAGE_TEXT",
|
||||
"_PIPELINE_VALUES",
|
||||
"_PENDING_PIPELINE_TAIL",
|
||||
"_PENDING_PIPELINE_SOURCE",
|
||||
"_UI_LIBRARY_REFRESH_CALLBACK",
|
||||
]
|
||||
|
||||
for k in keys:
|
||||
snap[k] = _copy(getattr(ctx, k, None))
|
||||
|
||||
# Deepen copies where nested lists are common.
|
||||
try:
|
||||
hist = list(getattr(ctx, "_RESULT_TABLE_HISTORY", []) or [])
|
||||
snap["_RESULT_TABLE_HISTORY"] = [
|
||||
(t, (items.copy() if isinstance(items, list) else list(items) if items else []), subj)
|
||||
for (t, items, subj) in hist
|
||||
if isinstance((t, items, subj), tuple)
|
||||
]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
fwd = list(getattr(ctx, "_RESULT_TABLE_FORWARD", []) or [])
|
||||
snap["_RESULT_TABLE_FORWARD"] = [
|
||||
(t, (items.copy() if isinstance(items, list) else list(items) if items else []), subj)
|
||||
for (t, items, subj) in fwd
|
||||
if isinstance((t, items, subj), tuple)
|
||||
]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
tail = list(getattr(ctx, "_PENDING_PIPELINE_TAIL", []) or [])
|
||||
snap["_PENDING_PIPELINE_TAIL"] = [list(stage) for stage in tail if isinstance(stage, list)]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
values = getattr(ctx, "_PIPELINE_VALUES", None)
|
||||
if isinstance(values, dict):
|
||||
snap["_PIPELINE_VALUES"] = values.copy()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return snap
|
||||
|
||||
@staticmethod
|
||||
def _restore_ctx_state(snapshot: Dict[str, Any]) -> None:
|
||||
for k, v in (snapshot or {}).items():
|
||||
try:
|
||||
setattr(ctx, k, v)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
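A minimal sketch of driving the runner the way the popups above do (the seeds list is a placeholder for the selected row payloads; class, method, and field names match the code above):

    from pipeline_runner import PipelineRunner

    runner = PipelineRunner()

    # Placeholder: fill with the payload(s) of the rows selected in the results table.
    seeds: list = []

    # isolate=True snapshots and restores the shared pipeline context so this
    # background query does not clobber the table shown on the main screen.
    res = runner.run_pipeline("@1 | get-tag -emit", seeds=seeds, isolate=True)

    if res.success:
        for item in res.emitted:
            print(item)
    else:
        print(res.error or res.stderr)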
740  TUI/tui.py
@@ -1,26 +1,20 @@
|
||||
"""Modern Textual UI for driving Medeia-Macina pipelines."""
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Any, List, Optional, Sequence
|
||||
from typing import Any, List, Optional, Sequence, Tuple
|
||||
|
||||
from textual import work
|
||||
from textual import on, work
|
||||
from textual.app import App, ComposeResult
|
||||
from textual.binding import Binding
|
||||
from textual.containers import Container, Horizontal, Vertical, VerticalScroll
|
||||
from textual.widgets import (
|
||||
Button,
|
||||
DataTable,
|
||||
Footer,
|
||||
Header,
|
||||
Input,
|
||||
ListItem,
|
||||
ListView,
|
||||
Static,
|
||||
TextArea,
|
||||
Tree,
|
||||
)
|
||||
from textual.events import Key
|
||||
from textual.containers import Container, Horizontal, Vertical
|
||||
from textual.screen import ModalScreen
|
||||
from textual.widgets import Button, DataTable, Footer, Header, Input, Label, OptionList, Select, Static, TextArea
|
||||
from textual.widgets.option_list import Option
|
||||
|
||||
BASE_DIR = Path(__file__).resolve().parent
|
||||
ROOT_DIR = BASE_DIR.parent
|
||||
@@ -29,25 +23,198 @@ for path in (BASE_DIR, ROOT_DIR):
|
||||
if str_path not in sys.path:
|
||||
sys.path.insert(0, str_path)
|
||||
|
||||
from menu_actions import ( # type: ignore # noqa: E402
|
||||
PIPELINE_PRESETS,
|
||||
PipelinePreset,
|
||||
)
|
||||
from pipeline_runner import PipelineExecutor, PipelineRunResult # type: ignore # noqa: E402
|
||||
from pipeline_runner import PipelineRunResult # type: ignore # noqa: E402
|
||||
from result_table import ResultTable # type: ignore # noqa: E402
|
||||
|
||||
from config import load_config # type: ignore # noqa: E402
|
||||
from Store.registry import Store as StoreRegistry # type: ignore # noqa: E402
|
||||
from cmdlet_catalog import ensure_registry_loaded, list_cmdlet_names # type: ignore # noqa: E402
|
||||
from cli_syntax import validate_pipeline_text # type: ignore # noqa: E402
|
||||
|
||||
class PresetListItem(ListItem):
|
||||
"""List entry that stores its pipeline preset."""
|
||||
from pipeline_runner import PipelineRunner # type: ignore # noqa: E402
|
||||
|
||||
def __init__(self, preset: PipelinePreset) -> None:
|
||||
super().__init__(
|
||||
Static(
|
||||
f"[b]{preset.label}[/b]\n[pale_green4]{preset.description}[/pale_green4]",
|
||||
classes="preset-entry",
|
||||
)
|
||||
)
|
||||
self.preset = preset
|
||||
|
||||
def _dedup_preserve_order(items: List[str]) -> List[str]:
|
||||
out: List[str] = []
|
||||
seen: set[str] = set()
|
||||
for raw in items:
|
||||
s = str(raw or "").strip()
|
||||
if not s:
|
||||
continue
|
||||
key = s.lower()
|
||||
if key in seen:
|
||||
continue
|
||||
seen.add(key)
|
||||
out.append(s)
|
||||
return out
|
||||
|
||||
|
||||
def _extract_tag_names(emitted: Sequence[Any]) -> List[str]:
|
||||
tags: List[str] = []
|
||||
for obj in emitted or []:
|
||||
try:
|
||||
if hasattr(obj, "tag_name"):
|
||||
val = getattr(obj, "tag_name")
|
||||
if val:
|
||||
tags.append(str(val))
|
||||
continue
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
if isinstance(obj, dict):
|
||||
for k in ("tag_name", "tag", "name", "value"):
|
||||
v = obj.get(k)
|
||||
if isinstance(v, str) and v.strip():
|
||||
tags.append(v.strip())
|
||||
break
|
||||
continue
|
||||
return _dedup_preserve_order(tags)
|
||||
|
||||
|
||||
class TextPopup(ModalScreen[None]):
|
||||
def __init__(self, *, title: str, text: str) -> None:
|
||||
super().__init__()
|
||||
self._title = str(title)
|
||||
self._text = str(text or "")
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
yield Static(self._title, id="popup-title")
|
||||
yield TextArea(self._text, id="popup-text", read_only=True)
|
||||
yield Button("Close", id="popup-close")
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
if event.button.id == "popup-close":
|
||||
self.dismiss(None)
|
||||
|
||||
|
||||
class TagEditorPopup(ModalScreen[None]):
|
||||
def __init__(self, *, seeds: Any, store_name: str, file_hash: Optional[str]) -> None:
|
||||
super().__init__()
|
||||
self._seeds = seeds
|
||||
self._store = str(store_name or "").strip()
|
||||
self._hash = str(file_hash or "").strip() if file_hash else ""
|
||||
self._original_tags: List[str] = []
|
||||
self._status: Optional[Static] = None
|
||||
self._editor: Optional[TextArea] = None
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
yield Static("Tags", id="popup-title")
|
||||
yield TextArea("", id="tags-editor")
|
||||
with Horizontal(id="tags-buttons"):
|
||||
yield Button("Save", id="tags-save")
|
||||
yield Button("Close", id="tags-close")
|
||||
yield Static("", id="tags-status")
|
||||
|
||||
def on_mount(self) -> None:
|
||||
self._status = self.query_one("#tags-status", Static)
|
||||
self._editor = self.query_one("#tags-editor", TextArea)
|
||||
self._set_status("Loading tags…")
|
||||
self._load_tags_background()
|
||||
|
||||
def _set_status(self, msg: str) -> None:
|
||||
if self._status:
|
||||
self._status.update(str(msg or ""))
|
||||
|
||||
@work(thread=True)
|
||||
def _load_tags_background(self) -> None:
|
||||
app = self.app # PipelineHubApp
|
||||
try:
|
||||
runner: PipelineRunner = getattr(app, "executor")
|
||||
cmd = f"@1 | get-tag -emit"
|
||||
res = runner.run_pipeline(cmd, seeds=self._seeds, isolate=True)
|
||||
tags = _extract_tag_names(res.emitted)
|
||||
except Exception as exc:
|
||||
tags = []
|
||||
try:
|
||||
app.call_from_thread(self._set_status, f"Error: {type(exc).__name__}: {exc}")
|
||||
except Exception:
|
||||
self._set_status(f"Error: {type(exc).__name__}: {exc}")
|
||||
self._original_tags = tags
|
||||
try:
|
||||
app.call_from_thread(self._apply_loaded_tags, tags)
|
||||
except Exception:
|
||||
self._apply_loaded_tags(tags)
|
||||
|
||||
def _apply_loaded_tags(self, tags: List[str]) -> None:
|
||||
if self._editor:
|
||||
self._editor.text = "\n".join(tags)
|
||||
self._set_status(f"Loaded {len(tags)} tag(s)")
|
||||
|
||||
def _parse_editor_tags(self) -> List[str]:
|
||||
raw = ""
|
||||
try:
|
||||
raw = str(self._editor.text or "") if self._editor else ""
|
||||
except Exception:
|
||||
raw = ""
|
||||
lines = [t.strip() for t in raw.replace("\r\n", "\n").split("\n")]
|
||||
return _dedup_preserve_order([t for t in lines if t])
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
if event.button.id == "tags-close":
|
||||
self.dismiss(None)
|
||||
return
|
||||
if event.button.id == "tags-save":
|
||||
self._save_tags()
|
||||
|
||||
def _save_tags(self) -> None:
|
||||
desired = self._parse_editor_tags()
|
||||
current = _dedup_preserve_order(list(self._original_tags or []))
|
||||
|
||||
desired_set = {t.lower() for t in desired}
|
||||
current_set = {t.lower() for t in current}
|
||||
|
||||
to_add = [t for t in desired if t.lower() not in current_set]
|
||||
to_del = [t for t in current if t.lower() not in desired_set]
|
||||
|
||||
if not to_add and not to_del:
|
||||
self._set_status("No changes")
|
||||
return
|
||||
|
||||
self._set_status("Saving…")
|
||||
self._save_tags_background(to_add, to_del, desired)
|
||||
|
||||
@work(thread=True)
|
||||
def _save_tags_background(self, to_add: List[str], to_del: List[str], desired: List[str]) -> None:
|
||||
app = self.app # PipelineHubApp
|
||||
try:
|
||||
runner: PipelineRunner = getattr(app, "executor")
|
||||
store_tok = json.dumps(self._store)
|
||||
query_chunk = f" -query {json.dumps(f'hash:{self._hash}')}" if self._hash else ""
|
||||
|
||||
failures: List[str] = []
|
||||
|
||||
if to_del:
|
||||
del_args = " ".join(json.dumps(t) for t in to_del)
|
||||
del_cmd = f"@1 | delete-tag -store {store_tok}{query_chunk} {del_args}"
|
||||
del_res = runner.run_pipeline(del_cmd, seeds=self._seeds, isolate=True)
|
||||
if not getattr(del_res, "success", False):
|
||||
failures.append(str(getattr(del_res, "error", "") or getattr(del_res, "stderr", "") or "delete-tag failed").strip())
|
||||
|
||||
if to_add:
|
||||
add_args = " ".join(json.dumps(t) for t in to_add)
|
||||
add_cmd = f"@1 | add-tag -store {store_tok}{query_chunk} {add_args}"
|
||||
add_res = runner.run_pipeline(add_cmd, seeds=self._seeds, isolate=True)
|
||||
if not getattr(add_res, "success", False):
|
||||
failures.append(str(getattr(add_res, "error", "") or getattr(add_res, "stderr", "") or "add-tag failed").strip())
|
||||
|
||||
if failures:
|
||||
msg = failures[0]
|
||||
try:
|
||||
app.call_from_thread(self._set_status, f"Error: {msg}")
|
||||
except Exception:
|
||||
self._set_status(f"Error: {msg}")
|
||||
return
|
||||
|
||||
self._original_tags = list(desired)
|
||||
try:
|
||||
app.call_from_thread(self._set_status, f"Saved (+{len(to_add)}, -{len(to_del)})")
|
||||
except Exception:
|
||||
self._set_status(f"Saved (+{len(to_add)}, -{len(to_del)})")
|
||||
except Exception as exc:
|
||||
try:
|
||||
app.call_from_thread(self._set_status, f"Error: {type(exc).__name__}: {exc}")
|
||||
except Exception:
|
||||
self._set_status(f"Error: {type(exc).__name__}: {exc}")
|
||||
|
||||
|
||||
class PipelineHubApp(App):
|
||||
@@ -58,22 +225,27 @@ class PipelineHubApp(App):
|
||||
Binding("ctrl+enter", "run_pipeline", "Run Pipeline"),
|
||||
Binding("f5", "refresh_workers", "Refresh Workers"),
|
||||
Binding("ctrl+l", "focus_command", "Focus Input", show=False),
|
||||
Binding("ctrl+g", "focus_logs", "Focus Logs", show=False),
|
||||
]
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self.executor = PipelineExecutor()
|
||||
self.executor = PipelineRunner()
|
||||
self.result_items: List[Any] = []
|
||||
self.log_lines: List[str] = []
|
||||
self.command_input: Optional[Input] = None
|
||||
self.store_select: Optional[Select] = None
|
||||
self.path_input: Optional[Input] = None
|
||||
self.log_output: Optional[TextArea] = None
|
||||
self.results_table: Optional[DataTable] = None
|
||||
self.metadata_tree: Optional[Tree] = None
|
||||
self.worker_table: Optional[DataTable] = None
|
||||
self.preset_list: Optional[ListView] = None
|
||||
self.status_panel: Optional[Static] = None
|
||||
self.current_result_table: Optional[ResultTable] = None
|
||||
self.suggestion_list: Optional[OptionList] = None
|
||||
self._cmdlet_names: List[str] = []
|
||||
self._pipeline_running = False
|
||||
self._pipeline_worker: Any = None
|
||||
self._selected_row_index: int = 0
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Layout
|
||||
@@ -81,43 +253,58 @@ class PipelineHubApp(App):
|
||||
def compose(self) -> ComposeResult: # noqa: D401 - Textual compose hook
|
||||
yield Header(show_clock=True)
|
||||
with Container(id="app-shell"):
|
||||
with Horizontal(id="command-pane"):
|
||||
self.command_input = Input(
|
||||
placeholder='download-data "<url>" | merge-file | add-tags -store local | add-file -storage local',
|
||||
id="pipeline-input",
|
||||
)
|
||||
yield self.command_input
|
||||
yield Button("Run", id="run-button", variant="primary")
|
||||
self.status_panel = Static("Idle", id="status-panel")
|
||||
yield self.status_panel
|
||||
with Horizontal(id="content-row"):
|
||||
with VerticalScroll(id="left-pane"):
|
||||
yield Static("Pipeline Presets", classes="section-title")
|
||||
self.preset_list = ListView(
|
||||
*(PresetListItem(preset) for preset in PIPELINE_PRESETS),
|
||||
id="preset-list",
|
||||
)
|
||||
yield self.preset_list
|
||||
yield Static("Logs", classes="section-title")
|
||||
self.log_output = TextArea(id="log-output", read_only=True)
|
||||
yield self.log_output
|
||||
yield Static("Workers", classes="section-title")
|
||||
self.worker_table = DataTable(id="workers-table")
|
||||
yield self.worker_table
|
||||
with Vertical(id="right-pane"):
|
||||
yield Static("Results", classes="section-title")
|
||||
self.results_table = DataTable(id="results-table")
|
||||
yield self.results_table
|
||||
yield Static("Metadata", classes="section-title")
|
||||
self.metadata_tree = Tree("Run a pipeline", id="metadata-tree")
|
||||
yield self.metadata_tree
|
||||
with Vertical(id="command-pane"):
|
||||
with Horizontal(id="command-row"):
|
||||
yield Input(placeholder="Enter pipeline command...", id="pipeline-input")
|
||||
yield Button("Run", id="run-button")
|
||||
yield Button("Tags", id="tags-button")
|
||||
yield Button("Metadata", id="metadata-button")
|
||||
yield Button("Relationships", id="relationships-button")
|
||||
yield Static("Ready", id="status-panel")
|
||||
yield OptionList(id="cmd-suggestions")
|
||||
|
||||
with Vertical(id="results-pane"):
|
||||
yield Label("Results", classes="section-title")
|
||||
yield DataTable(id="results-table")
|
||||
|
||||
with Vertical(id="bottom-pane"):
|
||||
yield Label("Store + Output", classes="section-title")
|
||||
with Horizontal(id="store-row"):
|
||||
yield Select([], id="store-select")
|
||||
yield Input(placeholder="Output path (optional)", id="output-path")
|
||||
|
||||
with Horizontal(id="logs-workers-row"):
|
||||
with Vertical(id="logs-pane"):
|
||||
yield Label("Logs", classes="section-title")
|
||||
yield TextArea(id="log-output", read_only=True)
|
||||
|
||||
with Vertical(id="workers-pane"):
|
||||
yield Label("Workers", classes="section-title")
|
||||
yield DataTable(id="workers-table")
|
||||
yield Footer()
|
||||
|
||||
def on_mount(self) -> None:
|
||||
self.command_input = self.query_one("#pipeline-input", Input)
|
||||
self.status_panel = self.query_one("#status-panel", Static)
|
||||
self.results_table = self.query_one("#results-table", DataTable)
|
||||
self.worker_table = self.query_one("#workers-table", DataTable)
|
||||
self.log_output = self.query_one("#log-output", TextArea)
|
||||
self.store_select = self.query_one("#store-select", Select)
|
||||
self.path_input = self.query_one("#output-path", Input)
|
||||
self.suggestion_list = self.query_one("#cmd-suggestions", OptionList)
|
||||
|
||||
if self.suggestion_list:
|
||||
self.suggestion_list.display = False
|
||||
|
||||
if self.results_table:
|
||||
self.results_table.cursor_type = "row"
|
||||
self.results_table.zebra_stripes = True
|
||||
self.results_table.add_columns("Row", "Title", "Source", "File")
|
||||
if self.worker_table:
|
||||
self.worker_table.add_columns("ID", "Type", "Status", "Details")
|
||||
|
||||
self._populate_store_options()
|
||||
self._load_cmdlet_names()
|
||||
if self.executor.worker_manager:
|
||||
self.set_interval(2.0, self.refresh_workers)
|
||||
self.refresh_workers()
|
||||
@@ -131,8 +318,22 @@ class PipelineHubApp(App):
|
||||
if self.command_input:
|
||||
self.command_input.focus()
|
||||
|
||||
def action_focus_logs(self) -> None:
|
||||
if self.log_output:
|
||||
self.log_output.focus()
|
||||
|
||||
def action_run_pipeline(self) -> None:
|
||||
if self._pipeline_running:
|
||||
# Self-heal if the background worker already stopped (e.g. error in thread).
|
||||
worker = self._pipeline_worker
|
||||
try:
|
||||
is_running = bool(getattr(worker, "is_running", False))
|
||||
except Exception:
|
||||
is_running = True
|
||||
if (worker is None) or (not is_running):
|
||||
self._pipeline_running = False
|
||||
self._pipeline_worker = None
|
||||
else:
|
||||
self.notify("Pipeline already running", severity="warning", timeout=3)
|
||||
return
|
||||
if not self.command_input:
|
||||
@@ -142,12 +343,33 @@ class PipelineHubApp(App):
|
||||
self.notify("Enter a pipeline to run", severity="warning", timeout=3)
|
||||
return
|
||||
|
||||
pipeline_text = self._apply_store_path_and_tags(pipeline_text)
|
||||
|
||||
self._pipeline_running = True
|
||||
self._set_status("Running…", level="info")
|
||||
self._clear_log()
|
||||
self._append_log_line(f"$ {pipeline_text}")
|
||||
self._clear_results()
|
||||
self._run_pipeline_background(pipeline_text)
|
||||
self._pipeline_worker = self._run_pipeline_background(pipeline_text)
|
||||
|
||||
@on(Input.Changed, "#pipeline-input")
|
||||
def on_pipeline_input_changed(self, event: Input.Changed) -> None:
|
||||
text = str(event.value or "")
|
||||
self._update_suggestions(text)
|
||||
self._update_syntax_status(text)
|
||||
|
||||
@on(OptionList.OptionSelected, "#cmd-suggestions")
|
||||
def on_suggestion_selected(self, event: OptionList.OptionSelected) -> None:
|
||||
if not self.command_input or not self.suggestion_list:
|
||||
return
|
||||
try:
|
||||
suggestion = str(event.option.prompt)
|
||||
except Exception:
|
||||
return
|
||||
new_text = self._apply_suggestion_to_text(str(self.command_input.value or ""), suggestion)
|
||||
self.command_input.value = new_text
|
||||
self.suggestion_list.display = False
|
||||
self.command_input.focus()
|
||||
|
||||
def action_refresh_workers(self) -> None:
|
||||
self.refresh_workers()
|
||||
@@ -158,34 +380,178 @@ class PipelineHubApp(App):
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
if event.button.id == "run-button":
|
||||
self.action_run_pipeline()
|
||||
elif event.button.id == "tags-button":
|
||||
self._open_tags_popup()
|
||||
elif event.button.id == "metadata-button":
|
||||
self._open_metadata_popup()
|
||||
elif event.button.id == "relationships-button":
|
||||
self._open_relationships_popup()
|
||||
|
||||
def on_input_submitted(self, event: Input.Submitted) -> None:
|
||||
if event.input.id == "pipeline-input":
|
||||
self.action_run_pipeline()
|
||||
|
||||
def on_list_view_selected(self, event: ListView.Selected) -> None:
|
||||
if isinstance(event.item, PresetListItem) and self.command_input:
|
||||
self.command_input.value = event.item.preset.pipeline
|
||||
self.notify(f"Loaded preset: {event.item.preset.label}", timeout=2)
|
||||
def on_key(self, event: Key) -> None:
|
||||
# Make Tab accept autocomplete when typing commands.
|
||||
if event.key != "tab":
|
||||
return
|
||||
if not self.command_input or not self.command_input.has_focus:
|
||||
return
|
||||
suggestion = self._get_first_suggestion()
|
||||
if not suggestion:
|
||||
return
|
||||
|
||||
self.command_input.value = self._apply_suggestion_to_text(str(self.command_input.value or ""), suggestion)
|
||||
if self.suggestion_list:
|
||||
self.suggestion_list.display = False
|
||||
event.prevent_default()
|
||||
event.stop()
|
||||
|
||||
def _get_first_suggestion(self) -> str:
|
||||
if not self.suggestion_list or not bool(getattr(self.suggestion_list, "display", False)):
|
||||
return ""
|
||||
# Textual OptionList API differs across versions; handle best-effort.
|
||||
try:
|
||||
options = list(getattr(self.suggestion_list, "options", []) or [])
|
||||
if options:
|
||||
first = options[0]
|
||||
return str(getattr(first, "prompt", "") or "")
|
||||
except Exception:
|
||||
pass
|
||||
return ""
|
||||
|
||||
def _populate_store_options(self) -> None:
|
||||
"""Populate the store dropdown from the configured Store registry."""
|
||||
if not self.store_select:
|
||||
return
|
||||
try:
|
||||
cfg = load_config() or {}
|
||||
except Exception:
|
||||
cfg = {}
|
||||
|
||||
stores: List[str] = []
|
||||
try:
|
||||
stores = StoreRegistry(config=cfg, suppress_debug=True).list_backends()
|
||||
except Exception:
|
||||
stores = []
|
||||
|
||||
# Always offer a reasonable default even if config is missing.
|
||||
if "local" not in [s.lower() for s in stores]:
|
||||
stores = ["local", *stores]
|
||||
|
||||
options = [(name, name) for name in stores]
|
||||
try:
|
||||
self.store_select.set_options(options)
|
||||
if options:
|
||||
current = getattr(self.store_select, "value", None)
|
||||
# Textual Select uses a sentinel for "no selection".
|
||||
if (current is None) or (current == "") or (current is Select.BLANK):
|
||||
self.store_select.value = options[0][1]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
def _get_selected_store(self) -> Optional[str]:
|
||||
if not self.store_select:
|
||||
return None
|
||||
try:
|
||||
value = getattr(self.store_select, "value", None)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
if value is None or value is Select.BLANK:
|
||||
return None
|
||||
try:
|
||||
text = str(value).strip()
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
if not text or text == "Select.BLANK":
|
||||
return None
|
||||
return text
|
||||
|
||||
def _apply_store_path_and_tags(self, pipeline_text: str) -> str:
|
||||
"""Apply store/path/tags UI fields to the pipeline text.
|
||||
|
||||
Rules (simple + non-destructive):
|
||||
- If output path is set and the first stage is download-media and has no -path/--path, append -path.
|
||||
- If a store is selected and pipeline has no add-file stage, append add-file -store <store>.
|
||||
"""
|
||||
base = str(pipeline_text or "").strip()
|
||||
if not base:
|
||||
return base
|
||||
|
||||
selected_store = self._get_selected_store()
|
||||
|
||||
output_path = ""
|
||||
if self.path_input:
|
||||
try:
|
||||
output_path = str(self.path_input.value or "").strip()
|
||||
except Exception:
|
||||
output_path = ""
|
||||
|
||||
stages = [s.strip() for s in base.split("|") if s.strip()]
|
||||
if not stages:
|
||||
return base
|
||||
|
||||
# Identify first stage command name for conservative auto-augmentation.
|
||||
first_stage_cmd = ""
|
||||
try:
|
||||
first_stage_cmd = str(stages[0].split()[0]).replace("_", "-").strip().lower() if stages[0].split() else ""
|
||||
except Exception:
|
||||
first_stage_cmd = ""
|
||||
|
||||
# Apply -path to download-media first stage (only if missing)
|
||||
if output_path:
|
||||
first = stages[0]
|
||||
low = first.lower()
|
||||
if low.startswith("download-media") and " -path" not in low and " --path" not in low:
|
||||
stages[0] = f"{first} -path {json.dumps(output_path)}"
|
||||
|
||||
joined = " | ".join(stages)
|
||||
|
||||
low_joined = joined.lower()
|
||||
|
||||
# Only auto-append add-file for download pipelines.
|
||||
should_auto_add_file = bool(
|
||||
selected_store
|
||||
and ("add-file" not in low_joined)
|
||||
and (first_stage_cmd in {"download-media", "download-file", "download-torrent"})
|
||||
)
|
||||
|
||||
if should_auto_add_file:
|
||||
store_token = json.dumps(selected_store)
|
||||
joined = f"{joined} | add-file -store {store_token}"
|
||||
|
||||
return joined
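
# Illustrative example of the rules above (URL and path values are hypothetical;
# exact quoting comes from json.dumps):
#   user input:  download-media "https://example.com/v" | merge-file
#   with store "local" selected and output path C:\out, the method returns:
#   download-media "https://example.com/v" -path "C:\\out" | merge-file | add-file -store "local"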
|
||||
|
||||
def on_data_table_row_highlighted(self, event: DataTable.RowHighlighted) -> None:
|
||||
if not self.results_table or event.control is not self.results_table:
|
||||
return
|
||||
index = event.cursor_row
|
||||
if 0 <= index < len(self.result_items):
|
||||
self._display_metadata(index)
|
||||
index = int(event.cursor_row or 0)
|
||||
if index < 0:
|
||||
index = 0
|
||||
self._selected_row_index = index
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Pipeline execution helpers
|
||||
# ------------------------------------------------------------------
|
||||
@work(exclusive=True, thread=True)
|
||||
def _run_pipeline_background(self, pipeline_text: str) -> None:
|
||||
try:
|
||||
run_result = self.executor.run_pipeline(pipeline_text, on_log=self._log_from_worker)
|
||||
except Exception as exc:
|
||||
# Ensure the UI never gets stuck in "running" state.
|
||||
run_result = PipelineRunResult(
|
||||
pipeline=str(pipeline_text or ""),
|
||||
success=False,
|
||||
error=f"{type(exc).__name__}: {exc}",
|
||||
stderr=f"{type(exc).__name__}: {exc}",
|
||||
)
|
||||
self.call_from_thread(self._on_pipeline_finished, run_result)
|
||||
|
||||
def _on_pipeline_finished(self, run_result: PipelineRunResult) -> None:
|
||||
self._pipeline_running = False
|
||||
self._pipeline_worker = None
|
||||
status_level = "success" if run_result.success else "error"
|
||||
status_text = "Completed" if run_result.success else "Failed"
|
||||
self._set_status(status_text, level=status_level)
|
||||
@@ -219,6 +585,8 @@ class PipelineHubApp(App):
|
||||
self.current_result_table = run_result.result_table
|
||||
self._populate_results_table()
|
||||
self.refresh_workers()
|
||||
if self.result_items:
|
||||
self._selected_row_index = 0
|
||||
|
||||
def _log_from_worker(self, message: str) -> None:
|
||||
self.call_from_thread(self._append_log_line, message)
|
||||
@@ -251,35 +619,213 @@ class PipelineHubApp(App):
|
||||
for idx, item in enumerate(self.result_items, start=1):
|
||||
self.results_table.add_row(str(idx), str(item), "—", "—", key=str(idx - 1))
|
||||
|
||||
def _display_metadata(self, index: int) -> None:
|
||||
if not self.metadata_tree:
|
||||
def _load_cmdlet_names(self) -> None:
|
||||
try:
|
||||
ensure_registry_loaded()
|
||||
names = list_cmdlet_names() or []
|
||||
self._cmdlet_names = sorted({str(n).replace("_", "-") for n in names if str(n).strip()})
|
||||
except Exception:
|
||||
self._cmdlet_names = []
|
||||
|
||||
def _update_syntax_status(self, text: str) -> None:
|
||||
if self._pipeline_running:
|
||||
return
|
||||
root = self.metadata_tree.root
|
||||
root.label = "Metadata"
|
||||
root.remove_children()
|
||||
raw = str(text or "").strip()
|
||||
if not raw:
|
||||
self._set_status("Ready", level="info")
|
||||
return
|
||||
try:
|
||||
err = validate_pipeline_text(raw)
|
||||
except Exception:
|
||||
err = None
|
||||
if err:
|
||||
self._set_status(err.message, level="error")
|
||||
else:
|
||||
self._set_status("Ready", level="info")
|
||||
|
||||
if self.current_result_table and 0 <= index < len(self.current_result_table.rows):
|
||||
def _update_suggestions(self, text: str) -> None:
|
||||
if not self.suggestion_list:
|
||||
return
|
||||
raw = str(text or "")
|
||||
prefix = self._current_cmd_prefix(raw)
|
||||
if not prefix:
|
||||
self.suggestion_list.display = False
|
||||
return
|
||||
|
||||
pref_low = prefix.lower()
|
||||
matches = [n for n in self._cmdlet_names if n.lower().startswith(pref_low)]
|
||||
matches = matches[:10]
|
||||
|
||||
if not matches:
|
||||
self.suggestion_list.display = False
|
||||
return
|
||||
|
||||
try:
|
||||
self.suggestion_list.clear_options() # type: ignore[attr-defined]
|
||||
except Exception:
|
||||
try:
|
||||
# Fallback for older/newer Textual APIs.
|
||||
self.suggestion_list.options = [] # type: ignore[attr-defined]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
try:
|
||||
self.suggestion_list.add_options([Option(m) for m in matches]) # type: ignore[attr-defined]
|
||||
except Exception:
|
||||
try:
|
||||
self.suggestion_list.options = [Option(m) for m in matches] # type: ignore[attr-defined]
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
self.suggestion_list.display = True
|
||||
|
||||
@staticmethod
|
||||
def _current_cmd_prefix(text: str) -> str:
|
||||
"""Best-effort prefix for cmdlet name completion.
|
||||
|
||||
Completes the token immediately after start-of-line or a '|'.
|
||||
"""
|
||||
raw = str(text or "")
|
||||
# Find the segment after the last pipe.
|
||||
segment = raw.split("|")[-1]
|
||||
# Remove leading whitespace.
|
||||
segment = segment.lstrip()
|
||||
if not segment:
|
||||
return ""
|
||||
# Only complete the first token of the segment.
|
||||
m = re.match(r"([A-Za-z0-9_\-]*)", segment)
|
||||
return m.group(1) if m else ""
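
# For instance (hypothetical input), _current_cmd_prefix('download-media "x" | add-t')
# returns "add-t", while _current_cmd_prefix('add-tag -store local') returns "add-tag":
# only the first token of the segment after the last pipe is completed.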
|
||||
|
||||
@staticmethod
|
||||
def _apply_suggestion_to_text(text: str, suggestion: str) -> str:
|
||||
raw = str(text or "")
|
||||
parts = raw.split("|")
|
||||
if not parts:
|
||||
return suggestion
|
||||
last = parts[-1]
|
||||
# Preserve leading spaces after the pipe.
|
||||
leading = "".join(ch for ch in last if ch.isspace())
|
||||
trimmed = last.lstrip()
|
||||
# Replace first token in last segment.
|
||||
replaced = re.sub(r"^[A-Za-z0-9_\-]*", suggestion, trimmed)
|
||||
parts[-1] = leading + replaced
|
||||
return "|".join(parts)
|
||||
|
||||
def _resolve_selected_item(self) -> Tuple[Optional[Any], Optional[str], Optional[str]]:
|
||||
"""Return (item, store_name, hash) for the currently selected row."""
|
||||
index = int(getattr(self, "_selected_row_index", 0) or 0)
|
||||
if index < 0:
|
||||
index = 0
|
||||
|
||||
item: Any = None
|
||||
|
||||
# Prefer mapping displayed table row -> source item.
|
||||
if self.current_result_table and 0 <= index < len(getattr(self.current_result_table, "rows", []) or []):
|
||||
row = self.current_result_table.rows[index]
|
||||
for col in row.columns:
|
||||
root.add(f"[b]{col.name}[/b]: {col.value}")
|
||||
elif 0 <= index < len(self.result_items):
|
||||
item = self.result_items[index]
|
||||
if isinstance(item, dict):
|
||||
self._populate_tree_node(root, item)
|
||||
else:
|
||||
root.add(str(item))
|
||||
src_idx = getattr(row, "source_index", None)
|
||||
if isinstance(src_idx, int) and 0 <= src_idx < len(self.result_items):
|
||||
item = self.result_items[src_idx]
|
||||
|
||||
def _populate_tree_node(self, node, data: Any) -> None:
|
||||
if isinstance(data, dict):
|
||||
for key, value in data.items():
|
||||
child = node.add(f"[b]{key}[/b]")
|
||||
self._populate_tree_node(child, value)
|
||||
elif isinstance(data, Sequence) and not isinstance(data, (str, bytes)):
|
||||
for idx, value in enumerate(data):
|
||||
child = node.add(f"[{idx}]")
|
||||
self._populate_tree_node(child, value)
|
||||
if item is None and 0 <= index < len(self.result_items):
|
||||
item = self.result_items[index]
|
||||
|
||||
store_name = None
|
||||
file_hash = None
|
||||
if isinstance(item, dict):
|
||||
store_name = item.get("store")
|
||||
file_hash = item.get("hash")
|
||||
else:
|
||||
node.add(str(data))
|
||||
store_name = getattr(item, "store", None)
|
||||
file_hash = getattr(item, "hash", None)
|
||||
|
||||
store_text = str(store_name).strip() if store_name is not None else ""
|
||||
hash_text = str(file_hash).strip() if file_hash is not None else ""
|
||||
|
||||
if not store_text:
|
||||
# Fallback to UI store selection when item doesn't carry it.
|
||||
store_text = self._get_selected_store() or ""
|
||||
|
||||
return item, (store_text or None), (hash_text or None)
|
||||
|
||||
def _open_tags_popup(self) -> None:
|
||||
if self._pipeline_running:
|
||||
self.notify("Pipeline already running", severity="warning", timeout=3)
|
||||
return
|
||||
item, store_name, file_hash = self._resolve_selected_item()
|
||||
if item is None:
|
||||
self.notify("No selected item", severity="warning", timeout=3)
|
||||
return
|
||||
if not store_name:
|
||||
self.notify("Selected item missing store", severity="warning", timeout=4)
|
||||
return
|
||||
|
||||
seeds: Any = item
|
||||
if isinstance(item, dict):
|
||||
seeds = dict(item)
|
||||
try:
|
||||
if store_name and not str(seeds.get("store") or "").strip():
|
||||
seeds["store"] = store_name
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
if file_hash and not str(seeds.get("hash") or "").strip():
|
||||
seeds["hash"] = file_hash
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
self.push_screen(TagEditorPopup(seeds=seeds, store_name=store_name, file_hash=file_hash))
|
||||
|
||||
def _open_metadata_popup(self) -> None:
|
||||
item, _store_name, _file_hash = self._resolve_selected_item()
|
||||
if item is None:
|
||||
self.notify("No selected item", severity="warning", timeout=3)
|
||||
return
|
||||
text = ""
|
||||
idx = int(getattr(self, "_selected_row_index", 0) or 0)
|
||||
if self.current_result_table and 0 <= idx < len(getattr(self.current_result_table, "rows", []) or []):
|
||||
row = self.current_result_table.rows[idx]
|
||||
lines = [f"{col.name}: {col.value}" for col in getattr(row, "columns", []) or []]
|
||||
text = "\n".join(lines)
|
||||
elif isinstance(item, dict):
|
||||
try:
|
||||
text = json.dumps(item, indent=2, ensure_ascii=False)
|
||||
except Exception:
|
||||
text = str(item)
|
||||
else:
|
||||
text = str(item)
|
||||
self.push_screen(TextPopup(title="Metadata", text=text))
|
||||
|
||||
def _open_relationships_popup(self) -> None:
|
||||
item, _store_name, _file_hash = self._resolve_selected_item()
|
||||
if item is None:
|
||||
self.notify("No selected item", severity="warning", timeout=3)
|
||||
return
|
||||
|
||||
relationships = None
|
||||
if isinstance(item, dict):
|
||||
relationships = item.get("relationships") or item.get("relationship")
|
||||
else:
|
||||
relationships = getattr(item, "relationships", None)
|
||||
if not relationships:
|
||||
relationships = getattr(item, "get_relationships", lambda: None)()
|
||||
|
||||
if not relationships:
|
||||
self.push_screen(TextPopup(title="Relationships", text="No relationships"))
|
||||
return
|
||||
|
||||
lines: List[str] = []
|
||||
if isinstance(relationships, dict):
|
||||
for rel_type, value in relationships.items():
|
||||
if isinstance(value, list):
|
||||
if not value:
|
||||
lines.append(f"{rel_type}: (empty)")
|
||||
for v in value:
|
||||
lines.append(f"{rel_type}: {v}")
|
||||
else:
|
||||
lines.append(f"{rel_type}: {value}")
|
||||
else:
|
||||
lines.append(str(relationships))
|
||||
self.push_screen(TextPopup(title="Relationships", text="\n".join(lines)))
|
||||
|
||||
def _clear_log(self) -> None:
|
||||
self.log_lines = []
|
||||
@@ -301,9 +847,7 @@ class PipelineHubApp(App):
|
||||
self.result_items = []
|
||||
if self.results_table:
|
||||
self.results_table.clear()
|
||||
if self.metadata_tree:
|
||||
self.metadata_tree.root.label = "Awaiting results"
|
||||
self.metadata_tree.root.remove_children()
|
||||
self._selected_row_index = 0
|
||||
|
||||
def _set_status(self, message: str, *, level: str = "info") -> None:
|
||||
if not self.status_panel:
|
||||
|
||||
113
TUI/tui.tcss
@@ -14,6 +14,11 @@
|
||||
border: round $primary;
|
||||
}
|
||||
|
||||
#command-row {
|
||||
width: 100%;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
#pipeline-input {
|
||||
width: 1fr;
|
||||
min-height: 3;
|
||||
@@ -38,22 +43,61 @@
|
||||
border: solid $panel-darken-1;
|
||||
}
|
||||
|
||||
#content-row {
|
||||
#cmd-suggestions {
|
||||
width: 100%;
|
||||
height: 1fr;
|
||||
height: auto;
|
||||
max-height: 8;
|
||||
margin-top: 1;
|
||||
background: $surface;
|
||||
border: round $panel-darken-2;
|
||||
}
|
||||
|
||||
#left-pane,
|
||||
#right-pane {
|
||||
#results-pane {
|
||||
width: 100%;
|
||||
height: 2fr;
|
||||
padding: 1;
|
||||
background: $panel;
|
||||
border: round $panel-darken-2;
|
||||
margin-top: 1;
|
||||
}
|
||||
|
||||
#store-select {
|
||||
width: 24;
|
||||
margin-right: 2;
|
||||
height: 3;
|
||||
}
|
||||
|
||||
#output-path {
|
||||
width: 1fr;
|
||||
height: 100%;
|
||||
height: 3;
|
||||
}
|
||||
|
||||
|
||||
#bottom-pane {
|
||||
width: 100%;
|
||||
height: 1fr;
|
||||
padding: 1;
|
||||
background: $panel;
|
||||
border: round $panel-darken-2;
|
||||
}
|
||||
|
||||
#left-pane {
|
||||
max-width: 60;
|
||||
|
||||
#store-row {
|
||||
width: 100%;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
#logs-workers-row {
|
||||
width: 100%;
|
||||
height: 1fr;
|
||||
margin-top: 1;
|
||||
}
|
||||
|
||||
#logs-pane,
|
||||
#workers-pane {
|
||||
width: 1fr;
|
||||
height: 100%;
|
||||
padding: 0 1;
|
||||
}
|
||||
|
||||
.section-title {
|
||||
@@ -62,33 +106,19 @@
|
||||
margin-top: 1;
|
||||
}
|
||||
|
||||
.preset-entry {
|
||||
padding: 1;
|
||||
border: tall $panel-darken-1;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
|
||||
#preset-list {
|
||||
height: 25;
|
||||
border: solid $secondary;
|
||||
}
|
||||
|
||||
#log-output {
|
||||
height: 16;
|
||||
height: 1fr;
|
||||
}
|
||||
|
||||
#workers-table {
|
||||
height: auto;
|
||||
height: 1fr;
|
||||
}
|
||||
|
||||
#results-table {
|
||||
height: 1fr;
|
||||
}
|
||||
|
||||
#metadata-tree {
|
||||
height: 1fr;
|
||||
border: round $panel-darken-1;
|
||||
}
|
||||
|
||||
|
||||
.status-info {
|
||||
background: $boost;
|
||||
@@ -110,3 +140,38 @@
|
||||
min-width: 10;
|
||||
margin: 0 1;
|
||||
}
|
||||
|
||||
#tags-button,
|
||||
#metadata-button,
|
||||
#relationships-button {
|
||||
width: auto;
|
||||
min-width: 12;
|
||||
margin: 0 1;
|
||||
}
|
||||
|
||||
#popup-title {
|
||||
width: 100%;
|
||||
height: 3;
|
||||
text-style: bold;
|
||||
content-align: center middle;
|
||||
border: round $panel-darken-2;
|
||||
background: $boost;
|
||||
}
|
||||
|
||||
#popup-text,
|
||||
#tags-editor {
|
||||
height: 1fr;
|
||||
border: round $panel-darken-2;
|
||||
}
|
||||
|
||||
#tags-buttons {
|
||||
width: 100%;
|
||||
height: auto;
|
||||
margin-top: 1;
|
||||
}
|
||||
|
||||
#tags-status {
|
||||
width: 1fr;
|
||||
height: 3;
|
||||
content-align: left middle;
|
||||
}
|
||||
@@ -1328,6 +1328,38 @@ def _unique_destination_path(dest: Path) -> Path:
|
||||
return dest
|
||||
|
||||
|
||||
def _print_live_safe_stderr(message: str) -> None:
|
||||
"""Print to stderr without breaking Rich Live progress output."""
|
||||
try:
|
||||
from rich_display import stderr_console # type: ignore
|
||||
except Exception:
|
||||
return
|
||||
|
||||
cm = None
|
||||
try:
|
||||
import pipeline as _pipeline_ctx # type: ignore
|
||||
suspend = getattr(_pipeline_ctx, "suspend_live_progress", None)
|
||||
cm = suspend() if callable(suspend) else None
|
||||
except Exception:
|
||||
cm = None
|
||||
|
||||
try:
|
||||
from contextlib import nullcontext
|
||||
except Exception:
|
||||
nullcontext = None # type: ignore
|
||||
if cm is None:
|
||||
cm = nullcontext() if callable(nullcontext) else None
|
||||
|
||||
try:
|
||||
if cm is not None:
|
||||
with cm:
|
||||
stderr_console.print(str(message))
|
||||
else:
|
||||
stderr_console.print(str(message))
|
||||
except Exception:
|
||||
return
|
||||
|
||||
|
||||
def apply_output_path_from_pipeobjects(
|
||||
*,
|
||||
cmd_name: str,
|
||||
@@ -1350,6 +1382,16 @@ def apply_output_path_from_pipeobjects(
|
||||
if not dest_raw:
|
||||
return list(emits or [])
|
||||
|
||||
# Guard: users sometimes pass a URL into -path by mistake (e.g. `-path https://...`).
|
||||
# Treat that as invalid for filesystem moves and avoid breaking Rich Live output.
|
||||
try:
|
||||
dest_str = str(dest_raw).strip()
|
||||
if "://" in dest_str:
|
||||
_print_live_safe_stderr(f"Ignoring -path value that looks like a URL: {dest_str}")
|
||||
return list(emits or [])
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
cmd_norm = str(cmd_name or "").replace("_", "-").strip().lower()
|
||||
if not cmd_norm:
|
||||
return list(emits or [])
|
||||
@@ -1410,7 +1452,7 @@ def apply_output_path_from_pipeobjects(
|
||||
try:
|
||||
dest_dir.mkdir(parents=True, exist_ok=True)
|
||||
except Exception as exc:
|
||||
log(f"Failed to create destination directory: {dest_dir} ({exc})", file=sys.stderr)
|
||||
_print_live_safe_stderr(f"Failed to create destination directory: {dest_dir} ({exc})")
|
||||
return items
|
||||
|
||||
for idx, src in zip(artifact_indices, artifact_paths):
|
||||
@@ -1418,15 +1460,18 @@ def apply_output_path_from_pipeobjects(
|
||||
final = _unique_destination_path(final)
|
||||
try:
|
||||
if src.resolve() == final.resolve():
|
||||
_apply_saved_path_update(items[idx], old_path=str(src), new_path=str(final))
|
||||
_print_saved_output_panel(items[idx], final)
|
||||
continue
|
||||
except Exception:
|
||||
pass
|
||||
try:
|
||||
shutil.move(str(src), str(final))
|
||||
except Exception as exc:
|
||||
log(f"Failed to save output to {final}: {exc}", file=sys.stderr)
|
||||
_print_live_safe_stderr(f"Failed to save output to {final}: {exc}")
|
||||
continue
|
||||
_apply_saved_path_update(items[idx], old_path=str(src), new_path=str(final))
|
||||
_print_saved_output_panel(items[idx], final)
|
||||
|
||||
return items
|
||||
|
||||
@@ -1445,7 +1490,7 @@ def apply_output_path_from_pipeobjects(
|
||||
try:
|
||||
final.parent.mkdir(parents=True, exist_ok=True)
|
||||
except Exception as exc:
|
||||
log(f"Failed to create destination directory: {final.parent} ({exc})", file=sys.stderr)
|
||||
_print_live_safe_stderr(f"Failed to create destination directory: {final.parent} ({exc})")
|
||||
return items
|
||||
|
||||
final = _unique_destination_path(final)
|
||||
@@ -1453,13 +1498,89 @@ def apply_output_path_from_pipeobjects(
|
||||
if src.resolve() != final.resolve():
|
||||
shutil.move(str(src), str(final))
|
||||
except Exception as exc:
|
||||
log(f"Failed to save output to {final}: {exc}", file=sys.stderr)
|
||||
_print_live_safe_stderr(f"Failed to save output to {final}: {exc}")
|
||||
return items
|
||||
|
||||
_apply_saved_path_update(items[idx], old_path=str(src), new_path=str(final))
|
||||
_print_saved_output_panel(items[idx], final)
|
||||
return items
|
||||
|
||||
|
||||
def _print_saved_output_panel(item: Any, final_path: Path) -> None:
|
||||
"""When -path is used, print a Rich panel summarizing the saved output.
|
||||
|
||||
Shows: Title, Location, Hash.
|
||||
Best-effort: reads existing fields first to avoid recomputing hashes.
|
||||
"""
|
||||
try:
|
||||
from rich.panel import Panel # type: ignore
|
||||
from rich.table import Table # type: ignore
|
||||
from rich_display import stderr_console # type: ignore
|
||||
except Exception:
|
||||
return
|
||||
|
||||
# If Rich Live progress is active, pause it while printing so the panel
|
||||
# doesn't get overwritten/truncated by Live's cursor control.
|
||||
try:
|
||||
import pipeline as _pipeline_ctx # type: ignore
|
||||
suspend = getattr(_pipeline_ctx, "suspend_live_progress", None)
|
||||
cm = suspend() if callable(suspend) else None
|
||||
except Exception:
|
||||
cm = None
|
||||
|
||||
try:
|
||||
from contextlib import nullcontext
|
||||
except Exception:
|
||||
nullcontext = None # type: ignore
|
||||
|
||||
if cm is None:
|
||||
cm = nullcontext() if callable(nullcontext) else None
|
||||
|
||||
try:
|
||||
location = str(final_path)
|
||||
except Exception:
|
||||
location = ""
|
||||
|
||||
title = ""
|
||||
try:
|
||||
title = str(get_field(item, "title") or get_field(item, "name") or "").strip()
|
||||
except Exception:
|
||||
title = ""
|
||||
if not title:
|
||||
try:
|
||||
title = str(final_path.stem or final_path.name)
|
||||
except Exception:
|
||||
title = ""
|
||||
|
||||
file_hash = ""
|
||||
try:
|
||||
file_hash = str(get_field(item, "hash") or get_field(item, "sha256") or "").strip()
|
||||
except Exception:
|
||||
file_hash = ""
|
||||
if not file_hash:
|
||||
try:
|
||||
from SYS.utils import sha256_file # type: ignore
|
||||
file_hash = str(sha256_file(final_path) or "").strip()
|
||||
except Exception:
|
||||
file_hash = ""
|
||||
|
||||
grid = Table.grid(padding=(0, 1))
|
||||
grid.add_column(justify="right", style="bold")
|
||||
grid.add_column()
|
||||
grid.add_row("Title", title or "(unknown)")
|
||||
grid.add_row("Location", location or "(unknown)")
|
||||
grid.add_row("Hash", file_hash or "(unknown)")
|
||||
|
||||
try:
|
||||
if cm is not None:
|
||||
with cm:
|
||||
stderr_console.print(Panel(grid, title="Saved", expand=False))
|
||||
else:
|
||||
stderr_console.print(Panel(grid, title="Saved", expand=False))
|
||||
except Exception:
|
||||
return
|
||||
|
||||
|
||||
def _apply_saved_path_update(item: Any, *, old_path: str, new_path: str) -> None:
|
||||
"""Update a PipeObject-like item after its backing file has moved."""
|
||||
old_str = str(old_path)
|
||||
@@ -1952,9 +2073,6 @@ def coerce_to_pipe_object(value: Any, default_path: Optional[str] = None) -> mod
|
||||
extra=extra,
|
||||
)
|
||||
|
||||
# Debug: Print formatted table
|
||||
pipe_obj.debug_table()
|
||||
|
||||
return pipe_obj
|
||||
|
||||
# Fallback: build from path argument or bare value
|
||||
@@ -2000,9 +2118,6 @@ def coerce_to_pipe_object(value: Any, default_path: Optional[str] = None) -> mod
|
||||
extra={},
|
||||
)
|
||||
|
||||
# Debug: Print formatted table
|
||||
pipe_obj.debug_table()
|
||||
|
||||
return pipe_obj
|
||||
|
||||
|
||||
|
||||
@@ -1077,7 +1077,11 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
|
||||
# Handle Save Playlist
|
||||
if save_mode:
|
||||
playlist_name = index_arg or f"Playlist {subprocess.check_output(['date', '/t'], shell=True).decode().strip()}"
|
||||
# Avoid `shell=True` / `date /t` on Windows (can flash a cmd.exe window).
|
||||
# Use Python's datetime instead.
|
||||
from datetime import datetime
|
||||
|
||||
playlist_name = index_arg or f"Playlist {datetime.now().strftime('%Y-%m-%d')}"
|
||||
# If index_arg was used for name, clear it so it doesn't trigger index logic
|
||||
if index_arg:
|
||||
index_arg = None
|
||||
@@ -1193,12 +1197,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
ctx.set_last_result_table_overlay(table, [p['items'] for p in playlists])
|
||||
ctx.set_current_stage_table(table)
|
||||
|
||||
# In pipeline mode, the CLI renders current-stage tables; printing here duplicates output.
|
||||
suppress_direct_print = bool(isinstance(config, dict) and config.get("_quiet_background_output"))
|
||||
if not suppress_direct_print:
|
||||
from rich_display import stdout_console
|
||||
|
||||
stdout_console().print(table)
|
||||
# Do not print directly here.
|
||||
# Both CmdletExecutor and PipelineExecutor render the current-stage/overlay table,
|
||||
# so printing here would duplicate output.
|
||||
return 0
|
||||
|
||||
# Everything below was originally outside a try block; keep it inside so `start_opts` is in scope.
|
||||
@@ -1513,12 +1514,9 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
|
||||
ctx.set_last_result_table_overlay(table, pipe_objects)
|
||||
ctx.set_current_stage_table(table)
|
||||
|
||||
# In pipeline mode, the CLI renders current-stage tables; printing here duplicates output.
|
||||
suppress_direct_print = bool(isinstance(config, dict) and config.get("_quiet_background_output"))
|
||||
if not suppress_direct_print:
|
||||
from rich_display import stdout_console
|
||||
|
||||
stdout_console().print(table)
|
||||
# Do not print directly here.
|
||||
# Both CmdletExecutor and PipelineExecutor render the current-stage/overlay table,
|
||||
# so printing here would duplicate output.
|
||||
|
||||
return 0
|
||||
finally:
|
||||
|
||||
@@ -32,6 +32,7 @@ Notes

- On Windows you may need to run PowerShell with an appropriate ExecutionPolicy (example shows using `-ExecutionPolicy Bypass`).
- The scripts default to a venv directory named `.venv` in the repository root. Use `-VenvPath` (PowerShell) or `--venv` (bash) to choose a different directory.
- The scripts will also install Playwright browser binaries by default (Chromium only) after installing Python dependencies. Use `--no-playwright` (bash) or `-NoPlaywright` (PowerShell) to opt out, or `--playwright-browsers <list>` / `-PlaywrightBrowsers <list>` to request specific engines (comma-separated, or use `all` to install all engines).
- The scripts are intended to make day-to-day developer setup easy; tweak flags for your desired install mode (editable vs normal) and shortcut preferences.

## Deno — installed by bootstrap
@@ -82,3 +83,31 @@ DENO_VERSION=v1.34.3 ./scripts/bootstrap.sh
If you'd like, I can also:
- Add a short README section in `readme.md` referencing this doc, or
- Add a small icon and polish Linux desktop entries with an icon path.

## Troubleshooting: urllib3 / urllib3-future conflicts ⚠️

In some environments a third-party package (for example `urllib3-future`) may
install a site-packages hook that interferes with the real `urllib3` package.
When this happens you might see errors like:

    Error importing cmdlet 'get_tag': No module named 'urllib3.exceptions'

The bootstrap scripts now run a verification step after installing dependencies
and will stop if a broken `urllib3` is detected, to avoid leaving you with a
partially broken venv.

Recommended fix (activate the venv first, or use the venv python explicitly):

PowerShell / Windows (from repo root):

    .venv\Scripts\python.exe -m pip uninstall urllib3-future -y
    .venv\Scripts\python.exe -m pip install --upgrade --force-reinstall urllib3
    .venv\Scripts\python.exe -m pip install niquests -U

POSIX (Linux/macOS):

    .venv/bin/python -m pip uninstall urllib3-future -y
    .venv/bin/python -m pip install --upgrade --force-reinstall urllib3
    .venv/bin/python -m pip install niquests -U

If problems persist, re-run the bootstrap script after applying the fixes.

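For reference, here is a minimal sketch of the kind of check the bootstrap verification step runs. The real helper is `SYS.env_check.check_urllib3_compat()`, which returns an `(ok, message)` pair; this sketch is illustrative only and is not the shipped implementation.

# Illustrative sketch: approximates what SYS.env_check.check_urllib3_compat() verifies.
import importlib.util


def check_urllib3_compat():
    """Return (ok, message) describing whether urllib3 looks intact."""
    try:
        import urllib3
    except Exception as exc:
        return False, f"urllib3 could not be imported: {exc}"
    # A urllib3-future .pth hook can leave urllib3 importable but missing core
    # attributes such as __version__ and the exceptions submodule.
    if not getattr(urllib3, "__version__", None):
        return False, "urllib3 imports but __version__ is missing (possible urllib3-future conflict)"
    if importlib.util.find_spec("urllib3.exceptions") is None:
        return False, "urllib3.exceptions cannot be resolved (possible urllib3-future conflict)"
    return True, f"urllib3 {urllib3.__version__} looks OK"


if __name__ == "__main__":
    ok, msg = check_urllib3_compat()
    print(msg)
    raise SystemExit(0 if ok else 2)
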
44
docs/ISSUES/urllib3-future.md
Normal file
@@ -0,0 +1,44 @@
Title: urllib3-future .pth hook can leave urllib3 broken after installation

Description:

We observed environments where installing `urllib3-future` (or packages
that depend on it, such as `niquests`) leaves a `.pth` file in site-packages
that overrides `urllib3` in a way that removes expected attributes (e.g.
`__version__`, `exceptions`) and causes import-time failures in downstream
projects (e.g., `No module named 'urllib3.exceptions'`).

Steps to reproduce (rough):

1. Install `urllib3` and `urllib3-future` (or a package that depends on it).
2. Observe that `import urllib3` may succeed but `urllib3.exceptions` or
`urllib3.__version__` is missing, or `importlib.util.find_spec('urllib3.exceptions')`
returns `None`.

Impact:

- Downstream packages that expect modern `urllib3` behavior break in subtle
ways at import time.

Suggested actions for upstream:

- Avoid using a `.pth` that replaces or mutates the `urllib3` package in place,
or ensure it keeps the original `urllib3` semantics intact (i.e., do not create
a namespace that hides core attributes/members).
- Provide clear upgrade/migration notes for hosts that may have mixed
`urllib3` and `urllib3-future` installed.

Notes / local workaround:

- In our project we implemented a startup compatibility check and fail-fast
guidance that suggests running:

python -m pip uninstall urllib3-future -y
python -m pip install --upgrade --force-reinstall urllib3
python -m pip install niquests -U

and we added CI smoke tests and bootstrap verification so the problem is
caught during setup rather than later at runtime.

Please consider this a friendly bug report; feel free to ask for any
additional diagnostics or reproduction details.
8
docs/KNOWN_ISSUES.md
Normal file
@@ -0,0 +1,8 @@
Known issues and brief remediation steps

- urllib3 / urllib3-future conflict
  - Symptom: `No module named 'urllib3.exceptions'` or missing `urllib3.__version__`.
  - Root cause: a `.pth` file or packaging hook from `urllib3-future` may mutate the
    `urllib3` namespace in incompatible ways.
  - Remediation: uninstall `urllib3-future`, reinstall `urllib3`, and re-install
    `niquests` if required. See `docs/ISSUES/urllib3-future.md` for more details.
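A quick way to check whether the active interpreter is affected (illustrative one-liner; it prints the detected urllib3 version and whether the `exceptions` submodule resolves, so a healthy install prints a version string followed by `True`):

python -c "import urllib3, importlib.util; print(getattr(urllib3, '__version__', None), importlib.util.find_spec('urllib3.exceptions') is not None)"
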
@@ -1,13 +1,60 @@
|
||||
"""Entry point wrapper for Medeia-Macina CLI."""
|
||||
"""Entry point wrapper for Medeia-Macina CLI.
|
||||
|
||||
This file is intentionally backwards-compatible. When installed from the
|
||||
packaged distribution the preferred entry is `medeia_macina.cli_entry.main`.
|
||||
When running from the repository (or in legacy installs) the module will
|
||||
attempt to import `MedeiaCLI` from the top-level `CLI` module.
|
||||
"""
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Add the current directory to sys.path so we can import CLI
|
||||
root_dir = Path(__file__).parent
|
||||
if str(root_dir) not in sys.path:
|
||||
|
||||
def _run_packaged_entry(argv=None) -> int:
|
||||
"""Try to delegate to the packaged entry (`medeia_macina.cli_entry:main`)."""
|
||||
try:
|
||||
from medeia_macina.cli_entry import main as _main
|
||||
|
||||
return int(_main(argv) or 0)
|
||||
except Exception:
|
||||
return -1
|
||||
|
||||
|
||||
def _run_legacy_entry() -> None:
|
||||
"""Legacy behaviour: make repo root importable and run CLI.
|
||||
|
||||
This supports running directly from the source tree where `CLI.py` is
|
||||
available as a top-level module.
|
||||
"""
|
||||
root_dir = Path(__file__).resolve().parent
|
||||
if str(root_dir) not in sys.path:
|
||||
sys.path.insert(0, str(root_dir))
|
||||
|
||||
from CLI import MedeiaCLI
|
||||
try:
|
||||
from CLI import MedeiaCLI
|
||||
except Exception as exc: # pragma: no cover - user environment issues
|
||||
raise ImportError(
|
||||
"Could not import 'MedeiaCLI' from top-level 'CLI'. "
|
||||
"If you installed the package into a virtualenv, activate it and run: \n"
|
||||
" pip install -e .\n"
|
||||
"or re-run the project bootstrap to ensure an up-to-date install."
|
||||
) from exc
|
||||
|
||||
if __name__ == "__main__":
|
||||
MedeiaCLI().run()
|
||||
|
||||
|
||||
# Backward-compatibility: try to expose `MedeiaCLI` at import-time when the
|
||||
# project is being used from a development checkout (so modules that import
|
||||
# the top-level `medeia_entry` can still access the CLI class).
|
||||
try:
|
||||
from CLI import MedeiaCLI as MedeiaCLI # type: ignore
|
||||
except Exception:
|
||||
# It's okay if the legacy top-level CLI isn't importable in installed packages.
|
||||
pass
|
||||
|
||||
if __name__ == "__main__":
|
||||
MedeiaCLI().run()
|
||||
rc = _run_packaged_entry(sys.argv[1:])
|
||||
if rc >= 0:
|
||||
raise SystemExit(rc)
|
||||
# Fall back to legacy import when packaged entry couldn't be invoked.
|
||||
_run_legacy_entry()
|
||||
|
||||
@@ -140,7 +140,9 @@ def _import_medeia_entry_module():
|
||||
return importlib.import_module("medeia_entry")
|
||||
|
||||
raise ImportError(
|
||||
"Could not import 'medeia_entry'. Ensure the project was installed properly or run from the repo root."
|
||||
"Could not import 'medeia_entry'. This often means the package is not installed into the active virtualenv or is an outdated install.\n"
|
||||
"Remedy: activate your venv and run: pip install -e . (or re-run the bootstrap script).\n"
|
||||
"If problems persist, recreate the venv and reinstall the project."
|
||||
)
|
||||
|
||||
|
||||
@@ -152,10 +154,26 @@ def _run_cli(clean_args: List[str]) -> int:
|
||||
pass
|
||||
|
||||
mod = _import_medeia_entry_module()
|
||||
try:
|
||||
|
||||
# Backwards compatibility: the imported module may not expose `MedeiaCLI` as
|
||||
# an attribute (for example, the installed `medeia_entry` delegates to the
|
||||
# packaged entrypoint instead of importing the top-level `CLI` module at
|
||||
# import-time). Try a few strategies to obtain or invoke the CLI:
|
||||
MedeiaCLI = None
|
||||
if hasattr(mod, "MedeiaCLI"):
|
||||
MedeiaCLI = getattr(mod, "MedeiaCLI")
|
||||
except AttributeError:
|
||||
raise ImportError("Imported module 'medeia_entry' does not define 'MedeiaCLI'")
|
||||
else:
|
||||
# Try importing the top-level `CLI` module directly (editable/repo mode)
|
||||
try:
|
||||
from CLI import MedeiaCLI as _M # type: ignore
|
||||
|
||||
MedeiaCLI = _M
|
||||
except Exception:
|
||||
raise ImportError(
|
||||
"Imported module 'medeia_entry' does not define 'MedeiaCLI' and direct import of top-level 'CLI' failed.\n"
|
||||
"Remedy: activate your venv and run: pip install -e . (or re-run the bootstrap script).\n"
|
||||
"If problems persist, recreate the venv and reinstall the project."
|
||||
)
|
||||
|
||||
try:
|
||||
app = MedeiaCLI()
|
||||
@@ -209,6 +227,22 @@ def main(argv: Optional[List[str]] = None) -> int:
|
||||
print(f"Error parsing mode flags: {exc}", file=sys.stderr)
|
||||
return 2
|
||||
|
||||
# Early environment sanity check to detect urllib3/urllib3-future conflicts.
|
||||
# When a broken urllib3 is detected we print an actionable message and
|
||||
# exit early to avoid confusing import-time errors later during startup.
|
||||
try:
|
||||
from SYS.env_check import ensure_urllib3_ok
|
||||
try:
|
||||
ensure_urllib3_ok(exit_on_error=True)
|
||||
except SystemExit as exc:
|
||||
# Bubble out the exit code as the CLI return value for clearer
|
||||
# behavior in shell sessions and scripts.
|
||||
return int(getattr(exc, "code", 2) or 2)
|
||||
except Exception:
|
||||
# If the sanity check itself cannot be imported or run, don't block
|
||||
# startup; we'll continue and let normal import errors surface.
|
||||
pass
|
||||
|
||||
# If GUI requested, delegate directly (GUI may decide to honor any args itself)
|
||||
if mode == "gui":
|
||||
return _run_gui(clean_args)
|
||||
|
||||
@@ -9,8 +9,9 @@ output).

from __future__ import annotations

import contextlib
import sys
from typing import Any, TextIO
from typing import Any, Iterator, TextIO

from rich.console import Console

@@ -50,3 +51,24 @@

def rprint(renderable: Any = "", *, file: TextIO | None = None) -> None:
console_for(file).print(renderable)


@contextlib.contextmanager
def capture_rich_output(*, stdout: TextIO, stderr: TextIO) -> Iterator[None]:
"""Temporarily redirect Rich output helpers to provided streams.

Note: `stdout_console()` / `stderr_console()` use global Console instances,
so `contextlib.redirect_stdout` alone will not capture Rich output.
"""

global _STDOUT_CONSOLE, _STDERR_CONSOLE

previous_stdout = _STDOUT_CONSOLE
previous_stderr = _STDERR_CONSOLE
try:
_STDOUT_CONSOLE = Console(file=stdout)
_STDERR_CONSOLE = Console(file=stderr)
yield
finally:
_STDOUT_CONSOLE = previous_stdout
_STDERR_CONSOLE = previous_stderr

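A minimal usage sketch for `capture_rich_output` (illustrative; it assumes `stdout_console()` returns the module-level console that the context manager swaps, as the docstring above implies, and the buffer names are hypothetical):

import io

from rich_display import capture_rich_output, stdout_console

buf_out, buf_err = io.StringIO(), io.StringIO()
with capture_rich_output(stdout=buf_out, stderr=buf_err):
    # Rendered into buf_out instead of the real terminal.
    stdout_console().print("[bold]hello[/bold]")

print(buf_out.getvalue())  # plain-text rendering of the captured Rich output

This is the pattern a caller (for example a test or a background worker) could use to route Rich output into buffers without touching sys.stdout directly.
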
@@ -25,6 +25,8 @@ param(
|
||||
[string]$Python = "",
|
||||
[switch]$Force,
|
||||
[switch]$NoInstall,
|
||||
[switch]$NoPlaywright,
|
||||
[string]$PlaywrightBrowsers = "chromium",
|
||||
[switch]$Quiet
|
||||
)
|
||||
|
||||
@@ -146,17 +148,56 @@ if (-not $NoInstall) {
|
||||
} catch {
|
||||
Write-Log "pip install failed: $_" "ERROR"; exit 6
|
||||
}
|
||||
} else {
|
||||
Write-Log "Skipping install (--NoInstall set)"
|
||||
}
|
||||
|
||||
# Install Deno (official installer) - installed automatically
|
||||
try {
|
||||
$denoCmd = Get-Command 'deno' -ErrorAction SilentlyContinue
|
||||
} catch {
|
||||
$denoCmd = $null
|
||||
}
|
||||
if ($denoCmd) {
|
||||
# Install Playwright browsers (default: chromium) unless explicitly disabled
|
||||
if (-not $NoPlaywright) {
|
||||
Write-Log "Ensuring Playwright browsers are installed (browsers=$PlaywrightBrowsers)..."
|
||||
try {
|
||||
& $venvPython -c "import importlib; importlib.import_module('playwright')" 2>$null
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Write-Log "'playwright' package not found in venv; installing via pip..."
|
||||
& $venvPython -m pip install playwright
|
||||
}
|
||||
} catch {
|
||||
Write-Log "Failed to check/install 'playwright' package: $_" "ERROR"
|
||||
}
|
||||
|
||||
try {
|
||||
if ($PlaywrightBrowsers -eq 'all') {
|
||||
Write-Log "Installing all Playwright browsers..."
|
||||
& $venvPython -m playwright install
|
||||
} else {
|
||||
$list = $PlaywrightBrowsers -split ','
|
||||
foreach ($b in $list) {
|
||||
$btrim = $b.Trim()
|
||||
if ($btrim) {
|
||||
Write-Log "Installing Playwright browser: $btrim"
|
||||
& $venvPython -m playwright install $btrim
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch {
|
||||
Write-Log "Playwright browser install failed: $_" "ERROR"
|
||||
}
|
||||
}
|
||||
|
||||
# Verify environment for known package conflicts (urllib3 compatibility)
|
||||
Write-Log "Verifying environment for known package conflicts (urllib3 compatibility)..."
|
||||
try {
|
||||
& $venvPython -c "import sys; from SYS.env_check import check_urllib3_compat; ok, msg = check_urllib3_compat(); print(msg); sys.exit(0 if ok else 2)"
|
||||
if ($LASTEXITCODE -ne 0) {
|
||||
Write-Log "Bootstrap detected a potentially broken 'urllib3' installation. See message above." "ERROR"
|
||||
Write-Log "Suggested fixes (activate the venv first):" "INFO"
|
||||
Write-Log " $ $venvPython -m pip uninstall urllib3-future -y" "INFO"
|
||||
Write-Log " $ $venvPython -m pip install --upgrade --force-reinstall urllib3" "INFO"
|
||||
Write-Log " $ $venvPython -m pip install niquests -U" "INFO"
|
||||
Write-Log "Aborting bootstrap to avoid leaving a broken environment." "ERROR"
|
||||
exit 7
|
||||
}
|
||||
} catch {
|
||||
Write-Log "Failed to run environment verification: $_" "ERROR"
|
||||
}
|
||||
|
||||
Write-Log "Deno is already installed: $($denoCmd.Path)"
|
||||
} else {
|
||||
Write-Log "Installing Deno via official installer (https://deno.land)"
|
||||
@@ -242,12 +283,12 @@ try {
|
||||
$cmdText = @"
|
||||
@echo off
|
||||
set "REPO=__REPO__"
|
||||
if exist "%REPO%\.venv\Scripts\mm.exe" (
|
||||
"%REPO%\.venv\Scripts\mm.exe" %*
|
||||
if exist "%REPO%\.venv\Scripts\python.exe" (
|
||||
"%REPO%\.venv\Scripts\python.exe" "%REPO%\CLI.py" %*
|
||||
exit /b %ERRORLEVEL%
|
||||
)
|
||||
if exist "%REPO%\.venv\Scripts\python.exe" (
|
||||
"%REPO%\.venv\Scripts\python.exe" -m medeia_macina.cli_entry %*
|
||||
if exist "%REPO%\CLI.py" (
|
||||
python "%REPO%\CLI.py" %*
|
||||
exit /b %ERRORLEVEL%
|
||||
)
|
||||
python -m medeia_macina.cli_entry %*
|
||||
@@ -266,12 +307,12 @@ python -m medeia_macina.cli_entry %*
|
||||
Param([Parameter(ValueFromRemainingArguments=$true)] $args)
|
||||
$repo = "__REPO__"
|
||||
$venv = Join-Path $repo '.venv'
|
||||
$exe = Join-Path $venv 'Scripts\mm.exe'
|
||||
if (Test-Path $exe) { & $exe @args; exit $LASTEXITCODE }
|
||||
$py = Join-Path $venv 'Scripts\python.exe'
|
||||
if (Test-Path $py) { & $py -m medeia_entry @args; exit $LASTEXITCODE }
|
||||
$cli = Join-Path $repo 'CLI.py'
|
||||
if (Test-Path $py) { & $py $cli @args; exit $LASTEXITCODE }
|
||||
if (Test-Path $cli) { & $py $cli @args; exit $LASTEXITCODE }
|
||||
# fallback
|
||||
python -m medeia_entry @args
|
||||
python $cli @args
|
||||
'@
|
||||
# Inject the actual repo path safely (escape embedded double-quotes if any)
|
||||
$ps1Text = $ps1Text.Replace('__REPO__', $repo.Replace('"', '""'))
|
||||
|
||||
@@ -9,6 +9,10 @@ DESKTOP=false
|
||||
PYTHON_CMD=""
|
||||
NOINSTALL=false
|
||||
FORCE=false
|
||||
QUIET=false
|
||||
# Playwright options
|
||||
PLAYWRIGHT_BROWSERS="chromium" # comma-separated (chromium,firefox,webkit) or 'all'
|
||||
NO_PLAYWRIGHT=false
|
||||
|
||||
usage() {
|
||||
cat <<EOF
|
||||
@@ -19,6 +23,9 @@ Options:
|
||||
--python <python> Python executable to use (e.g. python3)
|
||||
-d, --desktop Create a desktop launcher (~/.local/share/applications and ~/Desktop)
|
||||
-n, --no-install Skip pip install
|
||||
--no-playwright Skip installing Playwright browsers (default: install chromium)
|
||||
--playwright-browsers <list> Comma-separated list of browsers to install (default: chromium)
|
||||
-q, --quiet Quiet / non-interactive mode; abort on errors instead of prompting
|
||||
-f, --force Overwrite existing venv without prompting
|
||||
-h, --help Show this help
|
||||
EOF
|
||||
@@ -32,7 +39,10 @@ while [[ $# -gt 0 ]]; do
|
||||
-d|--desktop) DESKTOP=true; shift;;
|
||||
-n|--no-install) NOINSTALL=true; shift;;
|
||||
-f|--force) FORCE=true; shift;;
|
||||
-q|--quiet) QUIET=true; shift;;
|
||||
-h|--help) usage; exit 0;;
|
||||
--no-playwright) NO_PLAYWRIGHT=true; shift;;
|
||||
--playwright-browsers) PLAYWRIGHT_BROWSERS="$2"; shift 2;;
|
||||
*) echo "Unknown option: $1"; usage; exit 1;;
|
||||
esac
|
||||
done
|
||||
@@ -51,21 +61,52 @@ fi
|
||||
echo "Using Python: $PY"
|
||||
|
||||
if [[ -d "$VENV_PATH" ]]; then
|
||||
# Detect whether the existing venv has a working python executable
|
||||
VENV_PY=""
|
||||
for cand in "$VENV_PATH/bin/python" "$VENV_PATH/bin/python3" "$VENV_PATH/Scripts/python.exe"; do
|
||||
if [[ -x "$cand" ]]; then
|
||||
VENV_PY="$cand"
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "$FORCE" == "true" ]]; then
|
||||
echo "Removing existing venv $VENV_PATH"
|
||||
rm -rf "$VENV_PATH"
|
||||
else
|
||||
read -p "$VENV_PATH already exists. Overwrite? [y/N] " REPLY
|
||||
if [[ -z "$VENV_PY" ]]; then
|
||||
if [[ "$QUIET" == "true" ]]; then
|
||||
echo "ERROR: Existing venv appears incomplete or broken (no python executable). Use --force to recreate." >&2
|
||||
exit 4
|
||||
fi
|
||||
read -p "$VENV_PATH exists but appears invalid (no python executable). Overwrite to recreate? (y/N) " REPLY
|
||||
if [[ "$REPLY" != "y" && "$REPLY" != "Y" ]]; then
|
||||
echo "Aborted."; exit 0
|
||||
echo "Aborted."; exit 4
|
||||
fi
|
||||
rm -rf "$VENV_PATH"
|
||||
else
|
||||
if [[ "$QUIET" == "true" ]]; then
|
||||
echo "Using existing venv at $VENV_PATH (quiet mode)"
|
||||
else
|
||||
read -p "$VENV_PATH already exists. Overwrite? (y/N) (default: use existing venv) " REPLY
|
||||
if [[ "$REPLY" == "y" || "$REPLY" == "Y" ]]; then
|
||||
echo "Removing existing venv $VENV_PATH"
|
||||
rm -rf "$VENV_PATH"
|
||||
else
|
||||
echo "Continuing using existing venv at $VENV_PATH"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Creating venv at $VENV_PATH"
|
||||
$PY -m venv "$VENV_PATH"
|
||||
VENV_PY="$VENV_PATH/bin/python"
|
||||
if [[ -d "$VENV_PATH" && -n "${VENV_PY:-}" && -x "${VENV_PY:-}" ]]; then
|
||||
echo "Using existing venv at $VENV_PATH"
|
||||
else
|
||||
echo "Creating venv at $VENV_PATH"
|
||||
$PY -m venv "$VENV_PATH"
|
||||
VENV_PY="$VENV_PATH/bin/python"
|
||||
fi
|
||||
|
||||
if [[ ! -x "$VENV_PY" ]]; then
|
||||
echo "ERROR: venv python not found at $VENV_PY" >&2
|
||||
@@ -98,6 +139,41 @@ if [[ "$NOINSTALL" != "true" ]]; then
|
||||
echo "Action: Try running: $VENV_PY -m pip install -e . or inspect the venv site-packages to verify the installation." >&2
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Verifying environment for known issues (urllib3 compatibility)..."
|
||||
if ! "$VENV_PY" -c 'from SYS.env_check import check_urllib3_compat; ok,msg = check_urllib3_compat(); print(msg); import sys; sys.exit(0 if ok else 2)'; then
|
||||
echo "ERROR: Bootstrap detected a potentially broken 'urllib3' installation. See message above." >&2
|
||||
echo "You can attempt to fix with:" >&2
|
||||
echo " $VENV_PY -m pip uninstall urllib3-future -y" >&2
|
||||
echo " $VENV_PY -m pip install --upgrade --force-reinstall urllib3" >&2
|
||||
echo " $VENV_PY -m pip install niquests -U" >&2
|
||||
exit 7
|
||||
fi
|
||||
|
||||
# Install Playwright browsers (default: chromium) unless explicitly disabled
|
||||
if [[ "$NO_PLAYWRIGHT" != "true" && "$NOINSTALL" != "true" ]]; then
|
||||
echo "Ensuring Playwright browsers are installed (browsers=$PLAYWRIGHT_BROWSERS)..."
|
||||
# Install package if missing in venv
|
||||
if ! "$VENV_PY" -c 'import importlib, sys; importlib.import_module("playwright")' >/dev/null 2>&1; then
|
||||
echo "'playwright' package not found in venv; installing via pip..."
|
||||
"$VENV_PY" -m pip install playwright
|
||||
fi
|
||||
|
||||
# Compute install behavior: 'all' means install all engines, otherwise split comma list
|
||||
if [[ "$PLAYWRIGHT_BROWSERS" == "all" ]]; then
|
||||
echo "Installing all Playwright browsers..."
|
||||
"$VENV_PY" -m playwright install || echo "Warning: Playwright browser install failed" >&2
|
||||
else
|
||||
IFS=',' read -ra PWB <<< "$PLAYWRIGHT_BROWSERS"
|
||||
for b in "${PWB[@]}"; do
|
||||
b_trimmed=$(echo "$b" | tr -d '[:space:]')
|
||||
if [[ -n "$b_trimmed" ]]; then
|
||||
echo "Installing Playwright browser: $b_trimmed"
|
||||
"$VENV_PY" -m playwright install "$b_trimmed" || echo "Warning: Playwright install for $b_trimmed failed" >&2
|
||||
fi
|
||||
done
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "Skipping install (--no-install)"
|
||||
fi
|
||||
@@ -139,7 +215,7 @@ if [[ "$DESKTOP" == "true" ]]; then
|
||||
Name=Medeia-Macina
|
||||
Comment=Launch Medeia-Macina
|
||||
Exec=$EXEC_PATH
|
||||
Terminal=true
|
||||
Terminal=false
|
||||
Type=Application
|
||||
Categories=Utility;
|
||||
EOF
|
||||
|
||||
@@ -251,13 +251,14 @@ def main() -> int:
|
||||
sh_text = """#!/usr/bin/env bash
|
||||
set -e
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
VENV="$SCRIPT_DIR/.venv"
|
||||
if [ -x "$VENV/bin/mm" ]; then
|
||||
exec "$VENV/bin/mm" "$@"
|
||||
elif [ -x "$VENV/bin/python" ]; then
|
||||
exec "$VENV/bin/python" -m medeia_entry "$@"
|
||||
REPO="$SCRIPT_DIR"
|
||||
VENV="$REPO/.venv"
|
||||
PY="$VENV/bin/python"
|
||||
CLI_SCRIPT="$REPO/CLI.py"
|
||||
if [ -x "$PY" ]; then
|
||||
exec "$PY" "$CLI_SCRIPT" "$@"
|
||||
else
|
||||
exec python -m medeia_entry "$@"
|
||||
exec python "$CLI_SCRIPT" "$@"
|
||||
fi
|
||||
"""
|
||||
try:
|
||||
@@ -268,13 +269,14 @@ fi
|
||||
|
||||
ps1_text = r"""Param([Parameter(ValueFromRemainingArguments=$true)] $args)
|
||||
$scriptDir = Split-Path -Parent $MyInvocation.MyCommand.Path
|
||||
$venv = Join-Path $scriptDir '.venv'
|
||||
$exe = Join-Path $venv 'Scripts\mm.exe'
|
||||
if (Test-Path $exe) { & $exe @args; exit $LASTEXITCODE }
|
||||
$repo = $scriptDir
|
||||
$venv = Join-Path $repo '.venv'
|
||||
$py = Join-Path $venv 'Scripts\python.exe'
|
||||
if (Test-Path $py) { & $py -m medeia_entry @args; exit $LASTEXITCODE }
|
||||
$cli = Join-Path $repo 'CLI.py'
|
||||
if (Test-Path $py) { & $py $cli @args; exit $LASTEXITCODE }
|
||||
if (Test-Path $cli) { & $py $cli @args; exit $LASTEXITCODE }
|
||||
# fallback
|
||||
python -m medeia_entry @args
|
||||
python $cli @args
|
||||
"""
|
||||
try:
|
||||
ps1.write_text(ps1_text, encoding="utf-8")
|
||||
@@ -284,9 +286,9 @@ python -m medeia_entry @args
|
||||
bat_text = (
|
||||
"@echo off\r\n"
|
||||
"set SCRIPT_DIR=%~dp0\r\n"
|
||||
"if exist \"%SCRIPT_DIR%\\.venv\\Scripts\\mm.exe\" \"%SCRIPT_DIR%\\.venv\\Scripts\\mm.exe\" %*\r\n"
|
||||
"if exist \"%SCRIPT_DIR%\\.venv\\Scripts\\python.exe\" \"%SCRIPT_DIR%\\.venv\\Scripts\\python.exe\" -m medeia_entry %*\r\n"
|
||||
"python -m medeia_entry %*\r\n"
|
||||
"if exist \"%SCRIPT_DIR%\\.venv\\Scripts\\python.exe\" \"%SCRIPT_DIR%\\.venv\\Scripts\\python.exe\" \"%SCRIPT_DIR%\\CLI.py\" %*\r\n"
|
||||
"if exist \"%SCRIPT_DIR%\\CLI.py\" python \"%SCRIPT_DIR%\\CLI.py\" %*\r\n"
|
||||
"python -m medeia_macina.cli_entry %*\r\n"
|
||||
)
|
||||
try:
|
||||
bat.write_text(bat_text, encoding="utf-8")
|
||||
|
||||