This commit is contained in:
2026-01-21 22:52:52 -08:00
parent d94e321148
commit 201663bb62
9 changed files with 377 additions and 124 deletions

View File

@@ -991,8 +991,10 @@ class Table:
row.add_column("Tag", item.tag_name)
# Source/Store (where the tag values come from)
if hasattr(item, "source") and item.source:
row.add_column("Store", item.source)
# Support both 'source' (legacy) and 'store' (new) attribute names
source_val = getattr(item, "source", None) or getattr(item, "store", None)
if source_val:
row.add_column("Store", source_val)
def _add_pipe_object(self, row: Row, obj: Any) -> None:
"""Extract and add PipeObject fields to row."""
@@ -2013,11 +2015,13 @@ class ItemDetailView(Table):
title: str = "",
item_metadata: Optional[Dict[str, Any]] = None,
detail_title: Optional[str] = None,
exclude_tags: bool = False,
**kwargs
):
super().__init__(title, **kwargs)
self.item_metadata = item_metadata or {}
self.detail_title = detail_title
self.exclude_tags = exclude_tags
def to_rich(self):
"""Render the item details panel above the standard results table."""
@@ -2075,7 +2079,7 @@ class ItemDetailView(Table):
# Tags Summary
tags = self.item_metadata.get("Tags") or self.item_metadata.get("tags") or self.item_metadata.get("tag")
if tags and isinstance(tags, (list, str)):
if not self.exclude_tags and tags and isinstance(tags, (list, str)):
if isinstance(tags, str):
tags = [t.strip() for t in tags.split(",") if t.strip()]
tags_sorted = sorted(map(str, tags))

29
TUI.py
View File

@@ -889,15 +889,30 @@ class PipelineHubApp(App):
return
self.results_table.clear(columns=True)
if self.current_result_table and self.current_result_table.rows:
# Use ResultTable headers from the first row
first_row = self.current_result_table.rows[0]
headers = ["#"] + [col.name for col in first_row.columns]
if self.current_result_table:
# Determine headers - prefer actual rows if present
headers = ["#"]
if self.current_result_table.rows:
first_row = self.current_result_table.rows[0]
headers += [col.name for col in first_row.columns]
else:
# Fallback headers for empty but known table types
title = str(getattr(self.current_result_table, "title", "") or "").strip()
if title == "Tags":
headers += ["Tag", "Store"]
elif title == "Metadata" or "metadata" in title.lower():
headers += ["Field", "Value"]
elif title == "URLs":
headers += ["URL", "Type"]
else:
headers += ["Result"] # Generic fallback
self.results_table.add_columns(*headers)
rows = self.current_result_table.to_datatable_rows()
for idx, row_values in enumerate(rows, 1):
self.results_table.add_row(str(idx), *row_values, key=str(idx - 1))
if self.current_result_table.rows:
rows = self.current_result_table.to_datatable_rows()
for idx, row_values in enumerate(rows, 1):
self.results_table.add_row(str(idx), *row_values, key=str(idx - 1))
else:
# Fallback or empty state
self.results_table.add_columns("Row", "Title", "Source", "File")

View File

@@ -379,7 +379,7 @@ class ConfigModal(ModalScreen):
classes = _discover_store_classes()
if stype in classes:
cls = classes[stype]
if hasattr(cls, "config") and callable(cls.config):
if hasattr(cls, "config_schema") and callable(cls.config_schema):
for field_def in cls.config_schema():
k = field_def.get("key")
if k:
@@ -394,7 +394,7 @@ class ConfigModal(ModalScreen):
from ProviderCore.registry import get_provider_class
try:
pcls = get_provider_class(item_name)
if pcls and hasattr(pcls, "config") and callable(pcls.config):
if pcls and hasattr(pcls, "config_schema") and callable(pcls.config_schema):
for field_def in pcls.config_schema():
k = field_def.get("key")
if k:
@@ -665,7 +665,7 @@ class ConfigModal(ModalScreen):
all_classes = _discover_store_classes()
options = []
for stype, cls in all_classes.items():
if hasattr(cls, "config") and callable(cls.config):
if hasattr(cls, "config_schema") and callable(cls.config_schema):
try:
if cls.config_schema():
options.append(stype)
@@ -678,7 +678,7 @@ class ConfigModal(ModalScreen):
from ProviderCore.registry import get_provider_class
for ptype in provider_names:
pcls = get_provider_class(ptype)
if pcls and hasattr(pcls, "config") and callable(pcls.config):
if pcls and hasattr(pcls, "config_schema") and callable(pcls.config_schema):
try:
if pcls.config_schema():
options.append(ptype)
@@ -855,7 +855,7 @@ class ConfigModal(ModalScreen):
if stype in classes:
cls = classes[stype]
# Use schema for defaults if present
if hasattr(cls, "config") and callable(cls.config):
if hasattr(cls, "config_schema") and callable(cls.config_schema):
for field_def in cls.config_schema():
key = field_def.get("key")
if key:
@@ -889,7 +889,7 @@ class ConfigModal(ModalScreen):
new_config = {}
if pcls:
# Use schema for defaults
if hasattr(pcls, "config") and callable(pcls.config):
if hasattr(pcls, "config_schema") and callable(pcls.config_schema):
for field_def in pcls.config_schema():
key = field_def.get("key")
if key:
@@ -987,7 +987,7 @@ class ConfigModal(ModalScreen):
pcls = get_provider_class(item_name)
if pcls:
# Collect required keys from schema
if hasattr(pcls, "config") and callable(pcls.config):
if hasattr(pcls, "config_schema") and callable(pcls.config_schema):
for field_def in pcls.config_schema():
if field_def.get("required"):
k = field_def.get("key")

View File

@@ -5,7 +5,7 @@ from pathlib import Path
import sys
import re
from SYS.logger import log
from SYS.logger import log, debug
from SYS import models
from SYS import pipeline as ctx
@@ -420,11 +420,9 @@ def _refresh_tag_view(
except Exception:
return
if not target_hash or not store_name:
if not target_hash:
return
refresh_args: List[str] = ["-query", f"hash:{target_hash}", "-store", store_name]
get_tag = None
try:
get_tag = get_cmdlet("get-tag")
@@ -435,14 +433,54 @@ def _refresh_tag_view(
try:
subject = ctx.get_last_result_subject()
if subject and _matches_target(subject, target_hash, target_path, store_name):
get_tag(subject, refresh_args, config)
if not subject or not _matches_target(subject, target_hash, target_path, store_name):
return
except Exception:
pass
try:
get_tag(res, refresh_args, config)
refresh_args: List[str] = ["-query", f"hash:{target_hash}"]
# Build a lean subject so get-tag fetches fresh tags instead of reusing cached payloads.
def _value_has_content(value: Any) -> bool:
if value is None:
return False
if isinstance(value, str):
return bool(value.strip())
if isinstance(value, (list, tuple, set)):
return len(value) > 0
return True
def _build_refresh_subject() -> Dict[str, Any]:
    """Assemble a minimal subject payload for the get-tag refresh.

    The lean payload deliberately omits any cached tag data so that
    get-tag fetches fresh tags from the store instead of reusing a
    stale payload attached to the original subject.
    """
    lean: Dict[str, Any] = {"hash": target_hash}
    if _value_has_content(store_name):
        lean["store"] = store_name
    # Prefer the explicit target path; fall back to subject fields.
    candidate_path = target_path or get_field(subject, "path")
    if not _value_has_content(candidate_path):
        candidate_path = get_field(subject, "target")
    if _value_has_content(candidate_path):
        lean["path"] = candidate_path
    # Carry over identity/display fields get-tag may rely on.
    for field_name in ("title", "name", "url", "relations", "service_name"):
        field_val = get_field(subject, field_name)
        if _value_has_content(field_val):
            lean[field_name] = field_val
    # Copy 'extra' but strip cached tag entries so they cannot be reused.
    extra = get_field(subject, "extra")
    if isinstance(extra, dict):
        stripped = {
            k: v for k, v in extra.items()
            if str(k).lower() not in {"tag", "tags"}
        }
        if stripped:
            lean["extra"] = stripped
    elif _value_has_content(extra):
        lean["extra"] = extra
    return lean
refresh_subject = _build_refresh_subject()
with ctx.suspend_live_progress():
get_tag(refresh_subject, refresh_args, config)
except Exception:
pass
@@ -643,7 +681,7 @@ class Add_Tag(Cmdlet):
total_added = 0
total_modified = 0
store_registry = Store(config)
store_registry = Store(config, suppress_debug=True)
extract_matched_items = 0
extract_no_match_items = 0
@@ -1004,7 +1042,7 @@ class Add_Tag(Cmdlet):
raw_path
)
if changed and not is_last_stage and not use_inline_tags:
if changed and not use_inline_tags:
_refresh_tag_view(res, resolved_hash, str(store_name), raw_path, config)
if is_last_stage:

View File

@@ -78,9 +78,52 @@ def _refresh_tag_view_if_current(
refresh_args: list[str] = []
if file_hash:
refresh_args.extend(["-query", f"hash:{file_hash}"])
if store_name:
refresh_args.extend(["-store", store_name])
get_tag(subject, refresh_args, config)
# Build a lean subject so get-tag fetches fresh tags instead of reusing cached payloads.
def _value_has_content(value: Any) -> bool:
if value is None:
return False
if isinstance(value, str):
return bool(value.strip())
if isinstance(value, (list, tuple, set)):
return len(value) > 0
return True
def _build_refresh_subject() -> Dict[str, Any]:
    """Build a lean subject dict so get-tag re-queries the store for
    fresh tags instead of echoing a cached tag payload."""
    payload: Dict[str, Any] = {"hash": file_hash}
    # Store: explicit argument wins, then the subject's own field.
    resolved_store = store_name or get_field(subject, "store")
    if _value_has_content(resolved_store):
        payload["store"] = resolved_store
    # Path: explicit argument, then subject path, then subject target.
    resolved_path = path or get_field(subject, "path")
    if not _value_has_content(resolved_path):
        resolved_path = get_field(subject, "target")
    if _value_has_content(resolved_path):
        payload["path"] = resolved_path
    # Identity/display fields that downstream rendering may need.
    for attr in ("title", "name", "url", "relations", "service_name"):
        attr_val = get_field(subject, attr)
        if _value_has_content(attr_val):
            payload[attr] = attr_val
    # 'extra' is copied with any cached tag keys filtered out.
    extra_value = get_field(subject, "extra")
    if isinstance(extra_value, dict):
        filtered = {
            k: v for k, v in extra_value.items()
            if str(k).lower() not in {"tag", "tags"}
        }
        if filtered:
            payload["extra"] = filtered
    elif _value_has_content(extra_value):
        payload["extra"] = extra_value
    return payload
refresh_subject = _build_refresh_subject()
# Do not pass -store here as it triggers emit_mode/quiet in get-tag
with ctx.suspend_live_progress():
get_tag(refresh_subject, refresh_args, config)
except Exception:
pass
@@ -333,7 +376,7 @@ def _process_deletion(
def _fetch_existing_tags() -> list[str]:
try:
backend = Store(config)[store_name]
backend = Store(config, suppress_debug=True)[store_name]
existing, _src = backend.get_tag(resolved_hash, config=config)
return list(existing or [])
except Exception:
@@ -360,7 +403,7 @@ def _process_deletion(
return False
try:
backend = Store(config)[store_name]
backend = Store(config, suppress_debug=True)[store_name]
ok = backend.delete_tag(resolved_hash, list(tags), config=config)
if ok:
preview = resolved_hash[:12] + ("…" if len(resolved_hash) > 12 else "")

View File

@@ -314,6 +314,7 @@ def _emit_tags_as_table(
item_title: Optional[str] = None,
path: Optional[str] = None,
subject: Optional[Any] = None,
quiet: bool = False,
) -> None:
"""Emit tags as TagItem objects and display via ResultTable.
@@ -335,8 +336,9 @@ def _emit_tags_as_table(
if path:
metadata["Path"] = path
# Create ItemDetailView
table = ItemDetailView("Tags", item_metadata=metadata, max_columns=1)
# Create ItemDetailView with exclude_tags=True so the panel shows file info
# but doesn't duplicate the tag list that we show as a table below.
table = ItemDetailView("Tags", item_metadata=metadata, max_columns=1, exclude_tags=True)
table.set_source_command("get-tag", [])
# Create TagItem for each tag
@@ -371,6 +373,15 @@ def _emit_tags_as_table(
except Exception:
table_applied = False
# Display the rich panel (metadata info) if not in quiet/emit-only mode.
# In the TUI, this output is captured and shown in the log pane.
if not quiet:
try:
from SYS.rich_display import stdout_console
stdout_console().print(table)
except Exception:
pass
if table_applied:
try:
if hasattr(ctx, "set_current_stage_table"):
@@ -1129,6 +1140,8 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
--emit: Emit result without interactive prompt (quiet mode)
-scrape <url|provider>: Scrape metadata from URL or provider name (itunes, openlibrary, googlebooks, imdb)
"""
emit_mode = False
is_store_backed = False
args_list = [str(arg) for arg in (args or [])]
raw_args = list(args_list)
@@ -1179,6 +1192,58 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
return 1
store_key = parsed_args.get("store")
emit_requested = parsed_args.get("emit", False)
# Only use emit mode if explicitly requested with --emit flag, not just because we're in a pipeline
# This allows interactive REPL to work even in pipelines
emit_mode = emit_requested or bool(store_key)
store_label = store_key.strip() if store_key and store_key.strip() else None
# Handle @N selection which creates a list - extract the first item
if isinstance(result, list) and len(result) > 0:
result = result[0]
try:
display_subject = ctx.get_last_result_subject()
except Exception:
display_subject = None
def _value_has_content(value: Any) -> bool:
if value is None:
return False
if isinstance(value, str):
return bool(value.strip())
if isinstance(value, (list, tuple, set)):
return len(value) > 0
return True
def _resolve_subject_value(*keys: str) -> Any:
    """Return the first non-empty value for *keys*.

    The pipeline result is consulted first for every key; only when
    nothing is found there (and a display subject exists) is the last
    displayed subject checked, in the same key order.
    """
    candidates = [result]
    if display_subject is not None:
        candidates.append(display_subject)
    for source in candidates:
        for key in keys:
            found = get_field(source, key, None)
            if _value_has_content(found):
                return found
    return None
# Resolve core identity early so it's available for all branches
hash_from_result = normalize_hash(_resolve_subject_value("hash"))
file_hash = hash_override or hash_from_result
store_value = _resolve_subject_value("store")
store_name = (store_key or str(store_value).strip()) if store_value is not None else store_key
subject_path = _resolve_subject_value("path", "target", "filename")
item_title = _resolve_subject_value("title", "name", "filename")
# Identify if the subject is store-backed. If so, we prioritize fresh data over cached tags.
# Note: PATH, URL, and LOCAL stores are transient and don't support backend get-tag refreshes.
is_store_backed = bool(file_hash and store_name and
str(store_name).upper() not in {"PATH", "URL", "LOCAL"})
scrape_url = parsed_args.get("scrape")
scrape_requested = scrape_flag_present or scrape_url is not None
@@ -1238,7 +1303,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
try:
from Store import Store
storage = Store(config)
storage = Store(config, suppress_debug=True)
backend = storage[str(store_name)]
except Exception as exc:
log(
@@ -1357,6 +1422,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
"scrape_url": scrape_target
},
},
quiet=emit_mode,
)
return 0
@@ -1396,7 +1462,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
try:
from Store import Store
storage = Store(config)
storage = Store(config, suppress_debug=True)
backend = storage[str(store_for_scrape)]
current_tags, _src = backend.get_tag(file_hash_for_scrape, config=config)
if isinstance(current_tags, (list, tuple, set)) and current_tags:
@@ -1562,6 +1628,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
"provider": "ytdlp",
"url": str(query_hint)
},
quiet=emit_mode,
)
return 0
@@ -1624,57 +1691,12 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
log("-scrape requires a URL argument", file=sys.stderr)
return 1
# Handle @N selection which creates a list - extract the first item
if isinstance(result, list) and len(result) > 0:
result = result[0]
try:
display_subject = ctx.get_last_result_subject()
except Exception:
display_subject = None
def _value_has_content(value: Any) -> bool:
if value is None:
return False
if isinstance(value, str):
return bool(value.strip())
if isinstance(value, (list, tuple, set)):
return len(value) > 0
return True
def _resolve_subject_value(*keys: str) -> Any:
for key in keys:
val = get_field(result, key, None)
if _value_has_content(val):
return val
if display_subject is None:
return None
for key in keys:
val = get_field(display_subject, key, None)
if _value_has_content(val):
return val
return None
# If the current result already carries a tag list (e.g. a selected metadata
# row from get-tag -scrape itunes), APPLY those tags to the file in the store.
result_provider = get_field(result, "provider", None)
result_tags = get_field(result, "tag", None)
if result_provider and isinstance(result_tags, list) and result_tags:
file_hash = normalize_hash(hash_override) or normalize_hash(
get_field(result,
"hash",
None)
)
store_name = get_field(result, "store", None)
subject_path = (
get_field(result,
"path",
None) or get_field(result,
"target",
None) or get_field(result,
"filename",
None)
)
if not file_hash or not store_name:
log(
"Selected metadata row is missing hash/store; cannot apply tags",
@@ -1691,6 +1713,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
None) or result_provider),
path=str(subject_path) if subject_path else None,
subject=result,
quiet=emit_mode,
)
_emit_tag_payload(
str(result_provider),
@@ -1715,7 +1738,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
try:
from Store import Store
storage = Store(config)
storage = Store(config, suppress_debug=True)
backend = storage[str(store_name)]
ok = bool(backend.add_tag(file_hash, apply_tags, config=config))
if not ok:
@@ -1759,6 +1782,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
"applied_provider": str(result_provider)
},
},
quiet=emit_mode,
)
_emit_tag_payload(
str(store_name),
@@ -1768,17 +1792,6 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
)
return 0
hash_from_result = normalize_hash(_resolve_subject_value("hash"))
file_hash = hash_override or hash_from_result
# Only use emit mode if explicitly requested with --emit flag, not just because we're in a pipeline
# This allows interactive REPL to work even in pipelines
emit_mode = emit_requested or bool(store_key)
store_label = store_key.strip() if store_key and store_key.strip() else None
# Get hash and store from result
store_value = _resolve_subject_value("store")
store_name = str(store_value).strip() if store_value is not None else None
if not file_hash:
log("No hash available in result", file=sys.stderr)
return 1
@@ -1787,9 +1800,6 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
log("No store specified in result", file=sys.stderr)
return 1
item_title = (
_resolve_subject_value("title", "name", "filename")
)
subject_store = store_name
subject_path_value = (
_resolve_subject_value("path", "target", "filename")
@@ -1833,7 +1843,10 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
display_tags: List[str] = []
if isinstance(raw_result_tags, list):
display_tags = [str(t) for t in raw_result_tags if t is not None]
if display_tags and not emit_mode:
# Only use cached tags if the item is NOT store-backed.
# For store-backed items (Hydrus/Folders), we want the latest state.
if display_tags and not emit_mode and not is_store_backed:
subject_payload = _subject_payload_with(display_tags)
_emit_tags_as_table(
display_tags,
@@ -1844,6 +1857,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
item_title=item_title,
path=subject_path,
subject=subject_payload,
quiet=emit_mode,
)
return 0
@@ -1851,7 +1865,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
try:
from Store import Store
storage = Store(config)
storage = Store(config, suppress_debug=True)
backend = storage[store_name]
current, source = backend.get_tag(file_hash, config=config)
current = list(current or [])
@@ -1877,6 +1891,7 @@ def _run(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
item_title=item_title,
path=subject_path,
subject=subject_payload,
quiet=emit_mode,
)
# If emit requested or store key provided, emit payload

View File

@@ -81,6 +81,9 @@ def run(cmd: list[str], quiet: bool = False, debug: bool = False, cwd: Optional[
subprocess.check_call(cmd, cwd=str(cwd) if cwd else None)
REPO_URL = "https://code.glowers.club/goyimnose/Medios-Macina.git"
class ProgressBar:
def __init__(self, total: int, quiet: bool = False):
self.total = total
@@ -475,8 +478,86 @@ def main() -> int:
# Ensure repo_root is always the project root, not the current working directory
# This prevents issues when bootstrap.py is run from different directories
script_dir = Path(__file__).resolve().parent
repo_root = script_dir.parent
try:
script_path = Path(__file__).resolve()
script_dir = script_path.parent
repo_root = script_dir.parent
except NameError:
# Running via pipe/eval, __file__ is not defined
script_path = None
script_dir = Path.cwd()
repo_root = Path.cwd()
# DETECT REPOSITORY
# Check if we are already inside a valid Medios-Macina repo
def _is_valid_mm_repo(p: Path) -> bool:
return (p / "CLI.py").exists() and (p / "scripts").exists()
is_in_repo = _is_valid_mm_repo(repo_root)
# If not in the parent of the script, check the current working directory
if not is_in_repo and _is_valid_mm_repo(Path.cwd()):
repo_root = Path.cwd()
script_dir = repo_root / "scripts"
is_in_repo = True
# STANDALONE INSTALLER MODE
# If the script is run from a location that doesn't look like a Medios-Macina repo,
# or if we're in a completely empty directory, offer to clone the repo.
if not is_in_repo:
if not args.quiet:
print("\n" + "=" * 60)
print(" MEDIOS-MACINA STANDALONE INSTALLER")
print("=" * 60)
print("No existing Medios-Macina repository found at this location.")
if script_path:
print(f"Current script location: {script_path}")
# Check for git
if not shutil.which("git"):
print("\nError: 'git' was not found on your PATH.", file=sys.stderr)
print("Please install Git (https://git-scm.com/) and try again.", file=sys.stderr)
return 1
try:
# Ask for installation folder
default_install = Path.cwd() / "Medios-Macina"
print(f"\nWhere would you like to install Medios-Macina?")
install_dir_raw = input(f"Installation directory [{default_install}]: ").strip()
if not install_dir_raw:
install_path = default_install
else:
install_path = Path(install_dir_raw).resolve()
except EOFError:
print("Non-interactive session: cannot proceed with clone.", file=sys.stderr)
return 1
if not install_path.exists():
print(f"Creating directory: {install_path}")
install_path.mkdir(parents=True, exist_ok=True)
# Check if it already has a repo (user might have chosen an existing folder)
if _is_valid_mm_repo(install_path):
print(f"Found existing repository in {install_path}.")
repo_root = install_path
else:
print(f"Cloning Medios-Macina into {install_path}...")
print(f"Source: {REPO_URL}")
try:
subprocess.check_call(["git", "clone", REPO_URL, str(install_path)])
repo_root = install_path
except Exception as e:
print(f"Error: Failed to clone repository: {e}", file=sys.stderr)
return 1
# Change directory to the newly established repo root
os.chdir(str(repo_root))
print(f"\nSuccessfully set up repository at {repo_root}")
print("Resuming bootstrap...\n")
# Re-initialize script_dir for the rest of the script
# as if we started inside the repo scripts folder.
script_dir = repo_root / "scripts"
if not args.quiet:
print(f"Bootstrap script location: {script_dir}")

View File

@@ -30,7 +30,7 @@ import urllib.request
import zipfile
import re
from pathlib import Path
from typing import Optional, Tuple
from typing import Optional, Sequence, Tuple
import logging
@@ -308,7 +308,11 @@ def find_project_venv(root: Path) -> Optional[Path]:
return None
def maybe_reexec_under_project_venv(root: Path, disable: bool = False) -> None:
def maybe_reexec_under_project_venv(
root: Path,
disable: bool = False,
extra_argv: Sequence[str] | None = None,
) -> None:
"""If a project venv exists and we are not already running under it, re-exec
the current script using that venv's python interpreter.
@@ -344,9 +348,12 @@ def maybe_reexec_under_project_venv(root: Path, disable: bool = False) -> None:
script_path = Path(sys.argv[0]).resolve()
except Exception:
script_path = None
args = [str(py),
str(script_path) if script_path is not None else sys.argv[0]
] + sys.argv[1:]
args = [
str(py),
str(script_path) if script_path is not None else sys.argv[0]
] + sys.argv[1:]
if extra_argv:
args += list(extra_argv)
logging.debug("Exec args: %s", args)
os.execvpe(str(py), args, env)
except Exception as exc:
@@ -852,6 +859,11 @@ def main(argv: Optional[list[str]] = None) -> int:
action="store_true",
help="Do not attempt to re-exec the script under a project venv (if present)",
)
parser.add_argument(
"--use-project-venv",
action="store_true",
help="Force using the project venv even when running interactively",
)
parser.add_argument("--verbose", "-v", action="store_true", help="Verbose logging")
args = parser.parse_args(argv)
@@ -861,6 +873,7 @@ def main(argv: Optional[list[str]] = None) -> int:
# Interactive setup for root and name if not provided and in a TTY
# We check sys.argv directly to see if the flags were explicitly passed.
interactive_setup = False
if sys.stdin.isatty() and not any(arg in sys.argv for arg in ["--root", "-r", "--dest-name", "-d"]):
print("\nHydrusNetwork Setup")
print("--------------------")
@@ -870,6 +883,9 @@ def main(argv: Optional[list[str]] = None) -> int:
try:
root_input = input(f"Enter root directory for Hydrus installation [default: {default_root}]: ").strip()
if root_input:
# If they typed "C:" or similar, assume they want the root "C:\"
if len(root_input) == 2 and root_input[1] == ":" and root_input[0].isalpha():
root_input += "\\"
args.root = root_input
else:
args.root = str(default_root)
@@ -881,6 +897,7 @@ def main(argv: Optional[list[str]] = None) -> int:
except (EOFError, KeyboardInterrupt):
print("\nSetup cancelled.")
return 0
interactive_setup = True
# Expand variables like $HOME or %USERPROFILE% and ~
args.root = os.path.expandvars(args.root)
@@ -889,7 +906,25 @@ def main(argv: Optional[list[str]] = None) -> int:
venv_py = None
# Re-exec under project venv by default when present (opt-out with --no-project-venv)
try:
maybe_reexec_under_project_venv(root, disable=bool(args.no_project_venv))
# If we are already running in a venv-like environment, we might skip re-exec.
# However, we only re-exec if the target root is the same as the project root.
disable_reexec = bool(args.no_project_venv)
# Don't re-exec when running interactively unless explicitly requested.
if interactive_setup and not args.use_project_venv:
disable_reexec = True
current_repo_root = Path(__file__).resolve().parent.parent
# Only re-exec if the target root folder matches the folder where THIS script lives.
# This prevents picking up Medios-Macina's .venv when installing Hydrus to a separate drive/folder.
if root != current_repo_root and not args.use_project_venv:
disable_reexec = True
maybe_reexec_under_project_venv(
root,
disable=disable_reexec,
extra_argv=["--root", args.root, "--dest-name", args.dest_name],
)
except Exception:
pass
@@ -1036,8 +1071,10 @@ def main(argv: Optional[list[str]] = None) -> int:
"pyopenssl": "OpenSSL",
"pysocks": "socks",
"service-identity": "service_identity",
"show-in-file-manager": "showinfm",
"opencv-python-headless": "cv2",
"pyyside6": "PySide6",
"mpv": "mpv",
"pyside6": "PySide6",
"pyside6-essentials": "PySide6",
"pyside6-addons": "PySide6",
}
@@ -1345,14 +1382,16 @@ def main(argv: Optional[list[str]] = None) -> int:
"python-dateutil": "dateutil",
"beautifulsoup4": "bs4",
"pillow-heif": "pillow_heif",
"pillow-jxl-plugin": "pillow_jxl_plugin",
"pillow-jxl-plugin": "pillow_jxl",
"pyopenssl": "OpenSSL",
"pysocks": "socks",
"service-identity": "service_identity",
"show-in-file-manager": "show_in_file_manager",
"show-in-file-manager": "showinfm",
"opencv-python-headless": "cv2",
"mpv": "mpv",
"pyside6": "PySide6",
"pyside6-essentials": "PySide6",
"pyside6-addons": "PySide6",
}
for pkg in pkgs:
try:
@@ -1463,8 +1502,18 @@ def main(argv: Optional[list[str]] = None) -> int:
run_client_script = None
if client_found:
# Prefer run_client helper located in the cloned repo; if missing, fall back to top-level scripts folder helper.
script_dir = Path(__file__).resolve().parent
helper_src = script_dir / "run_client.py"
helper_dest = dest / "run_client.py"
if helper_src.exists() and not helper_dest.exists():
try:
shutil.copy2(helper_src, helper_dest)
if os.name != "nt":
helper_dest.chmod(helper_dest.stat().st_mode | 0o111)
logging.debug("Copied run_client helper to %s", helper_dest)
except Exception as exc: # pragma: no cover - best effort
logging.debug("Failed to copy run_client helper: %s", exc)
# Prefer run_client helper located in the cloned repo; if missing, fall back to top-level scripts folder helper.
helper_candidates = [dest / "run_client.py", script_dir / "run_client.py"]
for cand in helper_candidates:
if cand.exists():

View File

@@ -138,11 +138,11 @@ def verify_imports(venv_py: Path, packages: List[str]) -> bool:
"python-dateutil": "dateutil",
"beautifulsoup4": "bs4",
"pillow-heif": "pillow_heif",
"pillow-jxl-plugin": "pillow_jxl_plugin",
"pillow-jxl-plugin": "pillow_jxl",
"pyopenssl": "OpenSSL",
"pysocks": "socks",
"service-identity": "service_identity",
"show-in-file-manager": "show_in_file_manager",
"show-in-file-manager": "showinfm",
"opencv-python-headless": "cv2",
"mpv": "mpv",
"pyside6": "PySide6",
@@ -642,16 +642,23 @@ def main(argv: Optional[List[str]] = None) -> int:
args = p.parse_args(argv)
workspace_root = Path(__file__).resolve().parent.parent
# Determine default repo root: prefer <workspace>/hydrusnetwork when present
script_dir = Path(__file__).resolve().parent
if (script_dir / "hydrus_client.py").exists():
workspace_root = script_dir
else:
workspace_root = script_dir.parent
if args.repo_root:
repo_root = Path(args.repo_root).expanduser().resolve()
else:
candidate = workspace_root / "hydrusnetwork"
if candidate.exists():
repo_root = candidate
else:
if (workspace_root / "hydrus_client.py").exists():
repo_root = workspace_root
else:
candidate = workspace_root / "hydrusnetwork"
if candidate.exists():
repo_root = candidate
else:
repo_root = workspace_root
venv_py = find_venv_python(repo_root, args.venv, args.venv_name)
@@ -822,14 +829,15 @@ def main(argv: Optional[List[str]] = None) -> int:
cmd = [str(venv_py), str(client_path)] + client_args
# Determine headless vs GUI
first_run = is_first_run(repo_root)
if args.gui:
headless = False
elif args.headless:
headless = True
else:
headless = not first_run
if not args.quiet and first_run:
# Default to GUI for the client launcher
headless = False
if not args.quiet and is_first_run(repo_root):
print("First run detected: defaulting to GUI unless --headless is specified.")
env = os.environ.copy()