This commit is contained in:
2026-01-22 02:45:08 -08:00
parent 3d571b007b
commit ba23c0606f
18 changed files with 75 additions and 5355 deletions

3
.gitignore vendored
View File

@@ -242,4 +242,5 @@ tmp_*
authtoken.secret
mypy.
.idea
.idea
medios.db

View File

@@ -1,646 +0,0 @@
"""ZeroTier helpers and discovery utilities.
This module provides a small, dependency-light API for interacting with a
local zerotier-one node (preferred via Python module when available, else via
`zerotier-cli`), discovering peers on a given ZeroTier network, and probing
for services running on those peers (e.g., our remote storage server or a
Hydrus instance).
Notes:
- This is intentionally conservative and all operations are best-effort and
fail gracefully when the local system does not have ZeroTier installed.
- The implementation prefers a Python ZeroTier binding when available, else
falls back to calling the `zerotier-cli` binary (if present) and parsing
JSON output where possible.
Example usage:
from API import zerotier
if zerotier.is_available():
nets = zerotier.list_networks()
zerotier.join_network("8056c2e21c000001")
services = zerotier.discover_services_on_network("8056c2e21c000001", ports=[999], paths=["/health","/api_version"]) # noqa: E501
"""
from __future__ import annotations
import json
import os
import shutil
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from SYS.logger import debug
# Optional Python ZeroTier bindings - prefer them when available
_HAVE_PY_ZEROTIER = False
try:
# Try common package names; not all installations will have this available
# This import is optional and callers should still work via the CLI fallback.
import zerotier as _zt # type: ignore
_HAVE_PY_ZEROTIER = True
except Exception:
_HAVE_PY_ZEROTIER = False
@dataclass
class ZeroTierNetwork:
    """A ZeroTier network membership as reported by the local node."""
    id: str  # network ID string (as reported by the binding/CLI)
    name: str  # network name as reported (may be empty)
    status: str  # status string taken verbatim from the binding/CLI output
    assigned_addresses: List[str]  # this node's addresses; may carry '/prefix' or '%zone' suffixes
@dataclass
class ZeroTierServiceProbe:
    """Result of probing one peer address/port/path for an HTTP service."""
    address: str  # peer IP that was probed
    port: int  # TCP port probed
    path: str  # URL path used for the probe (e.g. "/health")
    url: str  # full URL that was requested
    ok: bool  # True only for an HTTP 200 response
    status_code: Optional[int]  # HTTP status code, or None when the request failed entirely
    payload: Optional[Any]  # parsed JSON body when available, else raw text or None
    service_hint: Optional[str] = None  # best-effort guess, e.g. "remote_storage" or "hydrus"
def _get_cli_path() -> Optional[str]:
"""Find the zerotier-cli binary or script across common locations."""
# 1. Check PATH
p = shutil.which("zerotier-cli")
if p:
return p
# 2. Check common installation paths
candidates = []
if sys.platform == "win32":
# Check various Program Files locations and both .bat and .exe
roots = [
os.environ.get("ProgramFiles(x86)", r"C:\Program Files (x86)"),
os.environ.get("ProgramFiles", r"C:\Program Files"),
os.environ.get("ProgramData", r"C:\ProgramData"),
]
for root in roots:
base = os.path.join(root, "ZeroTier", "One", "zerotier-cli")
candidates.append(base + ".bat")
candidates.append(base + ".exe")
else:
# Linux / macOS
candidates = [
"/usr/sbin/zerotier-cli",
"/usr/local/bin/zerotier-cli",
"/sbin/zerotier-cli",
"/var/lib/zerotier-one/zerotier-cli",
]
for c in candidates:
try:
if os.path.isfile(c):
return str(c)
except Exception:
pass
return None
def _get_home_path() -> Optional[str]:
"""Return the ZeroTier home directory (containing authtoken.secret)."""
if sys.platform == "win32":
path = os.path.join(os.environ.get("ProgramData", r"C:\ProgramData"), "ZeroTier", "One")
if os.path.isdir(path):
return path
else:
# Linux
if os.path.isdir("/var/lib/zerotier-one"):
return "/var/lib/zerotier-one"
# macOS
if os.path.isdir("/Library/Application Support/ZeroTier/One"):
return "/Library/Application Support/ZeroTier/One"
return None
def _get_authtoken() -> Optional[str]:
    """Read authtoken.secret from the local ZeroTier home dir, if present.

    Returns the stripped token string, or None when the home directory or
    token file is missing/unreadable.
    """
    home = _get_home_path()
    if not home:
        return None
    token_path = os.path.join(home, "authtoken.secret")
    if not os.path.isfile(token_path):
        return None
    try:
        with open(token_path, "r") as fh:
            return fh.read().strip()
    except Exception:
        return None
def _read_token_file(path: str) -> Optional[str]:
"""Read a token from an arbitrary file path (safely).
Returns the stripped token string or None on error.
"""
try:
with open(path, "r") as f:
t = f.read().strip()
return t if t else None
except Exception as exc:
debug(f"read_token_file failed: {exc}")
return None
def _find_file_upwards(filename: str, start: Optional[str] = None) -> Optional[str]:
"""Search for `filename` by walking up parent directories starting at `start` (or CWD).
Returns the first matching path or None.
"""
start_dir = Path(start or os.getcwd()).resolve()
for p in [start_dir] + list(start_dir.parents):
candidate = p / filename
if candidate.is_file():
return str(candidate)
return None
def _find_repo_root(start: Optional[str] = None) -> Optional[str]:
"""Find a probable repository root by looking for .git/pyproject.toml/setup.py upwards from start.
Returns the directory path or None.
"""
start_dir = Path(start or Path(__file__).resolve().parent).resolve()
for p in [start_dir] + list(start_dir.parents):
if (p / ".git").exists() or (p / "pyproject.toml").exists() or (p / "setup.py").exists():
return str(p)
return None
def _get_token_path() -> Optional[str]:
"""Return the source of an auth token: 'env' or a filesystem path to authtoken.secret.
This checks in order: env token string, env token file, CWD (and parents), repo root,
user home, and finally the system ZeroTier home.
"""
# 1: token provided directly in env
if os.environ.get("ZEROTIER_AUTH_TOKEN") or os.environ.get("ZEROTIER_AUTHTOKEN"):
return "env"
# 2: token file path provided
p = os.environ.get("ZEROTIER_AUTH_TOKEN_FILE") or os.environ.get("ZEROTIER_AUTHTOKEN_FILE")
if p and os.path.isfile(p):
return p
# 3: token file in current working dir or any parent
up = _find_file_upwards("authtoken.secret", start=os.getcwd())
if up:
return up
# 4: token file at repository root (helpful if TUI runs with a different CWD)
repo = _find_repo_root()
if repo:
rp = os.path.join(repo, "authtoken.secret")
if os.path.isfile(rp):
return rp
# 5: token file in user's home
home_candidate = os.path.join(str(Path.home()), "authtoken.secret")
if os.path.isfile(home_candidate):
return home_candidate
# 6: fallback to the ZeroTier home location
zhome = _get_home_path()
if zhome:
tz = os.path.join(zhome, "authtoken.secret")
if os.path.isfile(tz):
return tz
return None
def _get_token_override() -> Optional[str]:
    """Resolve the token value from the source chosen by `_get_token_path()`.

    Returns the token string, or None when no token is available anywhere.
    """
    source = _get_token_path()
    if source is None:
        return None
    if source == "env":
        raw = os.environ.get("ZEROTIER_AUTH_TOKEN") or os.environ.get("ZEROTIER_AUTHTOKEN")
        return raw.strip() if raw else None
    return _read_token_file(source)
def _cli_available() -> bool:
    """Return True when a zerotier-cli binary/script can be located."""
    return _get_cli_path() is not None
def is_available() -> bool:
    """Return True if we can interact with ZeroTier locally (module or CLI).

    True when either the optional Python binding imported successfully or a
    zerotier-cli executable exists on this system.
    """
    return _HAVE_PY_ZEROTIER or _cli_available()
def _run_cli_capture(*args: str, timeout: float = 5.0) -> Tuple[int, str, str]:
    """Run zerotier-cli and return (returncode, stdout, stderr).

    This centralizes how we call the CLI so we can always capture stderr and
    returncodes and make debugging failures much easier.

    Raises:
        RuntimeError: when no zerotier-cli executable can be located.
        subprocess.TimeoutExpired: when the CLI does not finish within *timeout*.
    """
    bin_path = _get_cli_path()
    if not bin_path:
        raise RuntimeError("zerotier-cli not found")
    full_args = list(args)
    token = _get_token_override()
    # Inject our discovered auth token unless the caller already passed -T.
    if token and not any(a.startswith("-T") for a in full_args):
        # Do not log the token itself; we log only its presence/length for debugging
        debug(f"Using external authtoken (len={len(token)}) for CLI auth")
        full_args.insert(0, f"-T{token}")
    # Point the CLI at the ZeroTier home dir unless the caller passed -D.
    home = _get_home_path()
    if home and not any(a.startswith("-D") for a in full_args):
        full_args.insert(0, f"-D{home}")
    cmd = [bin_path, *full_args]
    debug(f"Running zerotier-cli: {cmd}")
    # .bat wrappers on Windows only execute correctly through the shell.
    use_shell = sys.platform == "win32" and str(bin_path).lower().endswith(".bat")
    proc = subprocess.run(cmd, timeout=timeout, capture_output=True, text=True, shell=use_shell)
    return proc.returncode, proc.stdout, proc.stderr
def _run_cli_json(*args: str, timeout: float = 5.0) -> Any:
    """Run zerotier-cli and decode its stdout as JSON when possible.

    Returns the parsed JSON value on success, or the raw stdout string when
    the output is not JSON.  Raises RuntimeError (carrying stderr/stdout) on
    a non-zero exit code.
    """
    rc, stdout, stderr = _run_cli_capture(*args, timeout=timeout)
    if rc != 0:
        # Include the CLI's own message so callers and logs see something
        # actionable instead of a blind CalledProcessError.
        detail = stderr.strip() or stdout.strip()
        raise RuntimeError(f"zerotier-cli failed (rc={rc}): {detail}")
    try:
        return json.loads(stdout)
    except Exception:
        # Some subcommands emit plain text; hand it back untouched.
        return stdout
def list_networks() -> List[ZeroTierNetwork]:
    """Return a list of configured ZeroTier networks on this node.

    Best-effort: prefers the Python binding, then `zerotier-cli listnetworks -j`.
    Returns an empty list when neither source yields data (all errors are
    logged via debug() rather than raised).
    """
    nets: List[ZeroTierNetwork] = []
    if _HAVE_PY_ZEROTIER:
        try:
            # Attempt to use common API shape (best-effort)
            raw = _zt.list_networks()  # type: ignore[attr-defined]
            # If the Python binding returned results, use them. If it returned
            # an empty list/None, fall back to the CLI so we don't return a
            # false-empty result to the UI.
            if raw:
                for n in raw:
                    # raw entries are expected to be dict-like; some bindings
                    # use 'networkId' instead of 'id'.
                    nets.append(ZeroTierNetwork(
                        id=str(n.get("id") or n.get("networkId") or ""),
                        name=str(n.get("name") or ""),
                        status=str(n.get("status") or ""),
                        assigned_addresses=list(n.get("assignedAddresses") or []),
                    ))
                return nets
            else:
                debug("py-zerotier returned no networks; falling back to CLI")
        except Exception as exc:  # pragma: no cover - optional dependency
            debug(f"py-zerotier listing failed: {exc}")
    # CLI fallback
    try:
        data = _run_cli_json("listnetworks", "-j")
        if isinstance(data, list):
            for entry in data:
                nets.append(ZeroTierNetwork(
                    id=str(entry.get("id") or ""),
                    name=str(entry.get("name") or ""),
                    status=str(entry.get("status") or ""),
                    assigned_addresses=list(entry.get("assignedAddresses") or []),
                ))
    except Exception as exc:
        debug(f"list_networks failed: {exc}")
    return nets
def join_network(network_id: str) -> bool:
    """Join the given ZeroTier network (best-effort).

    Returns True on success, False when no backend is available.  Raises
    ValueError when no network id is given, and RuntimeError (with the CLI's
    own message) when zerotier-cli reports a failure — so the TUI can show a
    helpful error instead of a generic 'failed to join'.
    """
    nid = str(network_id or "").strip()
    if not nid:
        raise ValueError("network_id is required")
    if _HAVE_PY_ZEROTIER:
        try:
            _zt.join_network(nid)  # type: ignore[attr-defined]
            return True
        except Exception as exc:  # pragma: no cover - optional dependency
            debug(f"py-zerotier join failed: {exc}")
    if _cli_available():
        rc, out, err = _run_cli_capture("join", nid, timeout=10)
        if rc == 0:
            return True
        # Surface stderr/stdout so callers (UI/tests) can react to the exact error.
        raise RuntimeError(f"zerotier-cli join failed (rc={rc}): {err.strip() or out.strip()}")
    return False
def leave_network(network_id: str) -> bool:
    """Leave the given ZeroTier network (best-effort).

    Returns True on success, False when no backend is available.  Raises
    ValueError for a blank network id and RuntimeError when the CLI reports
    a failure.
    """
    nid = str(network_id or "").strip()
    if not nid:
        raise ValueError("network_id is required")
    if _HAVE_PY_ZEROTIER:
        try:
            _zt.leave_network(nid)  # type: ignore[attr-defined]
            return True
        except Exception as exc:  # pragma: no cover - optional dependency
            debug(f"py-zerotier leave failed: {exc}")
    if _cli_available():
        rc, out, err = _run_cli_capture("leave", nid, timeout=10)
        if rc == 0:
            return True
        raise RuntimeError(f"zerotier-cli leave failed (rc={rc}): {err.strip() or out.strip()}")
    return False
def _strip_addr(addr: str) -> str:
# Remove trailing CID parts like '/24' and zone IDs like '%eth0'
if not addr:
return addr
a = addr.split("/")[0]
if "%" in a:
a = a.split("%", 1)[0]
return a
def get_assigned_addresses(network_id: str) -> List[str]:
    """Return this node's assigned addresses on *network_id*.

    Addresses have any CIDR/zone suffixes stripped.  Returns an empty list
    for a blank or unknown network id.
    """
    nid = str(network_id or "").strip()
    if not nid:
        return []
    for network in list_networks():
        if network.id == nid:
            return [str(_strip_addr(addr)) for addr in network.assigned_addresses if addr]
    return []
def get_assigned_subnets(network_id: str) -> List[str]:
    """Return CIDR subnets (e.g. '10.147.17.0/24') for the given network.

    Converts each of the local node's CIDR-style assigned addresses on
    *network_id* into its containing network.  Addresses without a
    '/<prefix>' part are skipped; unparseable addresses are ignored rather
    than failing discovery.  Returns an empty list when the network id is
    blank or unknown.
    """
    # Hoisted out of the per-address loop; the original re-imported ipaddress
    # on every iteration.  Kept function-local to match the module's lazy style.
    import ipaddress

    nid = str(network_id or "").strip()
    if not nid:
        return []
    subnets: List[str] = []
    for n in list_networks():
        if n.id != nid:
            continue
        for addr in n.assigned_addresses:
            if addr and "/" in addr:
                try:
                    # strict=False: the assigned address has host bits set.
                    subnets.append(str(ipaddress.ip_network(addr, strict=False)))
                except Exception:
                    pass
    return subnets
def fetch_central_members(network_id: str, api_token: str) -> List[Dict[str, Any]]:
    """Fetch member details from ZeroTier Central API.

    Requires a valid ZeroTier Central API token.
    Returns a list of member objects containing 'config' with 'ipAssignments', etc.
    Returns an empty list on any failure (missing HTTP library, transport
    error, or a non-2xx response).
    """
    url = f"https://my.zerotier.com/api/v1/network/{network_id}/member"
    headers = {"Authorization": f"token {api_token}"}
    try:
        # Prefer httpx when installed...
        import httpx
        resp = httpx.get(url, headers=headers, timeout=10)
        resp.raise_for_status()
        return resp.json()
    except Exception:
        # ...otherwise fall back to requests before giving up.
        try:
            import requests
            resp = requests.get(url, headers=headers, timeout=10)
            resp.raise_for_status()
            return resp.json()
        except Exception as exc:
            debug(f"ZeroTier Central API fetch failed: {exc}")
            return []
def list_peers() -> List[Dict[str, Any]]:
    """Return peers known to the local ZeroTier node (best-effort).

    Prefers the optional Python binding; otherwise parses
    `zerotier-cli peers -j`.  Returns an empty list when neither source
    yields usable data.
    """
    if _HAVE_PY_ZEROTIER:
        try:
            raw = _zt.list_peers()  # type: ignore[attr-defined]
            return list(raw or [])
        except Exception as exc:  # pragma: no cover - optional dependency
            debug(f"py-zerotier list_peers failed: {exc}")
    if _cli_available():
        try:
            parsed = _run_cli_json("peers", "-j")
            if isinstance(parsed, list):
                return parsed
        except Exception as exc:
            debug(f"zerotier-cli peers failed: {exc}")
    return []
def _probe_url(url: str, *, timeout: float = 2.0, accept_json: bool = True) -> Tuple[bool, Optional[int], Optional[Any]]:
    """Try fetching the URL and return (ok, status_code, payload).

    Uses httpx if available, otherwise falls back to requests.  `ok` is True
    only for HTTP 200.  `payload` is the parsed JSON body when the response
    is 200 with a JSON content type and *accept_json* is set, else the raw
    response text.  Any transport failure yields (False, None, None).
    """
    try:
        try:
            import httpx
            resp = httpx.get(url, timeout=timeout)
            # Some response objects expose .status instead of .status_code.
            code = int(resp.status_code if hasattr(resp, "status_code") else resp.status)
            content_type = str(resp.headers.get("content-type") or "").lower()
            if code == 200 and accept_json and "json" in content_type:
                try:
                    return True, code, resp.json()
                except Exception:
                    # Body claimed JSON but failed to parse; fall back to text.
                    return True, code, resp.text
            return (code == 200), code, resp.text
        except Exception:
            # httpx missing or the request failed -- retry once with requests.
            import requests  # type: ignore
            resp = requests.get(url, timeout=timeout)
            code = int(resp.status_code)
            content_type = str(resp.headers.get("content-type") or "").lower()
            if code == 200 and accept_json and "json" in content_type:
                try:
                    return True, code, resp.json()
                except Exception:
                    return True, code, resp.text
            return (code == 200), code, resp.text
    except Exception as exc:
        debug(f"Probe failed: {url} -> {exc}")
        return False, None, None
def discover_services_on_network(
    network_id: str,
    *,
    ports: Optional[List[int]] = None,
    paths: Optional[List[str]] = None,
    timeout: float = 2.0,
    accept_json: bool = True,
    api_token: Optional[str] = None,
) -> List[ZeroTierServiceProbe]:
    """Probe peers on the given network for HTTP services.

    If api_token is provided, it fetches all member IPs from ZeroTier Central.
    Otherwise it probes the local node's assigned addresses and, as a
    fallback, every host of any assigned subnet of /24 size or smaller.

    Args:
        network_id: ZeroTier network to discover on (required).
        ports: ports to try per host (default [999]).
        paths: candidate URL paths; NOTE(review): only paths[0] is actually
            probed per host/port — confirm whether the rest are intentional.
        timeout: per-request timeout in seconds.
        accept_json: pass-through to `_probe_url` JSON handling.
        api_token: optional ZeroTier Central API token.

    Returns:
        One ZeroTierServiceProbe per host/port that answered with HTTP 200
        or 401.

    Raises:
        ValueError: when network_id is blank.
    """
    net = str(network_id or "").strip()
    if not net:
        raise ValueError("network_id required")
    ports = list(ports or [999])
    paths = list(paths or ["/health", "/api_version"])
    addresses = get_assigned_addresses(net)
    if api_token:
        members = fetch_central_members(net, api_token)
        for m in members:
            # Look for online members with IP assignments
            if m.get("online") and m.get("config", {}).get("ipAssignments"):
                for ip in m["config"]["ipAssignments"]:
                    addr = str(ip).split("/")[0]
                    if addr not in addresses:
                        addresses.append(addr)
    else:
        # Fallback: if no Central token, and we are on a likely /24 subnet,
        # we can try to guess/probe peers on that same subnet.
        subnets = get_assigned_subnets(net)
        for subnet_str in subnets:
            try:
                import ipaddress
                subnet = ipaddress.ip_network(subnet_str, strict=False)
                # Only scan if subnet is reasonably small (e.g. <= /24 = 256 hosts)
                if subnet.num_addresses <= 256:
                    for ip in subnet.hosts():
                        addr = str(ip)
                        if addr not in addresses:
                            addresses.append(addr)
            except Exception:
                pass
    probes: List[ZeroTierServiceProbe] = []
    # Parallelize probes to make subnet scanning feasible
    import concurrent.futures
    def do_probe(host: str) -> List[ZeroTierServiceProbe]:
        # Probe a single host across all requested ports; runs on a worker thread.
        host_probes = []
        for port in ports:
            # Try HTTP first as it's the common case for local storage
            for scheme in ("http", "https"):
                # Fast probe of just the first path
                path = paths[0]
                url = f"{scheme}://{host}:{port}{path}"
                ok, code, payload = _probe_url(url, timeout=timeout, accept_json=accept_json)
                # 401 still counts as "a service is there" (auth required).
                if ok or code == 401:
                    hint = None
                    try:
                        # remote_storage_server returns {"status": "ok", ...}
                        if code == 401:
                            hint = "remote_storage"  # Most likely
                        elif isinstance(payload, dict) and payload.get("status"):
                            hint = "remote_storage"
                        # hydrus returns {"api_version": ...}
                        elif isinstance(payload, dict) and payload.get("api_version"):
                            hint = "hydrus"
                    except Exception:
                        pass
                    host_probes.append(ZeroTierServiceProbe(
                        address=host,
                        port=int(port),
                        path=path,
                        url=url,
                        ok=(code == 200),
                        status_code=code,
                        payload=payload,
                        service_hint=hint,
                    ))
                    # Stop probing other schemes/paths for this host/port
                    break
        return host_probes
    # Use ThreadPoolExecutor for concurrent I/O probes
    max_workers = min(50, len(addresses) or 1)
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_addr = {executor.submit(do_probe, addr): addr for addr in addresses}
        for future in concurrent.futures.as_completed(future_to_addr):
            try:
                probes.extend(future.result())
            except Exception:
                # A failed worker only drops that host's probes.
                pass
    return probes
def find_peer_service(
    network_id: str,
    *,
    service_hint: Optional[str] = None,
    port: Optional[int] = None,
    path_candidates: Optional[List[str]] = None,
    timeout: float = 2.0,
    api_token: Optional[str] = None,
) -> Optional[ZeroTierServiceProbe]:
    """Return the first probe that matches service_hint or is successful.

    Useful for selecting a peer to configure a store against.  Returns None
    when discovery produced no probes at all.
    """
    paths = path_candidates or ["/health", "/api_version", "/session_key"]
    # Default port sweep: remote storage (999), dev server (5000), hydrus
    # (45869), then plain HTTP/HTTPS.
    ports = [port] if port is not None else [999, 5000, 45869, 80, 443]
    probes = discover_services_on_network(
        network_id, ports=ports, paths=paths, timeout=timeout, api_token=api_token
    )
    if not probes:
        return None
    if service_hint:
        for p in probes:
            if p.service_hint and service_hint.lower() in str(p.service_hint).lower():
                return p
            # Hydrus detection: check payload for 'api_version'
            try:
                if service_hint.lower() == "hydrus" and isinstance(p.payload, dict) and p.payload.get("api_version"):
                    return p
            except Exception:
                pass
    # Fallback: return the first probe.  NOTE(review): despite the original
    # "first OK probe" intent, this may return a 401/non-OK probe — callers
    # should check `.ok` before trusting it; confirm intended behavior.
    return probes[0] if probes else None

5
CLI.py
View File

@@ -93,7 +93,6 @@ _install_rich_traceback(show_locals=False)
from SYS.logger import debug, set_debug
from SYS.worker_manager import WorkerManager
from SYS.background_services import ensure_zerotier_server_running, stop_zerotier_server
from SYS.cmdlet_catalog import (
get_cmdlet_arg_choices,
@@ -1587,8 +1586,6 @@ class CLI:
return app
def run(self) -> None:
ensure_zerotier_server_running()
# Ensure Rich tracebacks are active even when invoking subcommands.
try:
config = self._config_loader.load()
@@ -2402,8 +2399,6 @@ Come to love it when others take what you share, as there is no greater joy
if pipeline_ctx_ref:
pipeline_ctx_ref.clear_current_command_text()
stop_zerotier_server()
_PTK_Lexer = object # type: ignore

View File

@@ -1,143 +0,0 @@
from __future__ import annotations
import os
import sys
import subprocess
from pathlib import Path
from typing import Optional
from SYS.config import load_config
from SYS.logger import debug, log
_zt_server_proc: Optional[subprocess.Popen] = None
_zt_server_last_config: Optional[str] = None
# We no longer use atexit here because explicit lifecycle management
# is preferred in TUI/REPL, and background servers use a monitor thread
# to shut down when the parent dies.
# atexit.register(lambda: stop_zerotier_server())
def ensure_zerotier_server_running() -> None:
    """Check config and ensure the ZeroTier storage server is running if needed.

    Reads networking.zerotier from the project config, then starts (or
    restarts, when the config changed or the old process died) the
    `remote_storage_server.py` helper as a detached subprocess.  All failures
    are swallowed or logged via debug(): this is a best-effort background
    service.
    """
    global _zt_server_proc, _zt_server_last_config
    try:
        # Load config from the project root (where config.conf typically lives)
        repo_root = Path(__file__).resolve().parent.parent
        cfg = load_config(repo_root)
    except Exception:
        return
    zt_conf = cfg.get("networking", {}).get("zerotier", {})
    serve_target = zt_conf.get("serve")
    port = zt_conf.get("port") or 999
    api_key = zt_conf.get("api_key")
    # Config fingerprint to detect changes between calls.
    config_id = f"{serve_target}|{port}|{api_key}"
    # Check if a previously-started process is still alive.
    if _zt_server_proc:
        if _zt_server_proc.poll() is not None:
            # Process died
            debug("ZeroTier background server died. Restarting...")
            _zt_server_proc = None
        elif config_id == _zt_server_last_config:
            # Already running with correct config
            return
    # If config changed and we have a proc, stop it
    if _zt_server_proc and config_id != _zt_server_last_config:
        debug("ZeroTier server config changed. Stopping old process...")
        try:
            _zt_server_proc.terminate()
            _zt_server_proc.wait(timeout=2)
        except Exception:
            try:
                _zt_server_proc.kill()
            except Exception:
                pass
        _zt_server_proc = None
    _zt_server_last_config = config_id
    if not serve_target:
        return
    # Resolve the storage path: a named folder store first, then a literal path.
    storage_path = None
    folders = cfg.get("store", {}).get("folder", {})
    for name, block in folders.items():
        if name.lower() == serve_target.lower():
            storage_path = block.get("path") or block.get("PATH")
            break
    if not storage_path:
        # Fallback to direct path
        storage_path = serve_target
    if not storage_path or not Path(storage_path).exists():
        debug(f"ZeroTier host target '{serve_target}' not found at {storage_path}. Cannot start server.")
        return
    repo_root = Path(__file__).resolve().parent.parent
    server_script = repo_root / "scripts" / "remote_storage_server.py"
    if not server_script.exists():
        debug(f"ZeroTier server script not found at {server_script}")
        return
    # Use the same python executable that is currently running.
    # On Windows, explicitly prefer the .venv python if it exists.
    python_exe = sys.executable
    if sys.platform == "win32":
        venv_py = repo_root / ".venv" / "Scripts" / "python.exe"
        if venv_py.exists():
            python_exe = str(venv_py)
    cmd = [python_exe, str(server_script),
           "--storage-path", str(storage_path),
           "--port", str(port),
           "--monitor"]
    # --parent-pid lets the child's monitor thread exit when this process dies.
    cmd += ["--parent-pid", str(os.getpid())]
    if api_key:
        cmd += ["--api-key", str(api_key)]
    try:
        import datetime  # replaces the original __import__('datetime') hack
        debug(f"Starting ZeroTier storage server: {cmd}")
        # Capture errors to a log file instead of DEVNULL for debuggability.
        log_file = repo_root / "zt_server_error.log"
        with open(log_file, "a") as f:
            f.write(f"\n--- Starting server at {datetime.datetime.now()} ---\n")
            f.write(f"Command: {' '.join(cmd)}\n")
            f.write(f"CWD: {repo_root}\n")
            f.write(f"Python: {python_exe}\n")
        # Separate handle for the child's stderr.  Closed in the parent once
        # Popen has handed the child its own copy — the original leaked this
        # file descriptor on every (re)start.
        err_f = open(log_file, "a")
        try:
            # On Windows, CREATE_NO_WINDOW = 0x08000000 ensures no console pops up
            _zt_server_proc = subprocess.Popen(
                cmd,
                stdout=subprocess.DEVNULL,
                stderr=err_f,
                cwd=str(repo_root),
                creationflags=0x08000000 if sys.platform == "win32" else 0
            )
        finally:
            err_f.close()
        log(f"ZeroTier background server started on port {port} (sharing {serve_target})")
    except Exception as e:
        debug(f"Failed to start ZeroTier server: {e}")
        _zt_server_proc = None
def stop_zerotier_server() -> None:
    """Stop the background server if it is running (best-effort)."""
    global _zt_server_proc
    proc = _zt_server_proc
    if not proc:
        return
    try:
        proc.terminate()
        proc.wait(timeout=2)
    except Exception:
        # Graceful shutdown failed; escalate to a hard kill.
        try:
            proc.kill()
        except Exception:
            pass
    _zt_server_proc = None

View File

@@ -188,19 +188,6 @@ def _apply_conf_block(
tool[tool_name] = dict(block)
return
if kind_l == "networking":
net_name = str(subtype).strip().lower()
if not net_name:
return
net = config.setdefault("networking", {})
if not isinstance(net, dict):
config["networking"] = {}
net = config["networking"]
existing = net.get(net_name)
if isinstance(existing, dict):
_merge_dict_inplace(existing, block)
else:
net[net_name] = dict(block)
return
@@ -366,24 +353,6 @@ def _serialize_conf(config: Dict[str, Any]) -> str:
seen_keys.add(k_upper)
lines.append(f"{k}={_format_conf_value(block.get(k))}")
# Networking blocks
networking = config.get("networking")
if isinstance(networking, dict):
for name in sorted(networking.keys()):
block = networking.get(name)
if not isinstance(block, dict):
continue
lines.append("")
lines.append(f"[networking={name}]")
seen_keys = set()
for k in sorted(block.keys()):
k_upper = k.upper()
if k_upper in seen_keys:
continue
seen_keys.add(k_upper)
lines.append(f"{k}={_format_conf_value(block.get(k))}")
return "\n".join(lines).rstrip() + "\n"
@@ -674,34 +643,6 @@ def resolve_debug_log(config: Dict[str, Any]) -> Optional[Path]:
return path
def migrate_conf_to_db(config: Dict[str, Any]) -> None:
"""Migrate the configuration dictionary to the database."""
log("Migrating configuration from .conf to database...")
for key, value in config.items():
if key in ("store", "provider", "tool", "networking"):
cat = key
sub_dict = value
if isinstance(sub_dict, dict):
for subtype, subtype_items in sub_dict.items():
if isinstance(subtype_items, dict):
# For provider/tool/networking, subtype is the name (e.g. alldebrid)
# but for store, it's the type (e.g. hydrusnetwork)
if cat == "store" and str(subtype).strip().lower() == "folder":
continue
if cat != "store":
for k, v in subtype_items.items():
save_config_value(cat, subtype, "", k, v)
else:
for name, items in subtype_items.items():
if isinstance(items, dict):
for k, v in items.items():
save_config_value(cat, subtype, name, k, v)
else:
# Global setting
save_config_value("global", "", "", key, value)
log("Configuration migration complete!")
def load_config(
config_dir: Optional[Path] = None, filename: str = DEFAULT_CONFIG_FILENAME
) -> Dict[str, Any]:
@@ -712,37 +653,12 @@ def load_config(
if cache_key in _CONFIG_CACHE:
return _CONFIG_CACHE[cache_key]
# 1. Try loading from database first
# Load from database
db_config = get_config_all()
if db_config:
_CONFIG_CACHE[cache_key] = db_config
return db_config
# 2. If DB is empty, try loading from legacy config.conf
if config_path.exists():
if config_path.suffix.lower() != ".conf":
log(f"Unsupported config format: {config_path.name} (only .conf is supported)")
return {}
try:
config = _load_conf_config(base_dir, config_path)
# Migrate to database
migrate_conf_to_db(config)
# Optional: Rename old config file to mark as migrated
try:
migrated_path = config_path.with_name(config_path.name + ".migrated")
config_path.rename(migrated_path)
log(f"Legacy config file renamed to {migrated_path.name}")
except Exception as e:
log(f"Could not rename legacy config file: {e}")
_CONFIG_CACHE[cache_key] = config
return config
except Exception as e:
log(f"Failed to load legacy config at {config_path}: {e}")
return {}
return {}
@@ -771,18 +687,43 @@ def save_config(
base_dir = config_dir or SCRIPT_DIR
config_path = base_dir / filename
if config_path.suffix.lower() != ".conf":
raise RuntimeError(
f"Unsupported config format: {config_path.name} (only .conf is supported)"
)
# Safety Check: placeholder (folder store validation removed)
_validate_config_safety(config)
# 1. Save to Database
try:
config_path.write_text(_serialize_conf(config), encoding="utf-8")
except OSError as exc:
raise RuntimeError(f"Failed to write config to {config_path}: {exc}") from exc
from SYS.database import db, save_config_value
# We want to clear and re-save or just update?
# For simplicity, we'll iterate and update.
for key, value in config.items():
if key in ('store', 'provider', 'tool'):
if isinstance(value, dict):
for subtype, instances in value.items():
if isinstance(instances, dict):
# provider/tool are usually config[cat][subtype][key]
# but store is config['store'][subtype][name][key]
if key == 'store':
for name, settings in instances.items():
if isinstance(settings, dict):
for k, v in settings.items():
save_config_value(key, subtype, name, k, v)
else:
for k, v in instances.items():
save_config_value(key, subtype, "default", k, v)
else:
# global settings
if not key.startswith("_"):
save_config_value("global", "none", "none", key, value)
except Exception as e:
log(f"Failed to save config to database: {e}")
# 2. Legacy fallback: write to .conf for now (optional, but keep for backward compat for a bit)
if config_path.suffix.lower() == ".conf":
# Safety Check: placeholder (folder store validation removed)
_validate_config_safety(config)
try:
config_path.write_text(_serialize_conf(config), encoding="utf-8")
except OSError as exc:
log(f"Failed to write legacy config to {config_path}: {exc}")
cache_key = _make_cache_key(config_dir, filename, config_path)
_CONFIG_CACHE[cache_key] = config

View File

@@ -138,7 +138,8 @@ def save_config_value(category: str, subtype: str, item_name: str, key: str, val
def get_config_all() -> Dict[str, Any]:
"""Retrieve all configuration from the database in the legacy dict format."""
try:
db.execute("DELETE FROM config WHERE category='store' AND LOWER(subtype)='folder'")
db.execute("DELETE FROM config WHERE category='store' AND LOWER(subtype) in ('folder', 'zerotier')")
db.execute("DELETE FROM config WHERE category='networking'")
except Exception:
pass
rows = db.fetchall("SELECT category, subtype, item_name, key, value FROM config")
@@ -165,7 +166,7 @@ def get_config_all() -> Dict[str, Any]:
config[key] = parsed_val
else:
# Modular structure: config[cat][sub][name][key]
if cat in ('provider', 'tool', 'networking'):
if cat in ('provider', 'tool'):
cat_dict = config.setdefault(cat, {})
sub_dict = cat_dict.setdefault(sub, {})
sub_dict[key] = parsed_val

View File

@@ -48,14 +48,6 @@ _PROVIDER_DEPENDENCIES: Dict[str, List[Tuple[str, str]]] = {
"soulseek": [("aioslsk", "aioslsk>=1.6.0")],
}
# Dependencies required when ZeroTier features are configured (auto-install when enabled)
_ZEROTIER_DEPENDENCIES: List[Tuple[str, str]] = [
("flask", "flask>=2.3.0"),
("flask_cors", "flask-cors>=3.0.1"),
("werkzeug", "werkzeug>=2.3.0"),
]
def florencevision_missing_modules() -> List[str]:
return [
requirement
@@ -151,29 +143,5 @@ def maybe_auto_install_configured_tools(config: Dict[str, Any]) -> None:
label = f"{provider_name.title()} provider"
_install_requirements(label, requirements)
# ZeroTier: if a zerotier section is present OR a zerotier store is configured,
# optionally auto-install Flask-based remote server dependencies so the
# `remote_storage_server.py` and CLI helper will run out-of-the-box.
try:
zerotier_cfg = (config or {}).get("zerotier")
store_cfg = (config or {}).get("store") if isinstance(config, dict) else {}
store_has_zerotier = isinstance(store_cfg, dict) and bool(store_cfg.get("zerotier"))
if (isinstance(zerotier_cfg, dict) and zerotier_cfg) or store_has_zerotier:
auto_install = True
if isinstance(zerotier_cfg, dict) and "auto_install" in zerotier_cfg:
auto_install = _as_bool(zerotier_cfg.get("auto_install"), True)
if auto_install:
missing = [
requirement
for import_name, requirement in _ZEROTIER_DEPENDENCIES
if not _try_import(import_name)
]
if missing:
_install_requirements("ZeroTier", missing)
except Exception:
# Don't let optional-dep logic raise at startup
pass
__all__ = ["maybe_auto_install_configured_tools", "florencevision_missing_modules"]

File diff suppressed because it is too large Load Diff

View File

@@ -1,659 +0,0 @@
"""ZeroTier-backed Store implementation.
This store locates a service running on peers in a ZeroTier network and
proxies store operations to that remote service. The remote service can be
our `remote_storage_server` (default) or a Hydrus API server (`service=hydrus`).
Configuration keys:
- NAME: store instance name (required)
- NETWORK_ID: ZeroTier network ID to use for discovery (required)
- SERVICE: 'remote' or 'hydrus' (default: 'remote')
- PORT: service port (default: 999 for remote, 45869 for hydrus)
- API_KEY: optional API key to include in requests
- HOST: optional preferred peer address (skip discovery if provided)
Notes:
- This implementation focuses on read operations (search, get_file, get_metadata,
tag/url ops). Uploads can be implemented later when the remote server
supports a robust, authenticated upload endpoint.
"""
from __future__ import annotations

from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple

from SYS.logger import debug, log
from Store._base import Store
class ZeroTier(Store):
    """Store backed by a service reachable over a ZeroTier network.

    Proxies store operations either to our remote_storage_server
    (SERVICE='remote', default) or to a Hydrus API server
    (SERVICE='hydrus').  Peers are discovered on the configured
    NETWORK_ID unless an explicit HOST is given.
    """

    @classmethod
    def config_schema(cls) -> List[Dict[str, Any]]:
        """Describe the config fields the settings UI should render.

        Each entry carries: key, human-readable label, default value,
        required flag and an optional ``secret`` marker for values that
        must be masked (the API key).
        """
        return [
            {"key": "NAME", "label": "Store Name", "default": "", "required": True},
            {"key": "NETWORK_ID", "label": "ZeroTier Network ID", "default": "", "required": True},
            {"key": "HOST", "label": "Peer address (IP)", "default": "", "required": True},
            {"key": "PORT", "label": "Service Port", "default": "999", "required": False},
            {"key": "SERVICE", "label": "Service Type (remote|hydrus)", "default": "remote", "required": False},
            {"key": "API_KEY", "label": "API Key (optional)", "default": "", "required": False, "secret": True},
            {"key": "TIMEOUT", "label": "Request timeout (s)", "default": "5", "required": False},
        ]
def __new__(cls, *args: Any, **kwargs: Any) -> "ZeroTier":
inst = super().__new__(cls)
name = kwargs.get("NAME")
if name is not None:
setattr(inst, "NAME", str(name))
return inst
    def __init__(
        self,
        instance_name: Optional[str] = None,
        network_id: Optional[str] = None,
        service: Optional[str] = None,
        port: Optional[int] = None,
        api_key: Optional[str] = None,
        host: Optional[str] = None,
        timeout: Optional[int] = None,
        *,
        NAME: Optional[str] = None,
        NETWORK_ID: Optional[str] = None,
        SERVICE: Optional[str] = None,
        PORT: Optional[int] = None,
        API_KEY: Optional[str] = None,
        HOST: Optional[str] = None,
        TIMEOUT: Optional[int] = None,
    ) -> None:
        """Initialise the store.

        Accepts both lowercase keyword parameters and the uppercase
        config-file keys (NAME, NETWORK_ID, ...).  An uppercase value is
        used only when the matching lowercase parameter was not given.
        Non-numeric PORT/TIMEOUT values are discarded so defaults apply.
        """
        # Fold the uppercase config aliases into the lowercase parameters.
        if instance_name is None and NAME is not None:
            instance_name = str(NAME)
        if network_id is None and NETWORK_ID is not None:
            network_id = str(NETWORK_ID)
        if service is None and SERVICE is not None:
            service = str(SERVICE)
        if port is None and PORT is not None:
            try:
                port = int(PORT)
            except Exception:
                port = None
        if api_key is None and API_KEY is not None:
            api_key = str(API_KEY)
        if host is None and HOST is not None:
            host = str(HOST)
        if timeout is None and TIMEOUT is not None:
            try:
                timeout = int(TIMEOUT)
            except Exception:
                timeout = None
        self._name = str(instance_name or "")
        self._network_id = str(network_id or "").strip()
        self._service = (str(service or "remote") or "remote").lower()
        # Default port depends on the service: Hydrus API vs our remote server.
        self._port = int(port if port is not None else (45869 if self._service == "hydrus" else 999))
        # Empty strings collapse to None so later truthiness checks are simple.
        self._api_key = str(api_key or "").strip() or None
        self._preferred_host = str(host or "").strip() or None
        self._timeout = int(timeout or 5)
        # Cached discovery result
        self._cached_peer: Optional[Tuple[str, int]] = None
        self._cached_client: Optional[Any] = None
def name(self) -> str:
return str(getattr(self, "_name", "zerotier"))
# -------------------- internal helpers --------------------
    def _discover_peer(self, *, refresh: bool = False) -> Optional[Tuple[str, int]]:
        """Discover a peer host:port for this service on the configured network.

        Returns (host, port) or None.  A configured HOST short-circuits
        discovery entirely, and a successful discovery is cached until a
        caller passes ``refresh=True``.
        """
        if self._preferred_host and not refresh:
            return (self._preferred_host, self._port)
        if self._cached_peer and not refresh:
            return self._cached_peer
        # Imported lazily so the store degrades gracefully on machines
        # without the ZeroTier helper module.
        try:
            from API import zerotier as zt
        except Exception as exc:
            debug(f"ZeroTier discovery helper not available: {exc}")
            return None
        # Try to find a central API key for better discovery
        from SYS.config import load_config
        conf = load_config()
        net_conf = conf.get("networking", {}).get("zerotier", {})
        central_token = net_conf.get("api_key")
        # Look for a matching service on the network
        probe = zt.find_peer_service(
            self._network_id,
            service_hint=("hydrus" if self._service == "hydrus" else None),
            port=self._port,
            api_token=central_token,
        )
        if probe:
            # Extract host:port; fall back to the configured port when the
            # probe did not report one.
            host = probe.address
            port = probe.port or self._port
            self._cached_peer = (host, int(port))
            debug(f"ZeroTier store '{self.name()}' discovered peer {host}:{port}")
            return self._cached_peer
        debug(f"ZeroTier store '{self.name()}' found no peers on network {self._network_id}")
        return None
    def _ensure_client(self, *, refresh: bool = False) -> Optional[Any]:
        """Return a remote client object or base URL depending on service type.

        For 'hydrus' service we return an API.HydrusNetwork instance; for 'remote'
        service we return a base URL string to send HTTP requests to.
        The result is cached; pass ``refresh=True`` to rediscover the peer.
        """
        if self._cached_client and not refresh:
            return self._cached_client
        peer = self._discover_peer(refresh=refresh)
        if not peer:
            return None
        host, port = peer
        if self._service == "hydrus":
            try:
                from API.HydrusNetwork import HydrusNetwork as HydrusClient
                base_url = f"http://{host}:{port}"
                client = HydrusClient(url=base_url, access_key=(self._api_key or ""), timeout=self._timeout)
                self._cached_client = client
                return client
            except Exception as exc:
                # Note: failure here is not cached, so the next call retries.
                debug(f"Failed to instantiate Hydrus client for ZeroTier peer {host}:{port}: {exc}")
                return None
        # Default: remote_storage 'http' style API
        self._cached_client = f"http://{host}:{port}"
        return self._cached_client
    def _request_remote(self, method: str, path: str, *, params: Optional[Dict[str, Any]] = None, json_body: Optional[Any] = None, timeout: Optional[int] = None) -> Optional[Any]:
        """Issue an HTTP request against the remote storage base URL.

        Returns the decoded JSON body (or raw text when the body is not
        JSON), or None on any failure.  The configured API key, when set,
        is sent as an ``X-API-Key`` header.
        """
        base = self._ensure_client()
        if base is None or not isinstance(base, str):
            debug("No remote base URL available for ZeroTier store")
            return None
        url = base.rstrip("/") + path
        headers = {}
        if self._api_key:
            headers["X-API-Key"] = self._api_key
        try:
            import httpx
            resp = httpx.request(method, url, params=params, json=json_body, headers=headers, timeout=timeout or self._timeout)
            if resp.status_code == 401:
                # Surface auth failures loudly; raise_for_status() below still
                # converts the 401 into a None return for the caller.
                log(f"[Store={self._name}] Remote service at {url} requires an API Key. Please configure 'API_KEY' for this store.", severity="warning")
            resp.raise_for_status()
            try:
                return resp.json()
            except Exception:
                return resp.text
        except Exception as exc:
            debug(f"ZeroTier HTTP request failed: {method} {url} -> {exc}")
            return None
# -------------------- Store API --------------------
def search(self, query: str, **kwargs: Any) -> List[Dict[str, Any]]:
"""Search for files on the remote service."""
client = self._ensure_client()
if client is None:
debug("ZeroTier search: no client available")
return []
if self._service == "hydrus":
# Hydrus API expects tags list; best-effort: treat query as a single tag or raw search term
try:
tags = [query]
payload = client.search_files(tags, return_hashes=True, return_file_ids=False, return_file_count=False)
# Hydrus JSON shape varies; normalize to simple list
files = []
try:
if isinstance(payload, dict):
rows = payload.get("files") or payload.get("metadata") or []
for r in rows:
files.append(r if isinstance(r, dict) else {})
except Exception:
pass
return files
except Exception as exc:
debug(f"Hydrus search failed: {exc}")
return []
# remote_storage path
params = {"q": query, "limit": int(kwargs.get("limit", 100))}
res = self._request_remote("GET", "/files/search", params=params)
if isinstance(res, dict):
files = list(res.get("files") or [])
# Inject store name and normalize keys for the CLI
for f in files:
if isinstance(f, dict):
f["store"] = self._name
# remote_storage_server returns 'file_path' and 'size'
# CLI prefers 'path' and 'size_bytes'
if "file_path" in f and "path" not in f:
f["path"] = f["file_path"]
# Try to extract title from tags
tags = f.get("tag") or []
title_tag = next((t for t in tags if str(t).lower().startswith("title:")), None)
if title_tag and ":" in title_tag:
f["title"] = title_tag.split(":", 1)[1].strip()
elif "title" not in f:
try:
f["title"] = Path(f["file_path"]).stem
except Exception:
f["title"] = f["file_path"]
if "size" in f and "size_bytes" not in f:
f["size_bytes"] = f["size"]
return files
return []
def get_file(self, file_hash: str, **kwargs: Any) -> Optional[Path | str]:
"""Return either a URL (hydrus or remote capable) or local path (not implemented).
For Hydrus: return the direct file URL (Hydrus client URL with access token appended if needed).
For remote_storage: currently return the metadata path (if available) or None.
"""
client = self._ensure_client()
if client is None:
return None
if self._service == "hydrus":
try:
# Hydrus wrapper provides file_url() convenience
return client.file_url(file_hash)
except Exception as exc:
debug(f"Hydrus get_file failed: {exc}")
return None
# remote storage: return download URL
base = self._ensure_client()
if not base or not isinstance(base, str):
return None
url = f"{base.rstrip('/')}/files/raw/{file_hash}"
if self._api_key:
sep = "&" if "?" in url else "?"
url += f"{sep}api_key={self._api_key}"
return url
def download_to_temp(
self,
file_hash: str,
temp_root: Optional[Path] = None,
suffix: Optional[str] = None,
progress_callback: Optional[Callable[[int, int], None]] = None,
) -> Optional[Path]:
"""Download a file from the remote peer to a local temporary file."""
import os
import httpx
import tempfile
if self._service == "hydrus":
return None
url = self.get_file(file_hash)
if not url or not isinstance(url, str) or not url.startswith("http"):
return None
# Ensure suffix starts with a dot if provided
if suffix and not suffix.startswith("."):
suffix = f".{suffix}"
if not suffix:
suffix = ".tmp"
try:
# Use provided temp_root or system temp
if temp_root:
temp_root.mkdir(parents=True, exist_ok=True)
fd, tmp_path = tempfile.mkstemp(dir=str(temp_root), suffix=suffix)
else:
fd, tmp_path = tempfile.mkstemp(suffix=suffix)
os_fd = os.fdopen(fd, "wb")
headers = {}
if self._api_key:
headers["X-API-Key"] = self._api_key
downloaded = 0
total = 0
with httpx.stream("GET", url, headers=headers, timeout=self._timeout) as r:
r.raise_for_status()
total = int(r.headers.get("Content-Length", 0))
# Use a larger chunk size for ZeroTier/P2P efficiency
for chunk in r.iter_bytes(chunk_size=128 * 1024):
if chunk:
os_fd.write(chunk)
downloaded += len(chunk)
if progress_callback:
try:
progress_callback(downloaded, total)
except Exception:
pass
os_fd.close()
return Path(tmp_path)
except Exception as exc:
debug(f"ZeroTier download_to_temp failed for {file_hash}: {exc}")
return None
    def add_file(self, file_path: Path, **kwargs: Any) -> Optional[str]:
        """Upload a local file to the remote ZeroTier peer (supports 'remote' and 'hydrus' services).

        Returns the file hash on success, or None on failure.

        kwargs:
            tag: list (or comma-separated string) of tags to attach.
            url: list (or comma-separated string) of URLs to associate.
        """
        p = Path(file_path)
        if not p.exists():
            debug(f"ZeroTier add_file: local file not found: {p}")
            return None
        # Hydrus: delegate to Hydrus client add_file()
        if self._service == "hydrus":
            try:
                client = self._ensure_client()
                if client is None:
                    debug("ZeroTier add_file: Hydrus client unavailable")
                    return None
                return client.add_file(p, **kwargs)
            except Exception as exc:
                debug(f"ZeroTier hydrus add_file failed: {exc}")
                return None
        # Remote server: POST /files/upload multipart/form-data
        base = self._ensure_client()
        if base is None or not isinstance(base, str):
            debug("ZeroTier add_file: no remote base URL available")
            return None
        url = base.rstrip("/") + "/files/upload"
        headers = {}
        if self._api_key:
            headers["X-API-Key"] = self._api_key
        try:
            import httpx
            with open(p, "rb") as fh:
                # Build form fields for tags/urls (support list or comma-separated)
                data = []
                if "tag" in kwargs:
                    tags = kwargs.get("tag") or []
                    if isinstance(tags, str):
                        tags = [t.strip() for t in tags.split(",") if t.strip()]
                    for t in tags:
                        data.append(("tag", t))
                if "url" in kwargs:
                    urls = kwargs.get("url") or []
                    if isinstance(urls, str):
                        urls = [u.strip() for u in urls.split(",") if u.strip()]
                    for u in urls:
                        data.append(("url", u))
                files = {"file": (p.name, fh, "application/octet-stream")}
                # Prefer `requests` for local testing / WSGI servers which may not accept
                # chunked uploads reliably with httpx/httpcore. Fall back to httpx otherwise.
                try:
                    try:
                        import requests
                        # Convert data list-of-tuples to dict for requests (acceptable for repeated fields)
                        data_dict = {}
                        for k, v in data:
                            if k in data_dict:
                                existing = data_dict[k]
                                if not isinstance(existing, list):
                                    data_dict[k] = [existing]
                                data_dict[k].append(v)
                            else:
                                data_dict[k] = v
                        r = requests.post(url, headers=headers, files=files, data=data_dict or None, timeout=self._timeout)
                        if r.status_code in (200, 201):
                            try:
                                payload = r.json()
                                file_hash = payload.get("hash") or payload.get("file_hash")
                                return file_hash
                            except Exception:
                                return None
                        try:
                            debug(f"[zerotier-debug] upload failed (requests) status={r.status_code} body={r.text}")
                        except Exception:
                            pass
                        debug(f"ZeroTier add_file failed (requests): status {r.status_code} body={getattr(r, 'text', '')}")
                        return None
                    except Exception:
                        # Any failure in the requests path (including ImportError)
                        # falls through to a second attempt via httpx.
                        # NOTE(review): a mid-transfer requests failure therefore
                        # re-uploads via httpx — confirm the server deduplicates.
                        import httpx
                        resp = httpx.post(url, headers=headers, files=files, data=data, timeout=self._timeout)
                        # Note: some environments may not create request.files correctly; capture body for debugging
                        try:
                            if resp.status_code in (200, 201):
                                try:
                                    payload = resp.json()
                                    file_hash = payload.get("hash") or payload.get("file_hash")
                                    return file_hash
                                except Exception:
                                    return None
                            # Debug output to help tests capture server response
                            try:
                                debug(f"[zerotier-debug] upload failed status={resp.status_code} body={resp.text}")
                            except Exception:
                                pass
                            debug(f"ZeroTier add_file failed: status {resp.status_code} body={getattr(resp, 'text', '')}")
                            return None
                        except Exception as exc:
                            debug(f"ZeroTier add_file exception: {exc}")
                            return None
                except Exception as exc:
                    debug(f"ZeroTier add_file exception: {exc}")
                    return None
        except Exception as exc:
            debug(f"ZeroTier add_file exception: {exc}")
            return None
    def get_metadata(self, file_hash: str, **kwargs: Any) -> Optional[Dict[str, Any]]:
        """Fetch metadata for a file by hash; returns None on failure.

        For the remote service the result dict gains a 'title' key derived
        from any 'title:...' tag so the details panel can display it.
        """
        client = self._ensure_client()
        if client is None:
            return None
        if self._service == "hydrus":
            try:
                payload = client.fetch_file_metadata(hashes=[file_hash], include_file_url=True, include_size=True, include_mime=True)
                return payload
            except Exception as exc:
                debug(f"Hydrus fetch_file_metadata failed: {exc}")
                return None
        res = self._request_remote("GET", f"/files/{file_hash}")
        if isinstance(res, dict):
            # Extract title from tags for the details panel/metadata view
            tags = res.get("tag") or []
            title_tag = next((t for t in tags if str(t).lower().startswith("title:")), None)
            if title_tag and ":" in title_tag:
                res["title"] = title_tag.split(":", 1)[1].strip()
            return res
        return None
    def get_tag(self, file_identifier: str, **kwargs: Any) -> Tuple[List[str], str]:
        """Return (tags, service_label) for a file.

        service_label is 'hydrus' or 'remote' per the configured service;
        the empty label is returned only when no client is available.
        """
        # Return (tags, service). For hydrus use fetch_file_metadata service keys.
        client = self._ensure_client()
        if client is None:
            return ([], "")
        if self._service == "hydrus":
            try:
                payload = client.fetch_file_metadata(hashes=[file_identifier], include_service_keys_to_tags=True)
                tags = []
                if isinstance(payload, dict):
                    metas = payload.get("metadata") or []
                    if metas and isinstance(metas, list) and metas:
                        md = metas[0]
                        if isinstance(md, dict):
                            # NOTE(review): Hydrus 'service_keys_to_tags' is
                            # normally a mapping keyed by service, not a flat
                            # tag list — confirm callers handle that shape.
                            tags = md.get("service_keys_to_tags") or []
                return (tags, "hydrus")
            except Exception as exc:
                debug(f"Hydrus get_tag failed: {exc}")
                return ([], "hydrus")
        res = self._request_remote("GET", f"/tags/{file_identifier}")
        if isinstance(res, dict):
            return (list(res.get("tag") or []), "remote")
        return ([], "remote")
def add_tag(self, file_identifier: str, tags: List[str], **kwargs: Any) -> bool:
client = self._ensure_client()
if client is None:
return False
if self._service == "hydrus":
try:
service_name = kwargs.get("service_name") or "my tags"
client.add_tag(file_identifier, tags, service_name)
return True
except Exception as exc:
debug(f"Hydrus add_tag failed: {exc}")
return False
payload = {"tag": tags}
res = self._request_remote("POST", f"/tags/{file_identifier}", json_body=payload)
return bool(res)
def delete_tag(self, file_identifier: str, tags: List[str], **kwargs: Any) -> bool:
client = self._ensure_client()
if client is None:
return False
if self._service == "hydrus":
try:
service_name = kwargs.get("service_name") or "my tags"
client.delete_tag(file_identifier, tags, service_name)
return True
except Exception as exc:
debug(f"Hydrus delete_tag failed: {exc}")
return False
# remote_storage DELETE /tags/<hash>?tag=tag1,tag2
query = {"tag": ",".join(tags)}
res = self._request_remote("DELETE", f"/tags/{file_identifier}", params=query)
return bool(res)
    def get_url(self, file_identifier: str, **kwargs: Any) -> List[str]:
        """Return the URLs associated with a file (empty list on failure)."""
        # For Hydrus, use fetch_file_metadata to include file URL; for remote, GET tags endpoint includes urls
        client = self._ensure_client()
        if client is None:
            return []
        if self._service == "hydrus":
            try:
                payload = client.fetch_file_metadata(hashes=[file_identifier], include_file_url=True)
                # Inner try guards against payload not being a dict / odd shapes.
                try:
                    metas = payload.get("metadata") or []
                    if metas and isinstance(metas, list) and metas:
                        md = metas[0]
                        if isinstance(md, dict):
                            urls = md.get("file_urls") or []
                            return list(urls)
                except Exception:
                    pass
                return []
            except Exception as exc:
                debug(f"Hydrus get_url failed: {exc}")
                return []
        meta = self._request_remote("GET", f"/files/{file_identifier}")
        if isinstance(meta, dict):
            urls = meta.get("url") or []
            return list(urls)
        return []
def add_url(self, file_identifier: str, url: List[str], **kwargs: Any) -> bool:
client = self._ensure_client()
if client is None:
return False
if self._service == "hydrus":
try:
client.associate_url(hashes=[file_identifier], url=url[0])
return True
except Exception as exc:
debug(f"Hydrus add_url failed: {exc}")
return False
payload = {"url": url}
res = self._request_remote("POST", f"/files/{file_identifier}/url", json_body=payload)
return bool(res)
def delete_url(self, file_identifier: str, url: List[str], **kwargs: Any) -> bool:
client = self._ensure_client()
if client is None:
return False
if self._service == "hydrus":
try:
client.delete_urls(hashes=[file_identifier], urls=url)
return True
except Exception as exc:
debug(f"Hydrus delete_url failed: {exc}")
return False
payload = {"url": url}
res = self._request_remote("DELETE", f"/files/{file_identifier}/url", json_body=payload)
return bool(res)
    def get_note(self, file_identifier: str, **kwargs: Any) -> Dict[str, str]:
        """Get named notes for a file. Returns a mapping of name->text.

        Only the Hydrus service exposes notes; the remote storage server
        has no notes API yet, so every other path yields an empty dict.
        """
        client = self._ensure_client()
        if client is None:
            return {}
        if self._service == "hydrus":
            try:
                # Hydrus API may expose notes via fetch_file_metadata; best-effort
                payload = client.fetch_file_metadata(hashes=[file_identifier], include_notes=True)
                if isinstance(payload, dict):
                    metas = payload.get("metadata") or []
                    if metas and isinstance(metas, list):
                        md = metas[0]
                        notes = md.get("notes") or {}
                        return dict(notes)
            except Exception:
                return {}
        # Remote storage has no notes API yet
        return {}
def set_note(self, file_identifier: str, name: str, text: str, **kwargs: Any) -> bool:
client = self._ensure_client()
if client is None:
return False
if self._service == "hydrus":
try:
client.set_note(file_identifier, name, text)
return True
except Exception:
return False
# Remote storage: not supported
return False
def delete_note(self, file_identifier: str, name: str, **kwargs: Any) -> bool:
client = self._ensure_client()
if client is None:
return False
if self._service == "hydrus":
try:
client.delete_note(file_identifier, name)
return True
except Exception:
return False
return False

View File

@@ -64,9 +64,7 @@ def _discover_store_classes() -> Dict[str, Type[BaseStore]]:
discovered: Dict[str, Type[BaseStore]] = {}
for module_info in pkgutil.iter_modules(store_pkg.__path__):
module_name = module_info.name
if module_name in {"__init__", "_base", "registry"}:
continue
if module_name.lower() == "folder":
if module_name.startswith(("_", "registry")):
continue
try:

7
TUI.py
View File

@@ -47,7 +47,6 @@ from SYS.cmdlet_catalog import ensure_registry_loaded, list_cmdlet_names # type
from SYS.cli_syntax import validate_pipeline_text # type: ignore # noqa: E402
from TUI.pipeline_runner import PipelineRunner # type: ignore # noqa: E402
from SYS.background_services import ensure_zerotier_server_running, stop_zerotier_server
def _dedup_preserve_order(items: List[str]) -> List[str]:
@@ -503,13 +502,7 @@ class PipelineHubApp(App):
if self.worker_table:
self.worker_table.add_columns("ID", "Type", "Status", "Details")
self.set_interval(5.0, ensure_zerotier_server_running)
def on_unmount(self) -> None:
stop_zerotier_server()
async def _manage_zerotier_server(self) -> None:
# Method removed - logic moved to SYS.background_services
pass
# Initialize the store choices cache at startup (filters disabled stores)

View File

@@ -128,7 +128,6 @@ class ConfigModal(ModalScreen):
yield Label("Categories", classes="config-label")
with ListView(id="category-list"):
yield ListItem(Label("Global Settings"), id="cat-globals")
yield ListItem(Label("Connectors"), id="cat-networking")
yield ListItem(Label("Stores"), id="cat-stores")
yield ListItem(Label("Providers"), id="cat-providers")
@@ -138,44 +137,62 @@ class ConfigModal(ModalScreen):
yield Button("Save", variant="success", id="save-btn")
yield Button("Add Store", variant="primary", id="add-store-btn")
yield Button("Add Provider", variant="primary", id="add-provider-btn")
yield Button("Add Net", variant="primary", id="add-net-btn")
yield Button("Back", id="back-btn")
yield Button("Close", variant="error", id="cancel-btn")
def on_mount(self) -> None:
self.query_one("#add-store-btn", Button).display = False
self.query_one("#add-provider-btn", Button).display = False
self.query_one("#add-net-btn", Button).display = False
self.refresh_view()
def refresh_view(self) -> None:
container = self.query_one("#fields-container", ScrollableContainer)
"""
Refresh the content area. We debounce this call and use a render_id
to avoid race conditions with Textual's async widget mounting.
"""
self._render_id = getattr(self, "_render_id", 0) + 1
if hasattr(self, "_refresh_timer"):
self._refresh_timer.stop()
self._refresh_timer = self.set_timer(0.02, self._actual_refresh)
def _actual_refresh(self) -> None:
try:
container = self.query_one("#fields-container", ScrollableContainer)
except Exception:
return
self._button_id_map.clear()
self._input_id_map.clear()
# Clear existing synchronously
for child in list(container.children):
child.remove()
# Clear existing
container.query("*").remove()
# Update visibility of buttons
try:
self.query_one("#add-store-btn", Button).display = (self.current_category == "stores" and self.editing_item_name is None)
self.query_one("#add-provider-btn", Button).display = (self.current_category == "providers" and self.editing_item_name is None)
self.query_one("#add-net-btn", Button).display = (self.current_category == "networking" and self.editing_item_name is None)
self.query_one("#back-btn", Button).display = (self.editing_item_name is not None)
self.query_one("#save-btn", Button).display = (self.editing_item_name is not None or self.current_category == "globals")
except Exception:
pass
# We mount using call_after_refresh to ensure the removals are processed by Textual
# before we try to mount new widgets with potentially duplicate IDs.
render_id = self._render_id
def do_mount():
# If a new refresh was started, ignore this old mount request
if getattr(self, "_render_id", 0) != render_id:
return
# Final check that container is empty. remove() is async.
if container.children:
for child in list(container.children):
child.remove()
if self.editing_item_name:
self.render_item_editor(container)
elif self.current_category == "globals":
self.render_globals(container)
elif self.current_category == "networking":
self.render_networking(container)
elif self.current_category == "stores":
self.render_stores(container)
elif self.current_category == "providers":
@@ -241,73 +258,6 @@ class ConfigModal(ModalScreen):
row.mount(Button("Paste", id=f"paste-{inp_id}", classes="paste-btn"))
idx += 1
def render_networking(self, container: ScrollableContainer) -> None:
container.mount(Label("ZeroTier Networks (local)", classes="config-label"))
from API import zerotier as zt
# Show whether we have an explicit authtoken available and its source
try:
token_src = zt._get_token_path()
except Exception:
token_src = None
if token_src == "env":
container.mount(Static("Auth: authtoken provided via env var (ZEROTIER_AUTH_TOKEN) — no admin required", classes="config-note"))
elif token_src:
container.mount(Static(f"Auth: authtoken file found: {token_src} — no admin required", classes="config-note"))
else:
container.mount(Static("Auth: authtoken not found in workspace; TUI may need admin to join networks", classes="config-warning"))
try:
local_nets = zt.list_networks()
if not local_nets:
container.mount(Static("No active ZeroTier networks found on this machine."))
else:
for n in local_nets:
row = Horizontal(
Static(f"{n.name} [{n.id}] - {n.status}", classes="item-label"),
Button("Leave", variant="error", id=f"zt-leave-{n.id}"),
classes="item-row"
)
container.mount(row)
except Exception as exc:
container.mount(Static(f"Error listing ZeroTier networks: {exc}"))
container.mount(Rule())
container.mount(Label("Connectors", classes="config-label"))
net = self.config_data.get("networking", {})
if not net:
container.mount(Static("No connectors configured."))
else:
idx = 0
for ntype, conf in net.items():
edit_id = f"edit-net-{idx}"
del_id = f"del-net-{idx}"
self._button_id_map[edit_id] = ("edit", "networking", ntype)
self._button_id_map[del_id] = ("del", "networking", ntype)
idx += 1
label = ntype
if ntype == "zerotier":
serve = conf.get("serve", "Unknown")
net_id = conf.get("network_id", "Unknown")
net_name = net_id
try:
for ln in local_nets:
if ln.id == net_id:
net_name = ln.name
break
except Exception: pass
label = f"{serve} ---> {net_name}"
row = Horizontal(
Static(label, classes="item-label"),
Button("Edit", id=edit_id),
Button("Delete", variant="error", id=del_id),
classes="item-row"
)
container.mount(row)
def render_stores(self, container: ScrollableContainer) -> None:
container.mount(Label("Configured Stores", classes="config-label"))
stores = self.config_data.get("store", {})
@@ -402,30 +352,6 @@ class ConfigModal(ModalScreen):
except Exception:
pass
# Fetch Networking schema
if item_type == "networking":
if item_name == "zerotier":
from API import zerotier as zt
local_net_choices = []
try:
for n in zt.list_networks():
local_net_choices.append((f"{n.name} ({n.id})", n.id))
except Exception: pass
local_store_choices = []
for s_type, s_data in self.config_data.get("store", {}).items():
for s_name in s_data.keys():
local_store_choices.append(s_name)
schema = [
{"key": "network_id", "label": "Network to Share on", "choices": local_net_choices},
{"key": "serve", "label": "Local Store to Share", "choices": local_store_choices},
{"key": "port", "label": "Port", "default": "999"},
{"key": "api_key", "label": "Access Key (API Key)", "default": "", "secret": True},
]
for f in schema:
provider_schema_map[f["key"].upper()] = f
# Use columns for better layout of inputs with paste buttons
container.mount(Label("Edit Settings"))
# render_item_editor will handle the inputs for us if we set these
@@ -583,8 +509,6 @@ class ConfigModal(ModalScreen):
if not event.item: return
if event.item.id == "cat-globals":
self.current_category = "globals"
elif event.item.id == "cat-networking":
self.current_category = "networking"
elif event.item.id == "cat-stores":
self.current_category = "stores"
elif event.item.id == "cat-providers":
@@ -608,24 +532,7 @@ class ConfigModal(ModalScreen):
if not self.validate_current_editor():
return
try:
# If we are editing networking.zerotier, check if network_id changed and join it
if self.editing_item_type == "networking" and self.editing_item_name == "zerotier":
old_id = str(self.config_data.get("networking", {}).get("zerotier", {}).get("network_id") or "").strip()
self.save_all()
new_id = str(self.config_data.get("networking", {}).get("zerotier", {}).get("network_id") or "").strip()
if new_id and new_id != old_id:
from API import zerotier as zt
try:
if zt.join_network(new_id):
self.notify(f"Joined ZeroTier network {new_id}")
else:
self.notify(f"Config saved, but failed to join network {new_id}", severity="warning")
except Exception as exc:
self.notify(f"Join error: {exc}", severity="error")
else:
self.save_all()
self.save_all()
self.notify("Configuration saved!")
# Return to the main list view within the current category
self.editing_item_name = None
@@ -633,15 +540,6 @@ class ConfigModal(ModalScreen):
self.refresh_view()
except Exception as exc:
self.notify(f"Save failed: {exc}", severity="error", timeout=10)
elif bid.startswith("zt-leave-"):
nid = bid.replace("zt-leave-", "")
from API import zerotier as zt
try:
zt.leave_network(nid)
self.notify(f"Left ZeroTier network {nid}")
self.refresh_view()
except Exception as exc:
self.notify(f"Failed to leave: {exc}", severity="error")
elif bid in self._button_id_map:
action, itype, name = self._button_id_map[bid]
if action == "edit":
@@ -657,9 +555,6 @@ class ConfigModal(ModalScreen):
elif itype == "provider":
if "provider" in self.config_data and name in self.config_data["provider"]:
del self.config_data["provider"][name]
elif itype == "networking":
if "networking" in self.config_data and name in self.config_data["networking"]:
del self.config_data["networking"][name]
self.refresh_view()
elif bid == "add-store-btn":
all_classes = _discover_store_classes()
@@ -685,9 +580,6 @@ class ConfigModal(ModalScreen):
except Exception:
pass
self.app.push_screen(SelectionModal("Select Provider Type", options), callback=self.on_provider_type_selected)
elif bid == "add-net-btn":
options = ["zerotier"]
self.app.push_screen(SelectionModal("Select Networking Service", options), callback=self.on_net_type_selected)
elif bid.startswith("paste-"):
# Programmatic paste button
target_id = bid.replace("paste-", "")
@@ -725,124 +617,6 @@ class ConfigModal(ModalScreen):
def on_store_type_selected(self, stype: str) -> None:
if not stype: return
if stype == "zerotier":
# Push a discovery wizard
from TUI.modalscreen.selection_modal import SelectionModal
from API import zerotier as zt
# 1. Choose Network
joined = zt.list_networks()
if not joined:
self.notify("Error: Join a ZeroTier network first in 'Connectors'", severity="error")
return
net_options = [f"{n.name or 'Network'} ({n.id})" for n in joined]
def on_net_selected(net_choice: str):
if not net_choice: return
net_id = net_choice.split("(")[-1].rstrip(")")
# 2. Host or Connect?
def on_mode_selected(mode: str):
if not mode: return
if mode == "Host (Share a local store)":
# 3a. Select Local Store to Share
local_stores = []
for s_type, s_data in self.config_data.get("store", {}).items():
if s_type == "zerotier": continue
for s_name in s_data.keys():
local_stores.append(f"{s_name} ({s_type})")
if not local_stores:
self.notify("No local stores available to share.", severity="error")
return
def on_share_selected(share_choice: str):
if not share_choice: return
share_name = share_choice.split(" (")[0]
# Update networking config
if "networking" not in self.config_data: self.config_data["networking"] = {}
zt_net = self.config_data["networking"].setdefault("zerotier", {})
zt_net["serve"] = share_name
zt_net["network_id"] = net_id
if not zt_net.get("port"):
zt_net["port"] = "999"
try:
self.save_all()
from SYS.background_services import ensure_zerotier_server_running
ensure_zerotier_server_running()
self.notify(f"ZeroTier auto-saved: Sharing '{share_name}' on network {net_id}")
except Exception as e:
self.notify(f"Auto-save failed: {e}", severity="error")
self.refresh_view()
self.app.push_screen(SelectionModal("Select Local Store to Share", local_stores), callback=on_share_selected)
else:
# 3b. Connect to Remote Peer - Background Discovery
@work
async def run_discovery(node):
    """Background worker: scan the ZeroTier network for storage peers.

    Runs the (blocking) discovery probe in an executor so the UI stays
    responsive, then presents the found peers in a selection modal.
    NOTE(review): `net_id`, `zt` and `SelectionModal` are captured from the
    enclosing scope.
    """
    self.notify(f"Discovery: Scanning {net_id} for peers...", timeout=5)
    # Optional ZeroTier Central token enables member lookup during discovery.
    central_token = self.config_data.get("networking", {}).get("zerotier", {}).get("api_key")
    try:
        import asyncio
        from functools import partial
        loop = asyncio.get_event_loop()
        # Probe both our remote-storage port (999) and the Hydrus port (45869).
        probes = await loop.run_in_executor(None, partial(
            zt.discover_services_on_network, net_id, ports=[999, 45869], api_token=central_token
        ))
    except Exception as e:
        self.notify(f"Discovery error: {e}", severity="error")
        return
    if not probes:
        self.notify("No peers found. Check firewall or server status.", severity="warning")
        return
    # Build human-readable labels: "address (name) [Locked]".
    peer_options = []
    for p in probes:
        label = "Remote"
        if isinstance(p.payload, dict):
            label = p.payload.get("name") or p.payload.get("peer_id") or label
        # 401 means the peer answered but requires an API key.
        status = " [Locked]" if p.status_code == 401 else ""
        peer_options.append(f"{p.address} ({label}){status}")
    def on_peer_selected(choice: str):
        """Map the selected label back to its probe and save the store."""
        if not choice: return
        addr = choice.split(" ")[0]
        match = next((p for p in probes if p.address == addr), None)
        if match:
            save_connected_store(match)
    self.app.push_screen(SelectionModal("Select Peer to Connect", peer_options), callback=on_peer_selected)
    def save_connected_store(p: zt.ZeroTierServiceProbe):
        """Persist a store=zerotier entry for the chosen peer and save config."""
        # Derive a config-safe name from the peer's IP, e.g. zt_192_168_1_2.
        new_name = f"zt_{p.address.replace('.', '_')}"
        if "store" not in self.config_data: self.config_data["store"] = {}
        store_cfg = self.config_data["store"].setdefault("zerotier", {})
        store_cfg[new_name] = {
            "NAME": new_name,
            "NETWORK_ID": net_id,
            "HOST": p.address,
            "PORT": str(p.port),
            "SERVICE": p.service_hint or "remote"
        }
        self.save_all()
        self.notify(f"Connected to {p.address}")
        self.refresh_view()
run_discovery(self)
self.app.push_screen(SelectionModal("ZeroTier Mode", ["Host (Share a local store)", "Connect (Use a remote store)"]), callback=on_mode_selected)
self.app.push_screen(SelectionModal("Select ZeroTier Network", net_options), callback=on_net_selected)
return
new_name = f"new_{stype}"
if "store" not in self.config_data:
self.config_data["store"] = {}
@@ -907,18 +681,6 @@ class ConfigModal(ModalScreen):
self.editing_item_name = ptype
self.refresh_view()
def on_net_type_selected(self, ntype: str) -> None:
    """Begin editing the networking sub-section *ntype*.

    An empty selection (modal dismissed) is a no-op. Otherwise the chosen
    networking type becomes the item under edit, a (possibly empty) entry
    for it is guaranteed to exist under ``config_data["networking"]``, and
    the view is re-rendered.
    """
    if not ntype:
        return
    self.editing_item_type = "networking"
    self.editing_item_name = ntype
    # Guarantee the section exists so the editor has something to bind to.
    networking = self.config_data.setdefault("networking", {})
    networking.setdefault(ntype, {})
    self.refresh_view()
def _update_config_value(self, widget_id: str, value: Any) -> None:
if widget_id not in self._input_id_map:
return

View File

@@ -31,8 +31,7 @@ from ._shared import (
from SYS import pipeline as ctx
STORAGE_ORIGINS = {"local",
"hydrus",
"zerotier"}
"hydrus"}
class _WorkerLogger:

View File

@@ -251,35 +251,6 @@ def _run(result: Any, args: List[str], config: Dict[str, Any]) -> int:
except Exception as exc:
debug(f"Cookies check failed: {exc}")
# ZeroTier Hosting
zt_conf = config.get("networking", {}).get("zerotier", {})
if zt_conf.get("serve"):
from SYS.background_services import ensure_zerotier_server_running
try:
debug("ZeroTier hosting enabled; ensuring server is running")
ensure_zerotier_server_running()
except Exception as exc:
debug(f"ensure_zerotier_server_running failed: {exc}")
serve_target = zt_conf.get("serve")
port = zt_conf.get("port") or 999
status = "OFFLINE"
detail = f"Sharing: {serve_target} on port {port}"
try:
from API.HTTP import HTTPClient
debug(f"Probing ZeroTier health on 127.0.0.1:{port}")
# Probing 127.0.0.1 is more reliable on Windows than localhost
with HTTPClient(timeout=1.0, retries=0) as client:
resp = client.get(f"http://127.0.0.1:{port}/health")
if resp.status_code == 200:
status = "ONLINE"
payload = resp.json()
detail += f" (Live: {payload.get('name', 'unknown')})"
debug(f"ZeroTier host responded: status={resp.status_code}, payload_keys={list(payload.keys()) if isinstance(payload, dict) else 'unknown'}")
except Exception as exc:
debug(f"ZeroTier probe failed: {exc}")
_add_startup_check(startup_table, status, "ZeroTier Host", detail=detail)
except Exception as exc:
debug(f"Status check failed: {exc}")

View File

@@ -1,152 +0,0 @@
import sys
import requests
from pathlib import Path
from typing import Any, Dict, Sequence
# Add project root to sys.path
root = Path(__file__).resolve().parent.parent
if str(root) not in sys.path:
sys.path.insert(0, str(root))
from cmdlet._shared import Cmdlet
from SYS.config import load_config
from SYS.result_table import Table
from API import zerotier as zt
def exec_zerotier(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
    """Render a status table for the local ZeroTier node and hosting server.

    Rows produced:
      * HOST          - whether the local remote-storage server answers /health
      * NETWORK       - one row per joined ZeroTier network
      * SYSTEM/ERROR  - diagnostics when zerotier-cli is missing or listing fails

    Args:
        result: Pipeline input (unused).
        args: CLI arguments (unused).
        config: Loaded configuration; falls back to loading from the CWD.

    Returns:
        0 always - the status display is best-effort and never fails the pipeline.
    """
    # Use provided config or fall back to CWD load
    cfg = config if config else load_config(Path.cwd())
    table = Table("ZeroTier Status")

    # 1. Local hub status.
    row = table.add_row()
    row.add_column("TYPE", "HOST")
    row.add_column("NAME", "localhost")

    # Node ID via `zerotier-cli info -j` when the helper exposes the CLI shim.
    node_id = "???"
    try:
        if hasattr(zt, "_run_cli_json"):
            info = zt._run_cli_json("info", "-j")
            node_id = info.get("address", "???")
    except Exception:
        # Best-effort only; keep the placeholder on any failure.
        pass
    row.add_column("ID", node_id)

    # Check if the local server is responsive.  We try several host spellings
    # (127.0.0.1, localhost, and the outbound LAN IP) to be robust against
    # Windows networking quirks, and retry in case the server just started.
    try:
        import socket

        def get_local_ip():
            """Best-effort LAN IP via a routed (no-traffic) UDP connect."""
            try:
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                try:
                    s.connect(("8.8.8.8", 80))
                    return s.getsockname()[0]
                finally:
                    s.close()
            except Exception:
                return None

        hosts = ["127.0.0.1", "localhost"]
        local_ip = get_local_ip()
        if local_ip:
            hosts.append(local_ip)
        success = False
        last_err = ""
        import time
        for attempt in range(3):
            for host in hosts:
                try:
                    # Bypass any configured proxies for loopback probing.
                    resp = requests.get(f"http://{host}:999/health", timeout=3, proxies={"http": None, "https": None})
                    if resp.status_code == 200:
                        row.add_column("STATUS", "ONLINE")
                        row.add_column("ADDRESS", f"{host}:999")
                        row.add_column("DETAIL", f"Serving {cfg.get('active_store', 'default')}")
                        success = True
                        break
                    elif resp.status_code == 401:
                        # Server is up but requires an API key.
                        row.add_column("STATUS", "Serving (Locked)")
                        row.add_column("ADDRESS", f"{host}:999")
                        row.add_column("DETAIL", "401 Unauthorized - API Key required")
                        success = True
                        break
                except Exception as e:
                    last_err = str(e)
                    continue
            if success:
                break
            time.sleep(1)  # Wait between attempts
        if not success:
            row.add_column("STATUS", "OFFLINE")
            row.add_column("ADDRESS", "127.0.0.1:999")
            row.add_column("DETAIL", f"Server not responding on port 999. Last attempt ({hosts[-1]}): {last_err}")
    except Exception as e:
        row.add_column("STATUS", "OFFLINE")
        row.add_column("ADDRESS", "127.0.0.1:999")
        row.add_column("DETAIL", f"Status check failed: {e}")

    # 2. One row per joined ZeroTier network.
    if zt.is_available():
        try:
            networks = zt.list_networks()
            for net in networks:
                row = table.add_row()
                row.add_column("TYPE", "NETWORK")
                row.add_column("NAME", getattr(net, "name", "Unnamed"))
                row.add_column("ID", getattr(net, "id", ""))
                status = getattr(net, "status", "OK")
                assigned = getattr(net, "assigned_addresses", [])
                ip_str = assigned[0] if assigned else ""
                row.add_column("STATUS", status)
                row.add_column("ADDRESS", ip_str)
        except Exception as e:
            row = table.add_row()
            row.add_column("TYPE", "ERROR")
            row.add_column("DETAIL", f"Failed to list networks: {e}")
    else:
        row = table.add_row()
        row.add_column("TYPE", "SYSTEM")
        row.add_column("NAME", "ZeroTier")
        row.add_column("STATUS", "NOT FOUND")
        row.add_column("DETAIL", "zerotier-cli not in path")

    # 3. Output: rich renderer when available, else a plain-text fallback.
    try:
        from cmdnat.out_table import TableOutput
        TableOutput().render(table)
    except Exception:
        # Fallback for raw CLI
        print(f"\n--- {table.title} ---")
        for r in table.rows:
            # Use the get_column method from ResultRow
            t = r.get_column("TYPE") or ""
            n = r.get_column("NAME") or ""
            s = r.get_column("STATUS") or ""
            a = r.get_column("ADDRESS") or ""
            row_id = r.get_column("ID") or ""  # renamed: don't shadow builtin `id`
            d = r.get_column("DETAIL") or ""
            print(f"[{t:7}] {n:15} | {s:15} | {a:20} | {row_id} | {d}")
        print("-" * 100)
    return 0
# Cmdlet registration: exposes this module as the `.zerotier` command.
CMDLET = Cmdlet(
name=".zerotier",
summary="Check ZeroTier node and hosting status",
usage=".zerotier",
exec=exec_zerotier,
)
# Allow running the status check directly as a standalone script.
if __name__ == "__main__":
exec_zerotier(None, sys.argv[1:], {})

View File

@@ -1,91 +0,0 @@
# ZeroTier integration (store sharing)
This document describes how Medios-Macina integrates with ZeroTier to share
storage backends between machines on a private virtual network.
Goals
- Allow you to expose stores (folder-based, remote storage server, Hydrus client)
to other members of your ZeroTier network.
- Keep the CLI experience identical: remote stores appear as normal `-store` backends.
- Use secure authentication (API keys / per-store tokens) and limit exposure to private network.
Prerequisites
- Each machine must run `zerotier-one` and be a member of your ZeroTier network.
- The Medios-Macina instance on each machine should run the `remote_storage_server.py`
or a Hydrus client instance you want to expose.
- The remote storage server requires Flask and Flask-CORS to run (install with: `pip install flask flask-cors`).
Auto-install behavior
- When a `zerotier` section is present in `config.conf` **or** a `store=zerotier` instance is configured, the CLI will attempt to auto-install the required packages (`flask`, `flask-cors`, and `werkzeug`) on startup unless you disable it with `auto_install = false` in the `zerotier` config block. This mirrors the behavior for other optional features (e.g., Soulseek).
- On your controller/management machine, authorize members via ZeroTier Central.
Configuration (conceptual)
You can configure networks and Zerotier-backed stores in your `config.conf`. Here
are example snippets and recommendations.
## Top-level ZeroTier networks (recommended)
Use a `zerotier` section to list networks your instance is willing to use/auto-join:
```ini
[zerotier]
# Example config (implementation treats this as a dict via the loader)
# networks:
# home:
# network_id: 8056c2e21c000001
# api_key: my-zt-central-token ; optional, only needed for automating member authorization
# auto_join: true
# prefer_hosts: ["192.168.86.42"] ; optional peer IP inside the ZT network
```
## Store config (ZeroTier store instances)
Add a `store=zerotier` block so the Store registry can create a ZeroTier store instance:
```ini
[store=zerotier]
my-remote = { "NAME": "my-remote", "NETWORK_ID": "8056c2e21c000001", "SERVICE": "remote", "PORT": 999, "API_KEY": "myremotekey" }
hydrus-remote = { "NAME": "hydrus-remote", "NETWORK_ID": "8056c2e21c000001", "SERVICE": "hydrus", "PORT": 45869, "API_KEY": "hydrus-access-key" }
```
- `SERVICE` can be `remote` (our `remote_storage_server`), or `hydrus` (Hydrus API).
- `HOST` is optional; if present, discovery is skipped and the host:port is used.
- `API_KEY` will be sent as `X-API-Key` (and Hydrus access keys, when relevant).
Operation & discovery
- The local ZeroTier store wrapper will attempt to discover peers on the configured
ZeroTier network by inspecting assigned addresses on this node and probing common
service endpoints (e.g., `/health`, `/api_version`).
- For `hydrus` service types we look for Hydrus-style `/api_version` responses.
- For `remote` service types we look for our `remote_storage_server` `/health` endpoint.
Security notes
- Your ZeroTier network provides a private IP layer, but the exposed services
should still require authentication (API keys) and enforce scope (read/write).
- If you plan to expose stores to other users, consider per-store API keys with
roles (read-only, write, admin) and monitor/audit access.
Next steps / prototyping
- The first prototype in this repo adds `API/zerotier.py` (discovery + join helpers)
and `Store/ZeroTier.py` (a store wrapper that proxies to `hydrus` or `remote` endpoints).
- Upload support (server-side `POST /files/upload`) is now implemented, allowing authenticated multipart uploads; the ZeroTier store wrapper supports `add_file()`, and the `add-file` cmdlet can be used with a configured ZeroTier store for end-to-end uploads.
Example: upload via the helper script (discovers a remote on the network and uploads the file):
```powershell
python .\scripts\zerotier_setup.py --upload 8056c2e21c000001 --file "C:\path\to\file.mp4" --api-key myremotekey --tag tag1 --tag tag2
```
Or using curl directly against a discovered ZeroTier peer's IP:
```powershell
curl -X POST -H "X-API-Key: myremotekey" -F "file=@/path/to/file.mp4" -F "tag=tag1" http://<zerotier-ip>:999/files/upload
```
If you'd like I can:
- Add an example `scripts/zt-join.py` helper that uses the API wrapper to join a network;
- Add a presigned-upload + multipart upload flow to `scripts/remote_storage_server.py` so
ZeroTier stores can support `add-file` uploads directly.
Tell me which of the above you want next (upload support, auto-join helper, or presigned flow) and I'll proceed.

View File

@@ -1,773 +0,0 @@
"""Remote Storage Server - REST API for file management on mobile devices.
This server runs on a mobile device (Android with Termux, iOS with iSH, etc.)
and exposes the local library database as a REST API. Your PC connects to this
server and uses it as a remote storage backend through the RemoteStorageBackend.
## INSTALLATION
### On Android (Termux):
1. Install Termux from Play Store: https://play.google.com/store/apps/details?id=com.termux
2. In Termux:
$ apt update && apt install python
$ pip install flask flask-cors
3. Copy this file to your device
4. Run it (with optional API key):
$ python remote_storage_server.py --storage-path /path/to/storage --port 999
$ python remote_storage_server.py --storage-path /path/to/storage --api-key mysecretkey
5. Server prints connection info automatically (IP, port, API key)
### On PC:
1. Install requests: pip install requests
2. Add to config.conf:
[store=remote]
name="phone"
url="http://192.168.1.100:999"
api_key="mysecretkey"
timeout=30
Note: API key is optional. Works on WiFi or cellular data.
## USAGE
After setup, all cmdlet work with the phone:
$ search-file zohar -store phone
$ @1-3 | add-relationship -king @4 -store phone
$ @1 | get-relationship -store phone
The server exposes REST endpoints that RemoteStorageBackend uses internally.
"""
from __future__ import annotations
import os
import sys
import argparse
import logging
import threading
import time
from pathlib import Path
from typing import Optional, Dict, Any
from datetime import datetime
from functools import wraps
# Add parent directory to path for imports
sys.path.insert(0, str(Path(__file__).parent.parent))
# ============================================================================
# CONFIGURATION
# ============================================================================
# Shared logging setup for the whole server process.
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s: %(message)s"
)
logger = logging.getLogger(__name__)
# Process-wide settings; populated by main() from CLI arguments.
STORAGE_PATH: Optional[Path] = None
API_KEY: Optional[str] = None # API key for authentication (None = no auth required)
# Cache for database connection to prevent "database is locked" on high frequency requests
_DB_CACHE: Dict[str, Any] = {}
def get_db(path: Path):
    """Return a process-wide cached, already-entered search optimizer for *path*.

    The optimizer's context is entered exactly once per path and deliberately
    never exited: keeping a single open connection avoids "database is locked"
    errors under high-frequency requests.
    """
    from API.folder import LocalLibrarySearchOptimizer
    key = str(path)
    db = _DB_CACHE.get(key)
    if db is None:
        db = LocalLibrarySearchOptimizer(path)
        db.__enter__()
        _DB_CACHE[key] = db
    return db
# Try importing Flask - will be used in main() only
try:
from flask import Flask, request, jsonify
from flask_cors import CORS
HAS_FLASK = True
except ImportError:
HAS_FLASK = False
# ============================================================================
# UTILITY FUNCTIONS
# ============================================================================
def monitor_parent(parent_pid: int) -> None:
    """Monitor the parent process and shut down if it dies.

    Polls every 5 seconds; exits the whole process with os._exit(0) once the
    parent is gone. Intended to run in a daemon thread (see main()).

    Args:
        parent_pid: PID to watch. Values <= 1 (init/no parent) are ignored.
    """
    if parent_pid <= 1:
        return
    logger.info(f"Monitoring parent process {parent_pid}")
    # On Windows, we might need a different approach if os.kill(pid, 0) is unreliable
    is_windows = sys.platform == "win32"
    while True:
        try:
            if is_windows:
                # OpenProcess with PROCESS_QUERY_LIMITED_INFORMATION (0x1000)
                # This is safer than os.kill on Windows for existence checks
                import ctypes
                PROCESS_QUERY_LIMITED_INFORMATION = 0x1000
                handle = ctypes.windll.kernel32.OpenProcess(PROCESS_QUERY_LIMITED_INFORMATION, False, parent_pid)
                if handle:
                    exit_code = ctypes.c_ulong()
                    ctypes.windll.kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code))
                    ctypes.windll.kernel32.CloseHandle(handle)
                    # STILL_ACTIVE is 259
                    if exit_code.value != 259:
                        logger.info(f"Parent process {parent_pid} finished with code {exit_code.value}. Shutting down...")
                        # os._exit skips cleanup handlers; fine for a hard stop.
                        os._exit(0)
                else:
                    # On Windows, sometimes we lose access to the handle if the parent is transitioning
                    # or if it was started from a shell that already closed.
                    # We'll ignore handle failures for now unless we want to be very strict.
                    pass
            else:
                # POSIX: signal 0 performs an existence/permission check only.
                os.kill(parent_pid, 0)
        except Exception as e:
            # Parent is dead or inaccessible
            logger.info(f"Parent process {parent_pid} no longer accessible: {e}. Shutting down server...")
            os._exit(0)
        time.sleep(5) # Increase check interval to be less aggressive
def get_local_ip() -> Optional[str]:
    """Get the local IP address that would be used for external connections.

    Opens a UDP socket and "connects" it to a public address; this sends no
    traffic, it only asks the OS to pick the outbound interface, whose address
    is then read back with getsockname().

    Returns:
        The local IPv4 address as a string, or None if no route is available.
    """
    import socket
    try:
        # `with` guarantees the socket is closed even if connect() raises
        # (the original leaked the fd on the exception path).
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(("8.8.8.8", 80))  # Google DNS; never actually contacted
            return s.getsockname()[0]
    except Exception:
        return None
# ============================================================================
# FLASK APP FACTORY
# ============================================================================
def create_app():
    """Create and configure the Flask app with all routes.

    Raises ImportError when Flask/flask-cors are missing. Routes read the
    module-level STORAGE_PATH and API_KEY globals set by main().
    """
    if not HAS_FLASK:
        raise ImportError(
            "Flask not installed. Install with: pip install flask flask-cors"
        )
    from flask import Flask, request, jsonify, send_file
    from flask_cors import CORS
    app = Flask(__name__)
    CORS(app)
    # ========================================================================
    # HELPER DECORATORS
    # ========================================================================
    def require_auth():
        """Decorator to check API key authentication if configured."""
        def decorator(f):
            @wraps(f)
            def decorated_function(*args, **kwargs):
                if API_KEY:
                    # Get API key from header or query parameter
                    provided_key = request.headers.get("X-API-Key"
                                                       ) or request.args.get("api_key")
                    if not provided_key or provided_key != API_KEY:
                        return jsonify({"error": "Unauthorized. Invalid or missing API key."}), 401
                return f(*args, **kwargs)
            return decorated_function
        return decorator
    def require_storage():
        """Decorator to ensure storage path is configured."""
        def decorator(f):
            @wraps(f)
            def decorated_function(*args, **kwargs):
                if not STORAGE_PATH:
                    return jsonify({"error": "Storage path not configured"}), 500
                return f(*args, **kwargs)
            return decorated_function
        return decorator
    # ========================================================================
    # HEALTH CHECK
    # ========================================================================
    @app.route("/health", methods=["GET"])
    def health():
        """Check server health and storage availability."""
        # Check auth manually to allow discovery even if locked
        authed = True
        if API_KEY:
            provided_key = request.headers.get("X-API-Key") or request.args.get("api_key")
            if not provided_key or provided_key != API_KEY:
                authed = False
        status = {
            "status": "ok",
            "service": "remote_storage",
            "name": os.environ.get("MM_SERVER_NAME", "Remote Storage"),
            "storage_configured": STORAGE_PATH is not None,
            "timestamp": datetime.now().isoformat(),
            "locked": not authed and API_KEY is not None
        }
        # If not authed but API_KEY is required, return minimal info for discovery
        if not authed and API_KEY:
            return jsonify(status), 200
        if STORAGE_PATH:
            status["storage_path"] = str(STORAGE_PATH)
            status["storage_exists"] = STORAGE_PATH.exists()
            try:
                # Probe only; the handle itself is unused here.
                search_db = get_db(STORAGE_PATH)
                status["database_accessible"] = True
            except Exception as e:
                status["database_accessible"] = False
                status["database_error"] = str(e)
        return jsonify(status), 200
    # ========================================================================
    # FILE OPERATIONS
    # ========================================================================
    @app.route("/files/search", methods=["GET"])
    @require_auth()
    @require_storage()
    def search_files():
        """Search for files by name or tag."""
        query = request.args.get("q", "")
        limit = request.args.get("limit", 100, type=int)
        # Allow empty query or '*' for "list everything"
        db_query = query if query and query != "*" else ""
        try:
            search_db = get_db(STORAGE_PATH)
            results = search_db.search_by_name(db_query, limit)
            tag_results = search_db.search_by_tag(db_query, limit)
            # De-duplicate name- and tag-matches by file hash.
            all_results_dict = {
                r["hash"]: r
                for r in (results + tag_results)
            }
            # Fetch tags for each result to support title extraction on client
            if search_db.db:
                for res in all_results_dict.values():
                    file_hash = res.get("hash")
                    if file_hash:
                        tags = search_db.db.get_tags(file_hash)
                        res["tag"] = tags
            return (
                jsonify(
                    {
                        "query": query,
                        "count": len(all_results_dict),
                        "files": list(all_results_dict.values()),
                    }
                ),
                200,
            )
        except Exception as e:
            logger.error(f"Search error: {e}", exc_info=True)
            return jsonify({"error": f"Search failed: {str(e)}"}), 500
    @app.route("/files/<file_hash>", methods=["GET"])
    @require_auth()
    @require_storage()
    def get_file_metadata(file_hash: str):
        """Get metadata for a specific file by hash."""
        try:
            search_db = get_db(STORAGE_PATH)
            db = search_db.db
            if not db:
                return jsonify({"error": "Database unavailable"}), 500
            file_path = db.search_hash(file_hash)
            if not file_path or not file_path.exists():
                return jsonify({"error": "File not found"}), 404
            metadata = db.get_metadata(file_hash)
            tags = db.get_tags(file_hash) # Use hash string
            return (
                jsonify(
                    {
                        "hash": file_hash,
                        "path": str(file_path),
                        "size": file_path.stat().st_size,
                        "metadata": metadata,
                        "tag": tags,
                    }
                ),
                200,
            )
        except Exception as e:
            logger.error(f"Get metadata error: {e}", exc_info=True)
            return jsonify({"error": f"Failed to get metadata: {str(e)}"}), 500
    @app.route("/files/raw/<file_hash>", methods=["GET"])
    @require_auth()
    @require_storage()
    def download_file(file_hash: str):
        """Download a raw file by hash."""
        try:
            search_db = get_db(STORAGE_PATH)
            db = search_db.db
            if not db:
                return jsonify({"error": "Database unavailable"}), 500
            file_path = db.search_hash(file_hash)
            if not file_path or not file_path.exists():
                return jsonify({"error": "File not found"}), 404
            return send_file(file_path)
        except Exception as e:
            logger.error(f"Download error: {e}", exc_info=True)
            return jsonify({"error": f"Download failed: {str(e)}"}), 500
    @app.route("/files/index", methods=["POST"])
    @require_auth()
    @require_storage()
    def index_file():
        """Index a new file in the storage.

        Expects JSON: {"path": str (required), "tag": [str], "url": [str]}.
        The file must already exist on the server's filesystem.
        """
        from SYS.utils import sha256_file
        data = request.get_json() or {}
        file_path_str = data.get("path")
        tags = data.get("tag", [])
        url = data.get("url", [])
        if not file_path_str:
            return jsonify({"error": "File path required"}), 400
        try:
            file_path = Path(file_path_str)
            if not file_path.exists():
                return jsonify({"error": "File does not exist"}), 404
            search_db = get_db(STORAGE_PATH)
            db = search_db.db
            if not db:
                return jsonify({"error": "Database unavailable"}), 500
            db.get_or_create_file_entry(file_path)
            if tags:
                db.add_tags(file_path, tags)
            if url:
                db.add_url(file_path, url)
            file_hash = sha256_file(file_path)
            return (
                jsonify(
                    {
                        "hash": file_hash,
                        "path": str(file_path),
                        "tags_added": len(tags),
                        "url_added": len(url),
                    }
                ),
                201,
            )
        except Exception as e:
            logger.error(f"Index error: {e}", exc_info=True)
            return jsonify({"error": f"Indexing failed: {str(e)}"}), 500
    @app.route("/files/upload", methods=["POST"])
    @require_auth()
    @require_storage()
    def upload_file():
        """Upload a file into storage (multipart/form-data).

        Accepts form fields:
        - file: uploaded file (required)
        - tag: repeated tag parameters or comma-separated string
        - url: repeated url parameters or comma-separated string
        """
        from API.folder import API_folder_store
        from SYS.utils import sha256_file, sanitize_filename, ensure_directory, unique_path
        if 'file' not in request.files:
            return jsonify({"error": "file required"}), 400
        file_storage = request.files.get('file')
        if file_storage is None:
            return jsonify({"error": "file required"}), 400
        filename = sanitize_filename(file_storage.filename or "upload")
        incoming_dir = STORAGE_PATH / "incoming"
        target_path = incoming_dir / filename
        # Avoid clobbering an existing upload with the same name.
        target_path = unique_path(target_path)
        try:
            # Initialize the DB first (run safety checks) before creating any files.
            with API_folder_store(STORAGE_PATH) as db:
                # Ensure the incoming directory exists only after DB safety checks pass.
                ensure_directory(incoming_dir)
                # Save uploaded file to storage
                file_storage.save(str(target_path))
                # Extract optional metadata
                tags = []
                if 'tag' in request.form:
                    # Support repeated form fields or comma-separated list
                    tags = request.form.getlist('tag') or []
                    if not tags and request.form.get('tag'):
                        tags = [t.strip() for t in str(request.form.get('tag') or "").split(",") if t.strip()]
                urls = []
                if 'url' in request.form:
                    urls = request.form.getlist('url') or []
                    if not urls and request.form.get('url'):
                        urls = [u.strip() for u in str(request.form.get('url') or "").split(",") if u.strip()]
                db.get_or_create_file_entry(target_path)
                if tags:
                    db.add_tags(target_path, tags)
                if urls:
                    db.add_url(target_path, urls)
                file_hash = sha256_file(target_path)
                return (
                    jsonify({
                        "hash": file_hash,
                        "path": str(target_path),
                        "tags_added": len(tags),
                        "url_added": len(urls),
                    }),
                    201,
                )
        except Exception as e:
            logger.error(f"Upload error: {e}", exc_info=True)
            return jsonify({"error": f"Upload failed: {str(e)}"}), 500
    # ========================================================================
    # TAG OPERATIONS
    # ========================================================================
    @app.route("/tags/<file_hash>", methods=["GET"])
    @require_auth()
    @require_storage()
    def get_tags(file_hash: str):
        """Get tags for a file."""
        from API.folder import API_folder_store
        try:
            with API_folder_store(STORAGE_PATH) as db:
                file_path = db.search_hash(file_hash)
                if not file_path:
                    return jsonify({"error": "File not found"}), 404
                tags = db.get_tags(file_path)
                return jsonify({"hash": file_hash, "tag": tags}), 200
        except Exception as e:
            logger.error(f"Get tags error: {e}", exc_info=True)
            return jsonify({"error": f"Failed: {str(e)}"}), 500
    @app.route("/tags/<file_hash>", methods=["POST"])
    @require_auth()
    @require_storage()
    def add_tags(file_hash: str):
        """Add tags to a file.

        JSON body: {"tag": [str] (required), "mode": "add"|"replace"}.
        """
        from API.folder import API_folder_store
        data = request.get_json() or {}
        tags = data.get("tag", [])
        mode = data.get("mode", "add")
        if not tags:
            return jsonify({"error": "Tag required"}), 400
        try:
            with API_folder_store(STORAGE_PATH) as db:
                file_path = db.search_hash(file_hash)
                if not file_path:
                    return jsonify({"error": "File not found"}), 404
                if mode == "replace":
                    # Replace semantics: clear all existing tags first.
                    db.remove_tags(file_path, db.get_tags(file_path))
                db.add_tags(file_path, tags)
                return jsonify({"hash": file_hash, "tag_added": len(tags), "mode": mode}), 200
        except Exception as e:
            logger.error(f"Add tags error: {e}", exc_info=True)
            return jsonify({"error": f"Failed: {str(e)}"}), 500
    @app.route("/tags/<file_hash>", methods=["DELETE"])
    @require_auth()
    @require_storage()
    def remove_tags(file_hash: str):
        """Remove tags from a file.

        Query param `tag` is a comma-separated list; when absent, ALL tags
        on the file are removed.
        """
        from API.folder import API_folder_store
        tags_str = request.args.get("tag", "")
        try:
            with API_folder_store(STORAGE_PATH) as db:
                file_path = db.search_hash(file_hash)
                if not file_path:
                    return jsonify({"error": "File not found"}), 404
                if tags_str:
                    tags_to_remove = [t.strip() for t in tags_str.split(",")]
                else:
                    tags_to_remove = db.get_tags(file_path)
                db.remove_tags(file_path, tags_to_remove)
                return jsonify({"hash": file_hash, "tags_removed": len(tags_to_remove)}), 200
        except Exception as e:
            logger.error(f"Remove tags error: {e}", exc_info=True)
            return jsonify({"error": f"Failed: {str(e)}"}), 500
    # ========================================================================
    # RELATIONSHIP OPERATIONS
    # ========================================================================
    @app.route("/relationships/<file_hash>", methods=["GET"])
    @require_auth()
    @require_storage()
    def get_relationships(file_hash: str):
        """Get relationships for a file."""
        from API.folder import API_folder_store
        try:
            with API_folder_store(STORAGE_PATH) as db:
                file_path = db.search_hash(file_hash)
                if not file_path:
                    return jsonify({"error": "File not found"}), 404
                metadata = db.get_metadata(file_path)
                relationships = metadata.get("relationships",
                                             {}) if metadata else {}
                return jsonify({"hash": file_hash, "relationships": relationships}), 200
        except Exception as e:
            logger.error(f"Get relationships error: {e}", exc_info=True)
            return jsonify({"error": f"Failed: {str(e)}"}), 500
    @app.route("/relationships", methods=["POST"])
    @require_auth()
    @require_storage()
    def set_relationship():
        """Set a relationship between two files.

        JSON body: {"from_hash": str, "to_hash": str, "type": str ("alt" default)}.
        """
        from API.folder import API_folder_store
        data = request.get_json() or {}
        from_hash = data.get("from_hash")
        to_hash = data.get("to_hash")
        rel_type = data.get("type", "alt")
        if not from_hash or not to_hash:
            return jsonify({"error": "from_hash and to_hash required"}), 400
        try:
            with API_folder_store(STORAGE_PATH) as db:
                from_path = db.search_hash(from_hash)
                to_path = db.search_hash(to_hash)
                if not from_path or not to_path:
                    return jsonify({"error": "File not found"}), 404
                db.set_relationship(from_path, to_path, rel_type)
                return jsonify({"from_hash": from_hash, "to_hash": to_hash, "type": rel_type}), 200
        except Exception as e:
            logger.error(f"Set relationship error: {e}", exc_info=True)
            return jsonify({"error": f"Failed: {str(e)}"}), 500
    # ========================================================================
    # URL OPERATIONS
    # ========================================================================
    @app.route("/url/<file_hash>", methods=["GET"])
    @require_auth()
    @require_storage()
    def get_url(file_hash: str):
        """Get known url for a file."""
        from API.folder import API_folder_store
        try:
            with API_folder_store(STORAGE_PATH) as db:
                file_path = db.search_hash(file_hash)
                if not file_path:
                    return jsonify({"error": "File not found"}), 404
                metadata = db.get_metadata(file_path)
                url = metadata.get("url", []) if metadata else []
                return jsonify({"hash": file_hash, "url": url}), 200
        except Exception as e:
            logger.error(f"Get url error: {e}", exc_info=True)
            return jsonify({"error": f"Failed: {str(e)}"}), 500
    @app.route("/url/<file_hash>", methods=["POST"])
    @require_auth()
    @require_storage()
    def add_url(file_hash: str):
        """Add url to a file. JSON body: {"url": [str] (required)}."""
        from API.folder import API_folder_store
        data = request.get_json() or {}
        url = data.get("url", [])
        if not url:
            return jsonify({"error": "url required"}), 400
        try:
            with API_folder_store(STORAGE_PATH) as db:
                file_path = db.search_hash(file_hash)
                if not file_path:
                    return jsonify({"error": "File not found"}), 404
                db.add_url(file_path, url)
                return jsonify({"hash": file_hash, "url_added": len(url)}), 200
        except Exception as e:
            logger.error(f"Add url error: {e}", exc_info=True)
            return jsonify({"error": f"Failed: {str(e)}"}), 500
    return app
# ============================================================================
# MAIN
# ============================================================================
def main():
if not HAS_FLASK:
print("ERROR: Flask and flask-cors required")
print("Install with: pip install flask flask-cors")
sys.exit(1)
parser = argparse.ArgumentParser(
description="Remote Storage Server for Medios-Macina",
epilog=
"Example: python remote_storage_server.py --storage-path /storage/media --port 999 --api-key mysecretkey",
)
parser.add_argument(
"--storage-path",
type=str,
required=True,
help="Path to storage directory"
)
parser.add_argument(
"--host",
type=str,
default="0.0.0.0",
help="Server host (default: 0.0.0.0)"
)
parser.add_argument(
"--port",
type=int,
default=999,
help="Server port (default: 999)"
)
parser.add_argument(
"--api-key",
type=str,
default=None,
help="API key for authentication (optional)"
)
parser.add_argument("--debug", action="store_true", help="Enable debug mode")
parser.add_argument(
"--monitor",
action="store_true",
help="Shut down if parent process dies"
)
parser.add_argument(
"--parent-pid",
type=int,
default=None,
help="Explicit PID to monitor (defaults to the immediate parent process)",
)
args = parser.parse_args()
# Start monitor thread if requested
if args.monitor:
monitor_pid = args.parent_pid or os.getppid()
if monitor_pid > 1:
monitor_thread = threading.Thread(
target=monitor_parent,
args=(monitor_pid, ),
daemon=True
)
monitor_thread.start()
global STORAGE_PATH, API_KEY
STORAGE_PATH = Path(args.storage_path).resolve()
API_KEY = args.api_key
if not STORAGE_PATH.exists():
print(f"ERROR: Storage path does not exist: {STORAGE_PATH}")
sys.exit(1)
# Get local IP address
local_ip = get_local_ip()
if not local_ip:
local_ip = "127.0.0.1"
print(f"\n{'='*70}")
print("Remote Storage Server - Medios-Macina")
print(f"{'='*70}")
print(f"Storage Path: {STORAGE_PATH}")
print(f"Local IP: {local_ip}")
print(f"Server URL: http://{local_ip}:{args.port}")
print(f"Health URL: http://{local_ip}:{args.port}/health")
print(
f"API Key: {'Enabled - ' + ('***' + args.api_key[-4:]) if args.api_key else 'Disabled (no auth)'}"
)
print(f"Debug Mode: {args.debug}")
print("\n📋 Config for config.conf:")
print("[store=remote]")
print('name="phone"')
print(f'url="http://{local_ip}:{args.port}"')
if args.api_key:
print(f'api_key="{args.api_key}"')
print("timeout=30")
print("\nOR use ZeroTier Networking (Server Side):")
print("[networking=zerotier]")
print(f'serve="{STORAGE_PATH.name}"')
print(f'port="{args.port}"')
if args.api_key:
print(f'api_key="{args.api_key}"')
print(f"\n{'='*70}\n")
try:
from API.folder import API_folder_store
with API_folder_store(STORAGE_PATH) as db:
logger.info("Database initialized successfully")
except Exception as e:
logger.error(f"Failed to initialize database: {e}")
sys.exit(1)
app = create_app()
app.run(host=args.host, port=args.port, debug=args.debug, use_reloader=False)
# Script entry point: run the storage server CLI when executed directly
# (never on import).
if __name__ == "__main__":
    main()

View File

@@ -1,133 +0,0 @@
#!/usr/bin/env python3
"""Simple ZeroTier helper for joining networks and discovering peers.
Usage:
python scripts/zerotier_setup.py --join <network_id>
python scripts/zerotier_setup.py --list
python scripts/zerotier_setup.py --discover <network_id>
This is a convenience tool to exercise the API/zerotier.py functionality while
prototyping and bringing up remote peers for store testing.
"""
from __future__ import annotations
import argparse
import json
import sys
from pathlib import Path
from SYS.logger import log
try:
from API import zerotier
except Exception:
zerotier = None
def main(argv=None):
    """CLI entry point for the ZeroTier helper.

    Exactly one action is performed per invocation, checked in order:
    --list, --join, --leave, --discover, --upload.  With no action the
    argparse help text is printed.

    Args:
        argv: Optional argument list (defaults to ``sys.argv[1:]`` via
            argparse when ``None``).

    Returns:
        int: 0 on success, 1 when the zerotier API module failed to import,
        2 when the requested action failed.
    """
    parser = argparse.ArgumentParser(description="ZeroTier helper for Medios-Macina")
    parser.add_argument("--list", action="store_true", help="List local ZeroTier networks")
    parser.add_argument("--join", type=str, help="Join a ZeroTier network by ID")
    parser.add_argument("--leave", type=str, help="Leave a ZeroTier network by ID")
    parser.add_argument("--discover", type=str, help="Discover services on a ZeroTier network ID")
    parser.add_argument("--upload", type=str, help="Upload a file to a discovered 'remote' service on this ZeroTier network ID")
    parser.add_argument("--file", type=str, help="Local file to upload (used with --upload)")
    parser.add_argument("--tag", action="append", help="Tag to attach (repeatable)", default=[])
    parser.add_argument("--url", action="append", help="URL to associate (repeatable)", default=[])
    parser.add_argument("--api-key", type=str, help="API key to use for uploads (optional)")
    parser.add_argument("--json", action="store_true", help="Output JSON when appropriate")
    args = parser.parse_args(argv)

    # `zerotier` is bound to None at import time when API/zerotier.py could
    # not be imported (see the try/except at the top of this script).
    if zerotier is None:
        log("ZeroTier API module not available; ensure API/zerotier.py is importable and zerotier or zerotier-cli is installed")
        return 1

    if args.list:
        nets = zerotier.list_networks()
        if args.json:
            print(json.dumps([n.__dict__ for n in nets], indent=2))
        else:
            for n in nets:
                # BUG FIX: the original f-string was
                # f"{n.id}\t{name:=}{n.name}\t..." which referenced an
                # undefined local `name` and raised NameError on the first
                # network printed.
                print(f"{n.id}\t{n.name}\t{n.status}\t{n.assigned_addresses}")
        return 0

    if args.join:
        try:
            ok = zerotier.join_network(args.join)
            print("Joined" if ok else "Failed to join")
            return 0 if ok else 2
        except Exception as exc:
            log(f"Join failed: {exc}")
            print(f"Join failed: {exc}")
            return 2

    if args.leave:
        try:
            ok = zerotier.leave_network(args.leave)
            print("Left" if ok else "Failed to leave")
            return 0 if ok else 2
        except Exception as exc:
            log(f"Leave failed: {exc}")
            print(f"Leave failed: {exc}")
            return 2

    if args.discover:
        probes = zerotier.discover_services_on_network(args.discover)
        if args.json:
            print(json.dumps([p.__dict__ for p in probes], indent=2, default=str))
        else:
            for p in probes:
                print(f"{p.address}:{p.port}{p.path} -> status={p.status_code} hint={p.service_hint}")
        return 0

    if args.upload:
        # Upload a file to the first discovered remote service on the network.
        if not args.file:
            print("ERROR: --file is required for --upload")
            return 2
        probe = zerotier.find_peer_service(args.upload, service_hint="remote")
        if not probe:
            print("No remote service found on network")
            return 2
        base = f"http://{probe.address}:{probe.port}"
        resp = _post_upload(base, args.file, args.tag or [], args.url or [], args.api_key)
        print(resp.status_code, resp.text)
        return 0 if resp.status_code in (200, 201) else 2

    parser.print_help()
    return 0


def _post_upload(base, file_path, tags, urls, api_key):
    """POST *file_path* as multipart form data to ``<base>/files/upload``.

    Prefers httpx, falling back to requests only when httpx is not installed.
    (The original caught *any* exception from the httpx branch and re-ran the
    whole upload through requests, which could silently re-send the file after
    a genuine network/HTTP error; an ImportError check restores the intended
    "library missing" fallback.)

    Args:
        base: Service base URL, e.g. ``http://10.0.0.5:999``.
        file_path: Local path of the file to upload.
        tags: Tag values, sent as repeated ``tag`` form fields.
        urls: URL values, sent as repeated ``url`` form fields.
        api_key: Optional API key, sent as the ``X-API-Key`` header.

    Returns:
        The HTTP response object (httpx and requests expose compatible
        ``status_code`` / ``text`` attributes).
    """
    try:
        import httpx as _http
    except ImportError:
        import requests as _http
    url = base.rstrip("/") + "/files/upload"
    headers = {}
    if api_key:
        headers["X-API-Key"] = api_key
    # Repeated form fields: one ("tag", ...) / ("url", ...) pair per value.
    data = [("tag", t) for t in tags] + [("url", u) for u in urls]
    with open(file_path, "rb") as fh:
        files = {"file": (Path(file_path).name, fh)}
        return _http.post(url, files=files, data=data, headers=headers, timeout=30)
# Script entry point: propagate main()'s integer return code to the shell
# as the process exit status (SystemExit(n) is exactly what sys.exit raises).
if __name__ == "__main__":
    raise SystemExit(main())