fdf
@@ -34,6 +34,8 @@ class Folder(Store):
     """
     # Track which locations have already been migrated to avoid repeated migrations
     _migrated_locations = set()
+    # Cache scan results to avoid repeated full scans across repeated instantiations
+    _scan_cache: Dict[str, Tuple[bool, str, Dict[str, int]]] = {}
 
     def __new__(cls, *args: Any, **kwargs: Any) -> "Folder":
         return super().__new__(cls)
@@ -55,10 +57,16 @@ class Folder(Store):
 
         self._location = location
         self._name = name
 
+        # Scan status (set during init)
+        self.scan_ok: bool = True
+        self.scan_detail: str = ""
+        self.scan_stats: Dict[str, int] = {}
+
         if self._location:
             try:
                 from API.folder import API_folder_store
+                from API.folder import LocalLibraryInitializer
                 from pathlib import Path
                 location_path = Path(self._location).expanduser()
 
@@ -69,6 +77,29 @@ class Folder(Store):
 
                 # Call migration and discovery at startup
                 Folder.migrate_location(self._location)
+
+                # Local library scan/index (one-time per location per process)
+                location_key = str(location_path)
+                cached = Folder._scan_cache.get(location_key)
+                if cached is None:
+                    try:
+                        initializer = LocalLibraryInitializer(location_path)
+                        stats = initializer.scan_and_index() or {}
+                        files_new = int(stats.get('files_new', 0) or 0)
+                        sidecars = int(stats.get('sidecars_imported', 0) or 0)
+                        total_db = int(stats.get('files_total_db', 0) or 0)
+                        if files_new > 0 or sidecars > 0:
+                            detail = f"New: {files_new}, Sidecars: {sidecars}" + (f" (Total: {total_db})" if total_db else "")
+                        else:
+                            detail = ("Up to date" + (f" (Total: {total_db})" if total_db else ""))
+                        Folder._scan_cache[location_key] = (True, detail, dict(stats))
+                    except Exception as exc:
+                        Folder._scan_cache[location_key] = (False, f"Scan failed: {exc}", {})
+
+                ok, detail, stats = Folder._scan_cache.get(location_key, (True, "", {}))
+                self.scan_ok = bool(ok)
+                self.scan_detail = str(detail or "")
+                self.scan_stats = dict(stats or {})
             except Exception as exc:
                 debug(f"Failed to initialize database for '{name}': {exc}")
 
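
The scan cache added above is a process-wide memo: `Folder` is instantiated many times, but `scan_and_index()` should walk a location at most once per process. A minimal sketch of the pattern, with illustrative names (`CachedScanner`, `_scan`) standing in for `Folder` and `LocalLibraryInitializer`:

from typing import Dict, Tuple


def _scan(location: str) -> str:
    # Stand-in for the expensive LocalLibraryInitializer.scan_and_index() walk.
    return f"scanned {location}"


class CachedScanner:
    # Class attribute: shared by every instance constructed in this process.
    _scan_cache: Dict[str, Tuple[bool, str]] = {}

    def __init__(self, location: str) -> None:
        cached = CachedScanner._scan_cache.get(location)
        if cached is None:
            try:
                cached = (True, _scan(location))
            except Exception as exc:
                cached = (False, f"Scan failed: {exc}")
            CachedScanner._scan_cache[location] = cached
        self.scan_ok, self.scan_detail = cached


first = CachedScanner("/data/library")   # runs the scan
second = CachedScanner("/data/library")  # cache hit; no second walk

Failures are cached too, so a broken location reports the same error on every construction instead of re-scanning each time.
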
@@ -87,12 +118,11 @@ class Folder(Store):
             return
 
         cls._migrated_locations.add(location_str)
 
-        # Create a temporary instance just to call the migration
-        temp_instance = cls(location=location)
-        temp_instance._migrate_to_hash_storage(location_path)
-
-    def _migrate_to_hash_storage(self, location_path: Path) -> None:
+        cls._migrate_to_hash_storage(location_path)
+
+    @classmethod
+    def _migrate_to_hash_storage(cls, location_path: Path) -> None:
         """Migrate existing files from filename-based to hash-based storage.
 
         Checks for sidecars (.metadata, .tag) and imports them before renaming.
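
Switching `_migrate_to_hash_storage` to a `@classmethod` removes the temporary `cls(location=location)` instance. That mattered because `__init__` itself calls `migrate_location`, so constructing an instance inside the migration path re-enters the constructor, and only the `_migrated_locations` guard keeps it from recursing. A toy illustration of the hazard (hypothetical `Migrator` class, not from the codebase):

class Migrator:
    _migrated: set[str] = set()

    def __init__(self, location: str) -> None:
        # Constructor triggers migration, as Folder.__init__ does.
        Migrator.migrate(location)

    @classmethod
    def migrate(cls, location: str) -> None:
        if location in cls._migrated:
            return
        cls._migrated.add(location)
        # The old pattern here was: temp = cls(location=location), which
        # re-enters __init__ and relies on the guard above to terminate.
        cls._do_migrate(location)

    @classmethod
    def _do_migrate(cls, location: str) -> None:
        print(f"migrating {location}")


Migrator("/data/library")  # migrates once; repeat constructions are no-ops
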
@@ -158,6 +188,15 @@ class Folder(Store):
             if hash_path != file_path and not hash_path.exists():
                 debug(f"Migrating: {file_path.name} -> {hash_filename}", file=sys.stderr)
                 file_path.rename(hash_path)
 
+                # Ensure DB points to the renamed path (update by hash).
+                try:
+                    cursor.execute(
+                        "UPDATE files SET file_path = ?, updated_at = CURRENT_TIMESTAMP WHERE hash = ?",
+                        (str(hash_path.resolve()), file_hash),
+                    )
+                except Exception:
+                    pass
+
             # Create or update database entry
             db.get_or_create_file_entry(hash_path)
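
The migration renames each file to a hash-derived name, repoints the `files` row for that hash at the new path, and then upserts a DB entry. The hashing scheme itself sits outside this hunk; a plausible sketch, assuming content SHA-256 plus the original suffix (an assumption, not necessarily what `_migrate_to_hash_storage` computes):

import hashlib
from pathlib import Path


def hash_filename(file_path: Path) -> str:
    """Return '<sha256-of-contents><original suffix>' for a file."""
    digest = hashlib.sha256()
    with open(file_path, "rb") as fh:
        # Read in chunks so large media files never load into memory at once.
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() + file_path.suffix.lower()
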
@@ -5,17 +5,22 @@ import sys
 from pathlib import Path
 from typing import Any, Dict, List, Optional, Tuple
 
+import httpx
+
 from SYS.logger import debug, log
 from SYS.utils_constant import mime_maps
 
 from Store._base import Store
 
+
+_HYDRUS_INIT_CHECK_CACHE: dict[tuple[str, str], tuple[bool, Optional[str]]] = {}
+
 
 class HydrusNetwork(Store):
     """File storage backend for Hydrus client.
 
     Each instance represents a specific Hydrus client connection.
-    Maintains its own HydrusClient with session key.
+    Maintains its own HydrusClient.
     """
 
     def __new__(cls, *args: Any, **kwargs: Any) -> "HydrusNetwork":
@@ -64,22 +69,67 @@ class HydrusNetwork(Store):
 
         self.NAME = instance_name
         self.API = api_key
-        self.URL = url
-        # Create persistent client with session key for this instance
-        self._client = HydrusClient(url=url, access_key=api_key)
+        self.URL = url.rstrip("/")
 
-        # Self health-check: acquire a session key immediately so broken configs
-        # fail-fast and the registry can skip registering this backend.
-        try:
-            if self._client is not None:
-                self._client.ensure_session_key()
-        except Exception as exc:
-            # Best-effort cleanup so partially constructed objects don't linger.
+        # Total count (best-effort, used for startup diagnostics)
+        self.total_count: Optional[int] = None
+
+        # Self health-check: validate the URL is reachable and the access key is accepted.
+        # This MUST NOT attempt to acquire a session key.
+        cache_key = (self.URL, self.API)
+        cached = _HYDRUS_INIT_CHECK_CACHE.get(cache_key)
+        if cached is not None:
+            ok, err = cached
+            if not ok:
+                raise RuntimeError(f"Hydrus '{self.NAME}' unavailable: {err or 'Unavailable'}")
+        else:
+            api_version_url = f"{self.URL}/api_version"
+            verify_key_url = f"{self.URL}/verify_access_key"
             try:
-                self._client = None
-            except Exception:
-                pass
-            raise RuntimeError(f"Hydrus '{self.NAME}' unavailable: {exc}") from exc
+                with httpx.Client(timeout=5.0, verify=False, follow_redirects=True) as client:
+                    version_resp = client.get(api_version_url)
+                    version_resp.raise_for_status()
+                    version_payload = version_resp.json()
+                    if not isinstance(version_payload, dict):
+                        raise RuntimeError("Hydrus /api_version returned an unexpected response")
+
+                    verify_resp = client.get(
+                        verify_key_url,
+                        headers={"Hydrus-Client-API-Access-Key": self.API},
+                    )
+                    verify_resp.raise_for_status()
+                    verify_payload = verify_resp.json()
+                    if not isinstance(verify_payload, dict):
+                        raise RuntimeError("Hydrus /verify_access_key returned an unexpected response")
+
+                _HYDRUS_INIT_CHECK_CACHE[cache_key] = (True, None)
+            except Exception as exc:
+                err = str(exc)
+                _HYDRUS_INIT_CHECK_CACHE[cache_key] = (False, err)
+                raise RuntimeError(f"Hydrus '{self.NAME}' unavailable: {err}") from exc
+
+        # Create a persistent client for this instance (auth via access key by default).
+        self._client = HydrusClient(url=self.URL, access_key=self.API)
+
+        # Best-effort total count (fast on Hydrus side; does not fetch IDs/hashes).
+        try:
+            payload = self._client.search_files(
+                tags=["system:everything"],
+                return_hashes=False,
+                return_file_ids=False,
+                return_file_count=True,
+            )
+            count_val = None
+            if isinstance(payload, dict):
+                count_val = payload.get("file_count")
+                if count_val is None:
+                    count_val = payload.get("file_count_inclusive")
+                if count_val is None:
+                    count_val = payload.get("num_files")
+            if isinstance(count_val, int):
+                self.total_count = count_val
+        except Exception as exc:
+            debug(f"Hydrus total count unavailable for '{self.NAME}': {exc}", file=sys.stderr)
 
     def name(self) -> str:
         return self.NAME
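
The health check stops at `/api_version` and `/verify_access_key`, the two cheap read-only endpoints the hunk above calls; acquiring a session key is deliberately avoided. The same probe, distilled into a standalone function (a sketch mirroring the hunk's httpx usage):

from typing import Optional, Tuple

import httpx


def probe_hydrus(url: str, access_key: str) -> Tuple[bool, Optional[str]]:
    """Return (ok, error) for a Hydrus Client API endpoint."""
    base = url.rstrip("/")
    try:
        with httpx.Client(timeout=5.0, verify=False, follow_redirects=True) as client:
            client.get(f"{base}/api_version").raise_for_status()
            client.get(
                f"{base}/verify_access_key",
                headers={"Hydrus-Client-API-Access-Key": access_key},
            ).raise_for_status()
        return True, None
    except Exception as exc:
        return False, str(exc)
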
@@ -22,6 +22,11 @@ from SYS.logger import debug
 from Store._base import Store as BaseStore
 
 
+# Backends that failed to initialize earlier in the current process.
+# Keyed by (store_type, instance_key) where instance_key is the name used under config.store.<type>.<instance_key>.
+_FAILED_BACKEND_CACHE: Dict[tuple[str, str], str] = {}
+
+
 def _normalize_store_type(value: str) -> str:
     return "".join(ch.lower() for ch in str(value or "") if ch.isalnum())
 
@@ -111,6 +116,7 @@ class Store:
         self._config = config or {}
         self._suppress_debug = suppress_debug
         self._backends: Dict[str, BaseStore] = {}
+        self._backend_errors: Dict[str, str] = {}
         self._load_backends()
 
     def _load_backends(self) -> None:
@@ -131,6 +137,18 @@ class Store:
                 continue
 
             for instance_name, instance_config in instances.items():
+                backend_name = str(instance_name)
+
+                # If this backend already failed earlier in this process, skip re-instantiation.
+                cache_key = (store_type, str(instance_name))
+                cached_error = _FAILED_BACKEND_CACHE.get(cache_key)
+                if cached_error:
+                    self._backend_errors[str(instance_name)] = str(cached_error)
+                    if isinstance(instance_config, dict):
+                        override_name = _get_case_insensitive(dict(instance_config), "NAME")
+                        if override_name:
+                            self._backend_errors[str(override_name)] = str(cached_error)
+                    continue
                 try:
                     kwargs = _build_kwargs(store_cls, str(instance_name), instance_config)
 
@@ -144,11 +162,17 @@ class Store:
                     backend_name = str(kwargs.get("NAME") or instance_name)
                     self._backends[backend_name] = backend
                 except Exception as exc:
+                    err_text = str(exc)
+                    self._backend_errors[str(instance_name)] = err_text
+                    _FAILED_BACKEND_CACHE[cache_key] = err_text
                     if not self._suppress_debug:
                         debug(
                             f"[Store] Failed to register {store_cls.__name__} instance '{instance_name}': {exc}"
                         )
 
+    def get_backend_error(self, backend_name: str) -> Optional[str]:
+        return self._backend_errors.get(str(backend_name))
+
     def list_backends(self) -> list[str]:
         return sorted(self._backends.keys())
 
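
`_FAILED_BACKEND_CACHE` is a negative cache: once a backend's constructor raises, every later registry in the same process records the saved error (under the config key and any `NAME` override) instead of paying the construction cost again. The pattern in isolation, with illustrative names:

from typing import Callable, Dict, Optional

_FAILED: Dict[str, str] = {}  # instance key -> last construction error


def build_backend(key: str, factory: Callable[[], object]) -> Optional[object]:
    cached_error = _FAILED.get(key)
    if cached_error:
        # Known-bad: skip the (possibly slow) constructor entirely.
        return None
    try:
        return factory()
    except Exception as exc:
        _FAILED[key] = str(exc)  # remember the failure for this process
        return None
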