diff --git a/.style.yapf b/.style.yapf
new file mode 100644
index 0000000..cb17f8f
--- /dev/null
+++ b/.style.yapf
@@ -0,0 +1,27 @@
+[style]
+based_on_style = pep8
+column_limit = 88
+indent_width = 4
+
+# Prefer splitting complex or comma-separated argument lists so each value sits on its own line
+split_all_comma_separated_values = true
+split_before_first_argument = true
+split_arguments_when_comma_terminated = true
+split_before_named_assigns = true
+allow_split_before_default_or_named_assigns = true
+
+# Make dict literals easier to read
+each_dict_entry_on_separate_line = true
+force_multiline_dict = true
+
+dedent_closing_brackets = true
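+
+# Example: a call that overflows the 88-column limit, such as
+#   client.download(url, file_path, progress_callback=progress_callback, **kwargs)
+# is split with one argument per line and a dedented closing bracket:
+#   client.download(
+#       url,
+#       file_path,
+#       progress_callback=progress_callback,
+#       **kwargs
+#   )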
diff --git a/.vscode/settings.json b/.vscode/settings.json
index f48b071..cb73650 100644
--- a/.vscode/settings.json
+++ b/.vscode/settings.json
@@ -2,8 +2,8 @@
"editor.formatOnSave": true,
"editor.defaultFormatter": "ms-python.python",
- "python.formatting.provider": "black",
- "python.formatting.blackArgs": ["--line-length", "88"],
+ "python.formatting.provider": "yapf",
+ "python.formatting.yapfArgs": ["--style", ".style.yapf"],
"[python]": {
"editor.defaultFormatter": "ms-python.python",
diff --git a/.yapfignore b/.yapfignore
new file mode 100644
index 0000000..fb8d2c8
--- /dev/null
+++ b/.yapfignore
@@ -0,0 +1,90 @@
+# .yapfignore - keep YAPF from formatting files that are ignored by git
+# Generated from the project's .gitignore. Update .gitignore and re-run
+# the formatter setup if you need to change ignored paths.
+
+__pycache__/
+*.py[cod]
+*$py.class
+config.conf
+config.d/
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+test_*
+*.manifest
+*.spec
+cookies.txt
+pip-log.txt
+pip-delete-this-directory.txt
+backup/
+
+# Coverage / test artifacts
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Docs / build
+docs/_build/
+
+# Notebooks
+.ipynb_checkpoints
+
+# Virtualenvs / environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+__pypackages__/
+
+# IDE / editor
+.spyderproject
+.spyproject
+.vscode/
+.idea/
+
+# Type/checker caches
+.mypy_cache/
+.dmypy.json
+dmypy.json
+.pyre/
+.pytype/
+
+# Misc build and distribution
+cython_debug/
+MPV/ffmpeg/*
+Log/
+Log/medeia_macina/telegram.session
+*.session
+example.py
+test*
+MPV/portable_config/watch_later*
+hydrusnetwork
diff --git a/API/HTTP.py b/API/HTTP.py
index 6c6d018..a6a8f1c 100644
--- a/API/HTTP.py
+++ b/API/HTTP.py
@@ -31,7 +31,8 @@ class HTTPClient:
retries: int = DEFAULT_RETRIES,
user_agent: str = DEFAULT_USER_AGENT,
verify_ssl: bool = True,
- headers: Optional[Dict[str, str]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
):
"""
Initialize HTTP client.
@@ -67,15 +68,19 @@ class HTTPClient:
def _get_headers(self) -> Dict[str, str]:
"""Get request headers with user-agent."""
- headers = {"User-Agent": self.user_agent}
+ headers = {
+ "User-Agent": self.user_agent
+ }
headers.update(self.base_headers)
return headers
def get(
self,
url: str,
- params: Optional[Dict[str, Any]] = None,
- headers: Optional[Dict[str, str]] = None,
+ params: Optional[Dict[str,
+ Any]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
allow_redirects: bool = True,
) -> httpx.Response:
"""
@@ -104,7 +109,8 @@ class HTTPClient:
data: Optional[Any] = None,
json: Optional[Dict] = None,
files: Optional[Dict] = None,
- headers: Optional[Dict[str, str]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
) -> httpx.Response:
"""
Make a POST request.
@@ -135,7 +141,8 @@ class HTTPClient:
json: Optional[Dict] = None,
content: Optional[Any] = None,
files: Optional[Dict] = None,
- headers: Optional[Dict[str, str]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
) -> httpx.Response:
"""
Make a PUT request.
@@ -164,7 +171,8 @@ class HTTPClient:
def delete(
self,
url: str,
- headers: Optional[Dict[str, str]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
) -> httpx.Response:
"""
Make a DELETE request.
@@ -201,8 +209,11 @@ class HTTPClient:
url: str,
file_path: str,
chunk_size: int = 8192,
- progress_callback: Optional[Callable[[int, int], None]] = None,
- headers: Optional[Dict[str, str]] = None,
+ progress_callback: Optional[Callable[[int,
+ int],
+ None]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
) -> Path:
"""
Download a file from URL with optional progress tracking.
@@ -220,7 +231,10 @@ class HTTPClient:
path = Path(file_path)
path.parent.mkdir(parents=True, exist_ok=True)
- with self._request_stream("GET", url, headers=headers, follow_redirects=True) as response:
+ with self._request_stream("GET",
+ url,
+ headers=headers,
+ follow_redirects=True) as response:
response.raise_for_status()
total_bytes = int(response.headers.get("content-length", 0))
bytes_downloaded = 0
@@ -269,7 +283,9 @@ class HTTPClient:
httpx.Response object
"""
if not self._client:
- raise RuntimeError("HTTPClient must be used with context manager (with statement)")
+ raise RuntimeError(
+ "HTTPClient must be used with context manager (with statement)"
+ )
# Merge headers
if "headers" in kwargs and kwargs["headers"]:
@@ -289,7 +305,9 @@ class HTTPClient:
return response
except httpx.TimeoutException as e:
last_exception = e
- logger.warning(f"Timeout on attempt {attempt + 1}/{self.retries}: {url}")
+ logger.warning(
+ f"Timeout on attempt {attempt + 1}/{self.retries}: {url}"
+ )
if attempt < self.retries - 1:
continue
except httpx.HTTPStatusError as e:
@@ -300,7 +318,9 @@ class HTTPClient:
except:
response_text = ""
if log_http_errors:
- logger.error(f"HTTP {e.response.status_code} from {url}: {response_text}")
+ logger.error(
+ f"HTTP {e.response.status_code} from {url}: {response_text}"
+ )
raise
last_exception = e
try:
@@ -321,7 +341,9 @@ class HTTPClient:
continue
if last_exception:
- logger.error(f"Request failed after {self.retries} attempts: {url} - {last_exception}")
+ logger.error(
+ f"Request failed after {self.retries} attempts: {url} - {last_exception}"
+ )
raise last_exception
raise RuntimeError("Request failed after retries")
@@ -329,7 +351,9 @@ class HTTPClient:
def _request_stream(self, method: str, url: str, **kwargs):
"""Make a streaming request."""
if not self._client:
- raise RuntimeError("HTTPClient must be used with context manager (with statement)")
+ raise RuntimeError(
+ "HTTPClient must be used with context manager (with statement)"
+ )
# Merge headers
if "headers" in kwargs and kwargs["headers"]:
@@ -351,7 +375,8 @@ class AsyncHTTPClient:
retries: int = DEFAULT_RETRIES,
user_agent: str = DEFAULT_USER_AGENT,
verify_ssl: bool = True,
- headers: Optional[Dict[str, str]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
):
"""
Initialize async HTTP client.
@@ -387,15 +412,19 @@ class AsyncHTTPClient:
def _get_headers(self) -> Dict[str, str]:
"""Get request headers with user-agent."""
- headers = {"User-Agent": self.user_agent}
+ headers = {
+ "User-Agent": self.user_agent
+ }
headers.update(self.base_headers)
return headers
async def get(
self,
url: str,
- params: Optional[Dict[str, Any]] = None,
- headers: Optional[Dict[str, str]] = None,
+ params: Optional[Dict[str,
+ Any]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
allow_redirects: bool = True,
) -> httpx.Response:
"""
@@ -423,7 +452,8 @@ class AsyncHTTPClient:
url: str,
data: Optional[Any] = None,
json: Optional[Dict] = None,
- headers: Optional[Dict[str, str]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
) -> httpx.Response:
"""
Make an async POST request.
@@ -450,8 +480,11 @@ class AsyncHTTPClient:
url: str,
file_path: str,
chunk_size: int = 8192,
- progress_callback: Optional[Callable[[int, int], None]] = None,
- headers: Optional[Dict[str, str]] = None,
+ progress_callback: Optional[Callable[[int,
+ int],
+ None]] = None,
+ headers: Optional[Dict[str,
+ str]] = None,
) -> Path:
"""
Download a file from URL asynchronously with optional progress tracking.
@@ -497,7 +530,9 @@ class AsyncHTTPClient:
httpx.Response object
"""
if not self._client:
- raise RuntimeError("AsyncHTTPClient must be used with async context manager")
+ raise RuntimeError(
+ "AsyncHTTPClient must be used with async context manager"
+ )
# Merge headers
if "headers" in kwargs and kwargs["headers"]:
@@ -516,7 +551,9 @@ class AsyncHTTPClient:
return response
except httpx.TimeoutException as e:
last_exception = e
- logger.warning(f"Timeout on attempt {attempt + 1}/{self.retries}: {url}")
+ logger.warning(
+ f"Timeout on attempt {attempt + 1}/{self.retries}: {url}"
+ )
if attempt < self.retries - 1:
await asyncio.sleep(0.5) # Brief delay before retry
continue
@@ -527,7 +564,9 @@ class AsyncHTTPClient:
response_text = e.response.text[:500]
except:
response_text = ""
- logger.error(f"HTTP {e.response.status_code} from {url}: {response_text}")
+ logger.error(
+ f"HTTP {e.response.status_code} from {url}: {response_text}"
+ )
raise
last_exception = e
try:
@@ -550,7 +589,9 @@ class AsyncHTTPClient:
continue
if last_exception:
- logger.error(f"Request failed after {self.retries} attempts: {url} - {last_exception}")
+ logger.error(
+ f"Request failed after {self.retries} attempts: {url} - {last_exception}"
+ )
raise last_exception
raise RuntimeError("Request failed after retries")
@@ -558,7 +599,9 @@ class AsyncHTTPClient:
def _request_stream(self, method: str, url: str, **kwargs):
"""Make a streaming request."""
if not self._client:
- raise RuntimeError("AsyncHTTPClient must be used with async context manager")
+ raise RuntimeError(
+ "AsyncHTTPClient must be used with async context manager"
+ )
# Merge headers
if "headers" in kwargs and kwargs["headers"]:
@@ -587,9 +630,16 @@ def post(url: str, **kwargs) -> httpx.Response:
def download(
url: str,
file_path: str,
- progress_callback: Optional[Callable[[int, int], None]] = None,
+ progress_callback: Optional[Callable[[int,
+ int],
+ None]] = None,
**kwargs,
) -> Path:
"""Quick file download without context manager."""
with HTTPClient() as client:
- return client.download(url, file_path, progress_callback=progress_callback, **kwargs)
+ return client.download(
+ url,
+ file_path,
+ progress_callback=progress_callback,
+ **kwargs
+ )
diff --git a/API/HydrusNetwork.py b/API/HydrusNetwork.py
index 58adb89..a0d8354 100644
--- a/API/HydrusNetwork.py
+++ b/API/HydrusNetwork.py
@@ -85,7 +85,8 @@ class HydrusNetwork:
raise ValueError("Hydrus base URL is required")
self.url = self.url.rstrip("/")
parsed = urlsplit(self.url)
- if parsed.scheme not in {"http", "https"}:
+ if parsed.scheme not in {"http",
+ "https"}:
raise ValueError("Hydrus base URL must use http or https")
self.scheme = parsed.scheme
self.hostname = parsed.hostname or "localhost"
@@ -114,7 +115,8 @@ class HydrusNetwork:
return path
def _perform_request(self, spec: HydrusRequestSpec) -> Any:
- headers: dict[str, str] = {}
+ headers: dict[str,
+ str] = {}
# Use session key if available, otherwise use access key
if self._session_key:
@@ -138,7 +140,9 @@ class HydrusNetwork:
content_type = ""
try:
- with HTTPClient(timeout=self.timeout, headers=headers, verify_ssl=False) as client:
+ with HTTPClient(timeout=self.timeout,
+ headers=headers,
+ verify_ssl=False) as client:
response = None
if spec.file_path is not None:
@@ -149,7 +153,8 @@ class HydrusNetwork:
raise FileNotFoundError(error_msg)
file_size = file_path.stat().st_size
- headers["Content-Type"] = spec.content_type or "application/octet-stream"
+ headers["Content-Type"
+ ] = spec.content_type or "application/octet-stream"
# Do not set Content-Length when streaming an iterator body.
# If the file size changes between stat() and read() (or the source is truncated),
# h11 will raise: "Too little data for declared Content-Length".
@@ -239,7 +244,9 @@ class HydrusNetwork:
body = response.content
content_type = response.headers.get("Content-Type", "") or ""
- logger.debug(f"{self._log_prefix()} Response {status} {reason} ({len(body)} bytes)")
+ logger.debug(
+ f"{self._log_prefix()} Response {status} {reason} ({len(body)} bytes)"
+ )
except (httpx.ConnectError, httpx.TimeoutException, httpx.NetworkError) as exc:
msg = f"Hydrus unavailable: {exc}"
@@ -292,7 +299,10 @@ class HydrusNetwork:
# Retry the request with new session key
return self._perform_request(spec)
except Exception as retry_error:
- logger.error(f"{self._log_prefix()} Retry failed: {retry_error}", exc_info=True)
+ logger.error(
+ f"{self._log_prefix()} Retry failed: {retry_error}",
+ exc_info=True
+ )
# If retry fails, raise the original error
raise HydrusRequestError(status, message, payload) from retry_error
@@ -311,7 +321,10 @@ class HydrusNetwork:
Raises HydrusRequestError if the request fails.
"""
if not self.access_key:
- raise HydrusRequestError(401, "Cannot acquire session key: no access key configured")
+ raise HydrusRequestError(
+ 401,
+ "Cannot acquire session key: no access key configured"
+ )
# Temporarily use access key to get session key
original_session_key = self._session_key
@@ -323,7 +336,9 @@ class HydrusNetwork:
if not session_key:
raise HydrusRequestError(
- 500, "Session key response missing 'session_key' field", result
+ 500,
+ "Session key response missing 'session_key' field",
+ result
)
self._session_key = session_key
@@ -345,7 +360,12 @@ class HydrusNetwork:
return self._session_key
return self._acquire_session_key()
- def _get(self, endpoint: str, *, query: dict[str, Any] | None = None) -> dict[str, Any]:
+ def _get(self,
+ endpoint: str,
+ *,
+ query: dict[str,
+ Any] | None = None) -> dict[str,
+ Any]:
spec = HydrusRequestSpec("GET", endpoint, query=query)
return cast(dict[str, Any], self._perform_request(spec))
@@ -353,12 +373,18 @@ class HydrusNetwork:
self,
endpoint: str,
*,
- data: dict[str, Any] | None = None,
+ data: dict[str,
+ Any] | None = None,
file_path: Path | None = None,
content_type: str | None = None,
- ) -> dict[str, Any]:
+ ) -> dict[str,
+ Any]:
spec = HydrusRequestSpec(
- "POST", endpoint, data=data, file_path=file_path, content_type=content_type
+ "POST",
+ endpoint,
+ data=data,
+ file_path=file_path,
+ content_type=content_type
)
return cast(dict[str, Any], self._perform_request(spec))
@@ -397,12 +423,19 @@ class HydrusNetwork:
Required JSON args: {"hashes": [, ...]}
"""
hash_list = self._ensure_hashes(hashes)
- body = {"hashes": hash_list}
+ body = {
+ "hashes": hash_list
+ }
return self._post("/add_files/undelete_files", data=body)
def delete_files(
- self, hashes: Union[str, Iterable[str]], *, reason: str | None = None
- ) -> dict[str, Any]:
+ self,
+ hashes: Union[str,
+ Iterable[str]],
+ *,
+ reason: str | None = None
+ ) -> dict[str,
+ Any]:
"""Delete files in Hydrus.
Hydrus Client API: POST /add_files/delete_files
@@ -410,98 +443,166 @@ class HydrusNetwork:
Optional JSON args: {"reason": "..."}
"""
hash_list = self._ensure_hashes(hashes)
- body: dict[str, Any] = {"hashes": hash_list}
+ body: dict[str,
+ Any] = {
+ "hashes": hash_list
+ }
if isinstance(reason, str) and reason.strip():
body["reason"] = reason.strip()
return self._post("/add_files/delete_files", data=body)
- def clear_file_deletion_record(self, hashes: Union[str, Iterable[str]]) -> dict[str, Any]:
+ def clear_file_deletion_record(self,
+ hashes: Union[str,
+ Iterable[str]]) -> dict[str,
+ Any]:
"""Clear Hydrus's file deletion record for the provided hashes.
Hydrus Client API: POST /add_files/clear_file_deletion_record
Required JSON args: {"hashes": [, ...]}
"""
hash_list = self._ensure_hashes(hashes)
- body = {"hashes": hash_list}
+ body = {
+ "hashes": hash_list
+ }
return self._post("/add_files/clear_file_deletion_record", data=body)
def add_tag(
- self, hash: Union[str, Iterable[str]], tags: Iterable[str], service_name: str
- ) -> dict[str, Any]:
+ self,
+ hash: Union[str,
+ Iterable[str]],
+ tags: Iterable[str],
+ service_name: str
+ ) -> dict[str,
+ Any]:
hash = self._ensure_hashes(hash)
- body = {"hashes": hash, "service_names_to_tags": {service_name: list(tags)}}
+ body = {
+ "hashes": hash,
+ "service_names_to_tags": {
+ service_name: list(tags)
+ }
+ }
return self._post("/add_tags/add_tags", data=body)
def delete_tag(
self,
- file_hashes: Union[str, Iterable[str]],
+ file_hashes: Union[str,
+ Iterable[str]],
tags: Iterable[str],
service_name: str,
*,
action: int = 1,
- ) -> dict[str, Any]:
+ ) -> dict[str,
+ Any]:
hashes = self._ensure_hashes(file_hashes)
body = {
"hashes": hashes,
- "service_names_to_actions_to_tags": {service_name: {action: list(tags)}},
+ "service_names_to_actions_to_tags": {
+ service_name: {
+ action: list(tags)
+ }
+ },
}
return self._post("/add_tags/add_tags", data=body)
def add_tags_by_key(
- self, hash: Union[str, Iterable[str]], tags: Iterable[str], service_key: str
- ) -> dict[str, Any]:
+ self,
+ hash: Union[str,
+ Iterable[str]],
+ tags: Iterable[str],
+ service_key: str
+ ) -> dict[str,
+ Any]:
hash = self._ensure_hashes(hash)
- body = {"hashes": hash, "service_keys_to_tags": {service_key: list(tags)}}
+ body = {
+ "hashes": hash,
+ "service_keys_to_tags": {
+ service_key: list(tags)
+ }
+ }
return self._post("/add_tags/add_tags", data=body)
def delete_tags_by_key(
self,
- file_hashes: Union[str, Iterable[str]],
+ file_hashes: Union[str,
+ Iterable[str]],
tags: Iterable[str],
service_key: str,
*,
action: int = 1,
- ) -> dict[str, Any]:
+ ) -> dict[str,
+ Any]:
hashes = self._ensure_hashes(file_hashes)
body = {
"hashes": hashes,
- "service_keys_to_actions_to_tags": {service_key: {action: list(tags)}},
+ "service_keys_to_actions_to_tags": {
+ service_key: {
+ action: list(tags)
+ }
+ },
}
return self._post("/add_tags/add_tags", data=body)
- def associate_url(self, file_hashes: Union[str, Iterable[str]], url: str) -> dict[str, Any]:
+ def associate_url(self,
+ file_hashes: Union[str,
+ Iterable[str]],
+ url: str) -> dict[str,
+ Any]:
hashes = self._ensure_hashes(file_hashes)
if len(hashes) == 1:
- body = {"hash": hashes[0], "url_to_add": url}
+ body = {
+ "hash": hashes[0],
+ "url_to_add": url
+ }
return self._post("/add_urls/associate_url", data=body)
- results: dict[str, Any] = {}
+ results: dict[str,
+ Any] = {}
for file_hash in hashes:
- body = {"hash": file_hash, "url_to_add": url}
+ body = {
+ "hash": file_hash,
+ "url_to_add": url
+ }
results[file_hash] = self._post("/add_urls/associate_url", data=body)
- return {"batched": results}
+ return {
+ "batched": results
+ }
- def delete_url(self, file_hashes: Union[str, Iterable[str]], url: str) -> dict[str, Any]:
+ def delete_url(self,
+ file_hashes: Union[str,
+ Iterable[str]],
+ url: str) -> dict[str,
+ Any]:
hashes = self._ensure_hashes(file_hashes)
if len(hashes) == 1:
- body = {"hash": hashes[0], "url_to_delete": url}
+ body = {
+ "hash": hashes[0],
+ "url_to_delete": url
+ }
return self._post("/add_urls/associate_url", data=body)
- results: dict[str, Any] = {}
+ results: dict[str,
+ Any] = {}
for file_hash in hashes:
- body = {"hash": file_hash, "url_to_delete": url}
+ body = {
+ "hash": file_hash,
+ "url_to_delete": url
+ }
results[file_hash] = self._post("/add_urls/associate_url", data=body)
- return {"batched": results}
+ return {
+ "batched": results
+ }
def set_notes(
self,
file_hash: str,
- notes: dict[str, str],
+ notes: dict[str,
+ str],
*,
merge_cleverly: bool = False,
extend_existing_note_if_possible: bool = True,
conflict_resolution: int = 3,
- ) -> dict[str, Any]:
+ ) -> dict[str,
+ Any]:
"""Add or update notes associated with a file.
Hydrus Client API: POST /add_notes/set_notes
@@ -514,11 +615,17 @@ class HydrusNetwork:
if not file_hash:
raise ValueError("file_hash must not be empty")
- body: dict[str, Any] = {"hash": file_hash, "notes": notes}
+ body: dict[str,
+ Any] = {
+ "hash": file_hash,
+ "notes": notes
+ }
if merge_cleverly:
body["merge_cleverly"] = True
- body["extend_existing_note_if_possible"] = bool(extend_existing_note_if_possible)
+ body["extend_existing_note_if_possible"] = bool(
+ extend_existing_note_if_possible
+ )
body["conflict_resolution"] = int(conflict_resolution)
return self._post("/add_notes/set_notes", data=body)
@@ -526,7 +633,8 @@ class HydrusNetwork:
self,
file_hash: str,
note_names: Sequence[str],
- ) -> dict[str, Any]:
+ ) -> dict[str,
+ Any]:
"""Delete notes associated with a file.
Hydrus Client API: POST /add_notes/delete_notes
@@ -540,20 +648,30 @@ class HydrusNetwork:
if not file_hash:
raise ValueError("file_hash must not be empty")
- body = {"hash": file_hash, "note_names": names}
+ body = {
+ "hash": file_hash,
+ "note_names": names
+ }
return self._post("/add_notes/delete_notes", data=body)
def get_file_relationships(self, file_hash: str) -> dict[str, Any]:
- query = {"hash": file_hash}
- return self._get("/manage_file_relationships/get_file_relationships", query=query)
+ query = {
+ "hash": file_hash
+ }
+ return self._get(
+ "/manage_file_relationships/get_file_relationships",
+ query=query
+ )
def set_relationship(
self,
hash_a: str,
hash_b: str,
- relationship: Union[str, int],
+ relationship: Union[str,
+ int],
do_default_content_merge: bool = False,
- ) -> dict[str, Any]:
+ ) -> dict[str,
+ Any]:
"""Set a relationship between two files in Hydrus.
This wraps Hydrus Client API: POST /manage_file_relationships/set_file_relationships.
@@ -609,7 +727,10 @@ class HydrusNetwork:
# Hydrus does not accept 'king' as a relationship; this maps to 'A is better'.
"king": 4,
}
- relationship = rel_map.get(relationship.lower().strip(), 3) # Default to alternates
+ relationship = rel_map.get(
+ relationship.lower().strip(),
+ 3
+ ) # Default to alternates
body = {
"relationships": [
@@ -621,7 +742,10 @@ class HydrusNetwork:
}
]
}
- return self._post("/manage_file_relationships/set_file_relationships", data=body)
+ return self._post(
+ "/manage_file_relationships/set_file_relationships",
+ data=body
+ )
def get_services(self) -> dict[str, Any]:
return self._get("/get_services")
@@ -639,17 +763,24 @@ class HydrusNetwork:
file_sort_type: int | None = None,
file_sort_asc: bool | None = None,
file_sort_key: str | None = None,
- ) -> dict[str, Any]:
+ ) -> dict[str,
+ Any]:
if not tags:
raise ValueError("tags must not be empty")
- query: dict[str, Any] = {}
+ query: dict[str,
+ Any] = {}
query_fields = [
- ("tags", tags, lambda v: json.dumps(list(v))),
- ("file_service_name", file_service_name, lambda v: v),
- ("return_hashes", return_hashes, lambda v: "true" if v else None),
- ("return_file_ids", return_file_ids, lambda v: "true" if v else None),
- ("return_file_count", return_file_count, lambda v: "true" if v else None),
+ ("tags",
+ tags, lambda v: json.dumps(list(v))),
+ ("file_service_name",
+ file_service_name, lambda v: v),
+ ("return_hashes",
+ return_hashes, lambda v: "true" if v else None),
+ ("return_file_ids",
+ return_file_ids, lambda v: "true" if v else None),
+ ("return_file_count",
+ return_file_count, lambda v: "true" if v else None),
(
"include_current_tags",
include_current_tags,
@@ -660,13 +791,17 @@ class HydrusNetwork:
include_pending_tags,
lambda v: "true" if v else "false" if v is not None else None,
),
- ("file_sort_type", file_sort_type, lambda v: str(v) if v is not None else None),
+ (
+ "file_sort_type",
+ file_sort_type, lambda v: str(v) if v is not None else None
+ ),
(
"file_sort_asc",
file_sort_asc,
lambda v: "true" if v else "false" if v is not None else None,
),
- ("file_sort_key", file_sort_key, lambda v: v),
+ ("file_sort_key",
+ file_sort_key, lambda v: v),
]
for key, value, formatter in query_fields:
@@ -689,24 +824,33 @@ class HydrusNetwork:
include_size: bool = True,
include_mime: bool = False,
include_notes: bool = False,
- ) -> dict[str, Any]:
+ ) -> dict[str,
+ Any]:
if not file_ids and not hashes:
raise ValueError("Either file_ids or hashes must be provided")
- query: dict[str, Any] = {}
+ query: dict[str,
+ Any] = {}
query_fields = [
- ("file_ids", file_ids, lambda v: json.dumps(list(v))),
- ("hashes", hashes, lambda v: json.dumps(list(v))),
+ ("file_ids",
+ file_ids, lambda v: json.dumps(list(v))),
+ ("hashes",
+ hashes, lambda v: json.dumps(list(v))),
(
"include_service_keys_to_tags",
include_service_keys_to_tags,
lambda v: "true" if v else None,
),
- ("include_file_url", include_file_url, lambda v: "true" if v else None),
- ("include_duration", include_duration, lambda v: "true" if v else None),
- ("include_size", include_size, lambda v: "true" if v else None),
- ("include_mime", include_mime, lambda v: "true" if v else None),
- ("include_notes", include_notes, lambda v: "true" if v else None),
+ ("include_file_url",
+ include_file_url, lambda v: "true" if v else None),
+ ("include_duration",
+ include_duration, lambda v: "true" if v else None),
+ ("include_size",
+ include_size, lambda v: "true" if v else None),
+ ("include_mime",
+ include_mime, lambda v: "true" if v else None),
+ ("include_notes",
+ include_notes, lambda v: "true" if v else None),
]
for key, value, formatter in query_fields:
@@ -720,7 +864,9 @@ class HydrusNetwork:
def get_file_path(self, file_hash: str) -> dict[str, Any]:
"""Get the local file system path for a given file hash."""
- query = {"hash": file_hash}
+ query = {
+ "hash": file_hash
+ }
return self._get("/get_files/file_path", query=query)
def file_url(self, file_hash: str) -> str:
@@ -752,7 +898,10 @@ class HydrusCliOptions:
debug: bool = False
@classmethod
- def from_namespace(cls: Type[HydrusCliOptionsT], namespace: Any) -> HydrusCliOptionsT:
+ def from_namespace(
+ cls: Type[HydrusCliOptionsT],
+ namespace: Any
+ ) -> HydrusCliOptionsT:
accept_header = namespace.accept or "application/cbor"
body_bytes: bytes | None = None
body_path: Path | None = None
@@ -785,7 +934,8 @@ def hydrus_request(args, parser) -> int:
if not parsed.hostname:
parser.error("Invalid Hydrus URL")
- headers: dict[str, str] = {}
+ headers: dict[str,
+ str] = {}
if options.access_key:
headers["Hydrus-Client-API-Access-Key"] = options.access_key
if options.accept:
@@ -797,7 +947,10 @@ def hydrus_request(args, parser) -> int:
body_path = options.body_path
if not body_path.is_file():
parser.error(f"File not found: {body_path}")
- headers.setdefault("Content-Type", options.content_type or "application/octet-stream")
+ headers.setdefault(
+ "Content-Type",
+ options.content_type or "application/octet-stream"
+ )
headers["Content-Length"] = str(body_path.stat().st_size)
elif options.body_bytes is not None:
request_body_bytes = options.body_bytes
@@ -820,13 +973,17 @@ def hydrus_request(args, parser) -> int:
port = 443 if parsed.scheme == "https" else 80
connection_cls = (
- http.client.HTTPSConnection if parsed.scheme == "https" else http.client.HTTPConnection
+ http.client.HTTPSConnection
+ if parsed.scheme == "https" else http.client.HTTPConnection
)
host = parsed.hostname or "localhost"
connection = connection_cls(host, port, timeout=options.timeout)
if options.debug:
- log(f"Hydrus connecting to {parsed.scheme}://{host}:{port}{path}", file=sys.stderr)
+ log(
+ f"Hydrus connecting to {parsed.scheme}://{host}:{port}{path}",
+ file=sys.stderr
+ )
response_bytes: bytes = b""
content_type = ""
status = 0
@@ -835,12 +992,17 @@ def hydrus_request(args, parser) -> int:
with body_path.open("rb") as handle:
if options.debug:
size_hint = headers.get("Content-Length", "unknown")
- log(f"Hydrus sending file body ({size_hint} bytes)", file=sys.stderr)
+ log(
+ f"Hydrus sending file body ({size_hint} bytes)",
+ file=sys.stderr
+ )
connection.putrequest(options.method, path)
host_header = host
- if (parsed.scheme == "http" and port not in (80, None)) or (
- parsed.scheme == "https" and port not in (443, None)
- ):
+ if (parsed.scheme == "http"
+ and port not in (80,
+ None)) or (parsed.scheme == "https"
+ and port not in (443,
+ None)):
host_header = f"{host}:{port}"
connection.putheader("Host", host_header)
for key, value in headers.items():
@@ -853,20 +1015,34 @@ def hydrus_request(args, parser) -> int:
break
connection.send(chunk)
if options.debug:
- log("[downlow.py] Hydrus upload complete; awaiting response", file=sys.stderr)
+ log(
+ "[downlow.py] Hydrus upload complete; awaiting response",
+ file=sys.stderr
+ )
else:
if options.debug:
- size_hint = "none" if request_body_bytes is None else str(len(request_body_bytes))
+ size_hint = "none" if request_body_bytes is None else str(
+ len(request_body_bytes)
+ )
log(f"Hydrus sending request body bytes={size_hint}", file=sys.stderr)
- sanitized_headers = {k: v for k, v in headers.items() if v}
+ sanitized_headers = {
+ k: v
+ for k, v in headers.items() if v
+ }
connection.request(
- options.method, path, body=request_body_bytes, headers=sanitized_headers
+ options.method,
+ path,
+ body=request_body_bytes,
+ headers=sanitized_headers
)
response = connection.getresponse()
status = response.status
response_bytes = response.read()
if options.debug:
- log(f"Hydrus response received ({len(response_bytes)} bytes)", file=sys.stderr)
+ log(
+ f"Hydrus response received ({len(response_bytes)} bytes)",
+ file=sys.stderr
+ )
content_type = response.getheader("Content-Type", "")
except (OSError, http.client.HTTPException) as exc:
log(f"HTTP error: {exc}", file=sys.stderr)
@@ -890,7 +1066,10 @@ def hydrus_request(args, parser) -> int:
except (json.JSONDecodeError, UnicodeDecodeError):
payload = response_bytes.decode("utf-8", "replace")
elif payload is None and expect_cbor and decode_error is not None:
- log(f"Expected CBOR response but decoding failed: {decode_error}", file=sys.stderr)
+ log(
+ f"Expected CBOR response but decoding failed: {decode_error}",
+ file=sys.stderr
+ )
return 1
json_ready = jsonify(payload) if isinstance(payload, (dict, list)) else payload
if options.debug:
@@ -900,7 +1079,10 @@ def hydrus_request(args, parser) -> int:
elif json_ready is None:
log("{}")
else:
- log(json.dumps({"value": json_ready}, ensure_ascii=False))
+ log(json.dumps({
+ "value": json_ready
+ },
+ ensure_ascii=False))
return 0 if 200 <= status < 400 else 1
@@ -1030,13 +1212,16 @@ def hydrus_export(args, _parser) -> int:
hydrus_url = getattr(args, "hydrus_url", None)
if not hydrus_url:
try:
- from config import load_config, get_hydrus_url
+ from SYS.config import load_config, get_hydrus_url
hydrus_url = get_hydrus_url(load_config())
except Exception as exc:
hydrus_url = None
if os.environ.get("DOWNLOW_DEBUG"):
- log(f"hydrus-export could not load Hydrus URL: {exc}", file=sys.stderr)
+ log(
+ f"hydrus-export could not load Hydrus URL: {exc}",
+ file=sys.stderr
+ )
if hydrus_url:
try:
setattr(args, "hydrus_url", hydrus_url)
@@ -1047,19 +1232,30 @@ def hydrus_export(args, _parser) -> int:
if hydrus_url and file_hash:
try:
client = HydrusNetwork(
- url=hydrus_url, access_key=args.access_key, timeout=args.timeout
+ url=hydrus_url,
+ access_key=args.access_key,
+ timeout=args.timeout
)
- meta_response = client.fetch_file_metadata(hashes=[file_hash], include_mime=True)
- entries = meta_response.get("metadata") if isinstance(meta_response, dict) else None
+ meta_response = client.fetch_file_metadata(
+ hashes=[file_hash],
+ include_mime=True
+ )
+ entries = meta_response.get("metadata") if isinstance(
+ meta_response,
+ dict
+ ) else None
if isinstance(entries, list) and entries:
entry = entries[0]
ext_value = _normalise_ext(
- entry.get("ext") if isinstance(entry, dict) else None
+ entry.get("ext") if isinstance(entry,
+ dict) else None
)
if ext_value:
resolved_suffix = ext_value
else:
- mime_value = entry.get("mime") if isinstance(entry, dict) else None
+ mime_value = entry.get("mime"
+ ) if isinstance(entry,
+ dict) else None
resolved_suffix = _extension_from_mime(mime_value)
except Exception as exc: # pragma: no cover - defensive
if os.environ.get("DOWNLOW_DEBUG"):
@@ -1072,7 +1268,8 @@ def hydrus_export(args, _parser) -> int:
source_suffix = resolved_suffix
suffix = source_suffix or ".hydrus"
- if suffix and output_path.suffix.lower() in {"", ".bin"}:
+ if suffix and output_path.suffix.lower() in {"",
+ ".bin"}:
if output_path.suffix.lower() != suffix.lower():
output_path = output_path.with_suffix(suffix)
target_dir = output_path.parent
@@ -1082,7 +1279,11 @@ def hydrus_export(args, _parser) -> int:
ensure_directory(temp_dir)
except RuntimeError:
temp_dir = target_dir
- temp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix, dir=str(temp_dir))
+ temp_file = tempfile.NamedTemporaryFile(
+ delete=False,
+ suffix=suffix,
+ dir=str(temp_dir)
+ )
temp_path = Path(temp_file.name)
temp_file.close()
downloaded_bytes = 0
@@ -1090,7 +1291,12 @@ def hydrus_export(args, _parser) -> int:
"Hydrus-Client-API-Access-Key": args.access_key,
}
try:
- downloaded_bytes = download_hydrus_file(args.file_url, headers, temp_path, args.timeout)
+ downloaded_bytes = download_hydrus_file(
+ args.file_url,
+ headers,
+ temp_path,
+ args.timeout
+ )
if os.environ.get("DOWNLOW_DEBUG"):
log(f"hydrus-export downloaded {downloaded_bytes} bytes", file=sys.stderr)
except httpx.RequestError as exc:
@@ -1139,20 +1345,24 @@ def hydrus_export(args, _parser) -> int:
if completed.returncode != 0:
error_details = ffmpeg_log or (completed.stdout or "").strip()
raise RuntimeError(
- f"ffmpeg failed with exit code {completed.returncode}"
- + (f": {error_details}" if error_details else "")
+ f"ffmpeg failed with exit code {completed.returncode}" +
+ (f": {error_details}" if error_details else "")
)
shutil.move(str(converted_tmp), str(final_target))
result_path = final_target
apply_mutagen_metadata(result_path, ffmpeg_metadata, args.format)
result_size = result_path.stat().st_size if result_path.exists() else None
- payload: dict[str, object] = {"output": str(result_path)}
+ payload: dict[str,
+ object] = {
+ "output": str(result_path)
+ }
if downloaded_bytes:
payload["source_bytes"] = downloaded_bytes
if result_size is not None:
payload["size_bytes"] = result_size
if metadata_payload:
- payload["metadata_keys"] = sorted(ffmpeg_metadata.keys()) if ffmpeg_metadata else []
+ payload["metadata_keys"] = sorted(ffmpeg_metadata.keys()
+ ) if ffmpeg_metadata else []
log(json.dumps(payload, ensure_ascii=False))
if ffmpeg_log:
log(ffmpeg_log, file=sys.stderr)
@@ -1179,7 +1389,6 @@ def hydrus_export(args, _parser) -> int:
# This section consolidates functions formerly in hydrus_wrapper.py
# Provides: supported filetypes, client initialization, caching, service resolution
-
# Official Hydrus supported filetypes
# Source: https://hydrusnetwork.github.io/hydrus/filetypes.html
SUPPORTED_FILETYPES = {
@@ -1240,9 +1449,11 @@ SUPPORTED_FILETYPES = {
".pdf": "application/pdf",
".epub": "application/epub+zip",
".djvu": "image/vnd.djvu",
- ".docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+ ".docx":
+ "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
".xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
- ".pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
+ ".pptx":
+ "application/vnd.openxmlformats-officedocument.presentationml.presentation",
".doc": "application/msword",
".xls": "application/vnd.ms-excel",
".ppt": "application/vnd.ms-powerpoint",
@@ -1271,9 +1482,9 @@ SUPPORTED_FILETYPES = {
# Flatten to get all supported extensions
ALL_SUPPORTED_EXTENSIONS = set(GLOBAL_SUPPORTED_EXTENSIONS)
-
# Global Hydrus client cache to reuse session keys
-_hydrus_client_cache: dict[str, Any] = {}
+_hydrus_client_cache: dict[str,
+ Any] = {}
# Cache Hydrus availability across the session
_HYDRUS_AVAILABLE: Optional[bool] = None
@@ -1287,7 +1498,10 @@ def reset_cache() -> None:
_HYDRUS_UNAVAILABLE_REASON = None
-def is_available(config: dict[str, Any], use_cache: bool = True) -> tuple[bool, Optional[str]]:
+def is_available(config: dict[str,
+ Any],
+ use_cache: bool = True) -> tuple[bool,
+ Optional[str]]:
"""Check if Hydrus is available and accessible.
Performs a lightweight probe to verify:
@@ -1310,7 +1524,7 @@ def is_available(config: dict[str, Any], use_cache: bool = True) -> tuple[bool,
return _HYDRUS_AVAILABLE, _HYDRUS_UNAVAILABLE_REASON
# Use new config helpers first, fallback to old method
- from config import get_hydrus_url, get_hydrus_access_key
+ from SYS.config import get_hydrus_url, get_hydrus_access_key
url = (get_hydrus_url(config, "home") or "").strip()
if not url:
@@ -1399,7 +1613,7 @@ def get_client(config: dict[str, Any]) -> HydrusNetwork:
if not available:
raise RuntimeError(f"Hydrus is unavailable: {reason}")
- from config import get_hydrus_url, get_hydrus_access_key
+ from SYS.config import get_hydrus_url, get_hydrus_access_key
# Use new config helpers
hydrus_url = (get_hydrus_url(config, "home") or "").strip()
@@ -1446,7 +1660,8 @@ def get_tag_service_name(config: dict[str, Any]) -> str:
return "my tags"
-def get_tag_service_key(client: HydrusNetwork, fallback_name: str = "my tags") -> Optional[str]:
+def get_tag_service_key(client: HydrusNetwork,
+ fallback_name: str = "my tags") -> Optional[str]:
"""Get the service key for a named tag service.
Queries the Hydrus client's services and finds the service key matching
@@ -1498,7 +1713,11 @@ CHUNK_SIZE = 1024 * 1024 # 1 MiB
def download_hydrus_file(
- file_url: str, headers: dict[str, str], destination: Path, timeout: float
+ file_url: str,
+ headers: dict[str,
+ str],
+ destination: Path,
+ timeout: float
) -> int:
"""Download *file_url* into *destination* returning the byte count with progress bar."""
from SYS.progress import print_progress, print_final_progress
diff --git a/API/alldebrid.py b/API/alldebrid.py
index b2fff12..d31a673 100644
--- a/API/alldebrid.py
+++ b/API/alldebrid.py
@@ -30,19 +30,24 @@ _SUPPORTED_HOSTERS_CACHE: Optional[Dict[str, Dict[str, Any]]] = None
_CACHE_TIMESTAMP: float = 0
_CACHE_DURATION: float = 3600 # 1 hour
-
# Cache for init-time connectivity checks (api_key fingerprint -> (ok, reason))
-_INIT_CHECK_CACHE: Dict[str, Tuple[bool, Optional[str]]] = {}
+_INIT_CHECK_CACHE: Dict[str,
+ Tuple[bool,
+ Optional[str]]] = {}
def _ping_alldebrid(base_url: str) -> Tuple[bool, Optional[str]]:
"""Ping the AllDebrid API base URL (no API key required)."""
try:
url = str(base_url or "").rstrip("/") + "/ping"
- with HTTPClient(timeout=10.0, headers={"User-Agent": "downlow/1.0"}) as client:
+ with HTTPClient(timeout=10.0,
+ headers={
+ "User-Agent": "downlow/1.0"
+ }) as client:
response = client.get(url)
data = json.loads(response.content.decode("utf-8"))
- if data.get("status") == "success" and data.get("data", {}).get("ping") == "pong":
+ if data.get("status") == "success" and data.get("data",
+ {}).get("ping") == "pong":
return True, None
return False, "Invalid API response"
except Exception as exc:
@@ -107,10 +112,12 @@ class AllDebridClient:
def _request(
self,
endpoint: str,
- params: Optional[Dict[str, Any]] = None,
+ params: Optional[Dict[str,
+ Any]] = None,
*,
method: Optional[str] = None,
- ) -> Dict[str, Any]:
+ ) -> Dict[str,
+ Any]:
"""Make a request to AllDebrid API.
Args:
@@ -157,12 +164,19 @@ class AllDebridClient:
except Exception as req_err:
# Log detailed error info
logger.error(
- f"[AllDebrid] Request error to {endpoint}: {req_err}", exc_info=True
+ f"[AllDebrid] Request error to {endpoint}: {req_err}",
+ exc_info=True
)
- if hasattr(req_err, "response") and req_err.response is not None: # type: ignore
+ if hasattr(req_err,
+ "response"
+ ) and req_err.response is not None: # type: ignore
try:
- error_body = req_err.response.content.decode("utf-8") # type: ignore
- logger.error(f"[AllDebrid] Response body: {error_body[:200]}")
+ error_body = req_err.response.content.decode(
+ "utf-8"
+ ) # type: ignore
+ logger.error(
+ f"[AllDebrid] Response body: {error_body[:200]}"
+ )
except:
pass
raise
@@ -172,7 +186,9 @@ class AllDebridClient:
# Check for API errors
if data.get("status") == "error":
- error_msg = data.get("error", {}).get("message", "Unknown error")
+ error_msg = data.get("error",
+ {}).get("message",
+ "Unknown error")
logger.error(f"[AllDebrid] API error: {error_msg}")
raise AllDebridError(f"AllDebrid API error: {error_msg}")
@@ -200,11 +216,15 @@ class AllDebridClient:
raise AllDebridError(f"Invalid URL: {link}")
try:
- response = self._request("link/unlock", {"link": link})
+ response = self._request("link/unlock",
+ {
+ "link": link
+ })
# Check if unlock was successful
if response.get("status") == "success":
- data = response.get("data", {})
+ data = response.get("data",
+ {})
# AllDebrid returns the download info in 'link' field
if "link" in data:
@@ -251,10 +271,18 @@ class AllDebridClient:
for category in ("hosts", "streams", "redirectors"):
values = domains.get(category)
- if isinstance(values, list) and any(str(d).lower() == host for d in values):
- return {"supported": True, "category": category, "domain": host}
+ if isinstance(values,
+ list) and any(str(d).lower() == host for d in values):
+ return {
+ "supported": True,
+ "category": category,
+ "domain": host
+ }
- return {"supported": False, "domain": host}
+ return {
+ "supported": False,
+ "domain": host
+ }
except AllDebridError:
raise
except Exception as exc:
@@ -274,7 +302,8 @@ class AllDebridClient:
response = self._request("user")
if response.get("status") == "success":
- return response.get("data", {})
+ return response.get("data",
+ {})
return {}
except AllDebridError:
@@ -296,8 +325,10 @@ class AllDebridClient:
response = self._request("hosts/domains")
if response.get("status") == "success":
- data = response.get("data", {})
- return data if isinstance(data, dict) else {}
+ data = response.get("data",
+ {})
+ return data if isinstance(data,
+ dict) else {}
return {}
except AllDebridError:
@@ -331,10 +362,14 @@ class AllDebridClient:
try:
# API endpoint: POST /v4/magnet/upload
# Format: /magnet/upload?apikey=key&magnets[]=magnet:?xt=...
- response = self._request("magnet/upload", {"magnets[]": magnet_uri})
+ response = self._request("magnet/upload",
+ {
+ "magnets[]": magnet_uri
+ })
if response.get("status") == "success":
- data = response.get("data", {})
+ data = response.get("data",
+ {})
magnets = data.get("magnets", [])
if magnets and len(magnets) > 0:
@@ -356,7 +391,10 @@ class AllDebridClient:
except Exception as exc:
raise AllDebridError(f"Failed to submit magnet: {exc}")
- def magnet_status(self, magnet_id: int, include_files: bool = False) -> Dict[str, Any]:
+ def magnet_status(self,
+ magnet_id: int,
+ include_files: bool = False) -> Dict[str,
+ Any]:
"""Get status of a magnet currently being processed or stored.
Status codes:
@@ -396,13 +434,18 @@ class AllDebridClient:
self.base_url = self.BASE_URL_V41
try:
- response = self._request("magnet/status", {"id": str(magnet_id)})
+ response = self._request("magnet/status",
+ {
+ "id": str(magnet_id)
+ })
finally:
self.base_url = old_base
if response.get("status") == "success":
- data = response.get("data", {})
- magnets = data.get("magnets", {})
+ data = response.get("data",
+ {})
+ magnets = data.get("magnets",
+ {})
# Handle both list and dict responses
if isinstance(magnets, list) and len(magnets) > 0:
@@ -439,7 +482,8 @@ class AllDebridClient:
if response.get("status") != "success":
return []
- data = response.get("data", {})
+ data = response.get("data",
+ {})
magnets = data.get("magnets", [])
if isinstance(magnets, list):
@@ -459,8 +503,12 @@ class AllDebridClient:
raise AllDebridError(f"Failed to list magnets: {exc}")
def magnet_status_live(
- self, magnet_id: int, session: Optional[int] = None, counter: int = 0
- ) -> Dict[str, Any]:
+ self,
+ magnet_id: int,
+ session: Optional[int] = None,
+ counter: int = 0
+ ) -> Dict[str,
+ Any]:
"""Get live status of a magnet using delta sync mode.
The live mode endpoint provides real-time progress by only sending
@@ -493,7 +541,10 @@ class AllDebridClient:
old_base = self.base_url
self.base_url = self.BASE_URL_V41
try:
- payload: Dict[str, Any] = {"id": str(magnet_id)}
+ payload: Dict[str,
+ Any] = {
+ "id": str(magnet_id)
+ }
if session is not None:
payload["session"] = str(int(session))
payload["counter"] = str(int(counter))
@@ -502,7 +553,8 @@ class AllDebridClient:
self.base_url = old_base
if response.get("status") == "success":
- data = response.get("data", {})
+ data = response.get("data",
+ {})
magnets = data.get("magnets", [])
# For specific magnet id, return the first match from the array.
@@ -552,7 +604,8 @@ class AllDebridClient:
response = self._request("magnet/files", params)
if response.get("status") == "success":
- data = response.get("data", {})
+ data = response.get("data",
+ {})
magnets = data.get("magnets", [])
# Convert list to dict keyed by ID (as string) for easier access
@@ -603,10 +656,14 @@ class AllDebridClient:
if not hash_value or len(hash_value) < 32:
return None
- response = self._request("magnet/instant", {"magnet": hash_value})
+ response = self._request("magnet/instant",
+ {
+ "magnet": hash_value
+ })
if response.get("status") == "success":
- data = response.get("data", {})
+ data = response.get("data",
+ {})
# Returns 'files' array if available, or empty
return data.get("files", [])
@@ -635,7 +692,10 @@ class AllDebridClient:
raise AllDebridError(f"Invalid magnet ID: {magnet_id}")
try:
- response = self._request("magnet/delete", {"id": str(magnet_id)})
+ response = self._request("magnet/delete",
+ {
+ "id": str(magnet_id)
+ })
if response.get("status") == "success":
return True
@@ -664,7 +724,8 @@ def _get_cached_supported_hosters(api_key: str) -> Set[str]:
now = time.time()
# Return cached result if still valid
- if _SUPPORTED_HOSTERS_CACHE is not None and (now - _CACHE_TIMESTAMP) < _CACHE_DURATION:
+ if _SUPPORTED_HOSTERS_CACHE is not None and (now -
+ _CACHE_TIMESTAMP) < _CACHE_DURATION:
return set(_SUPPORTED_HOSTERS_CACHE.keys())
# Fetch fresh list from API
@@ -686,11 +747,15 @@ def _get_cached_supported_hosters(api_key: str) -> Set[str]:
all_domains.update(hosters_dict["streams"])
# Add redirectors
- if "redirectors" in hosters_dict and isinstance(hosters_dict["redirectors"], list):
+ if "redirectors" in hosters_dict and isinstance(hosters_dict["redirectors"],
+ list):
all_domains.update(hosters_dict["redirectors"])
# Cache as dict for consistency
- _SUPPORTED_HOSTERS_CACHE = {domain: {} for domain in all_domains}
+ _SUPPORTED_HOSTERS_CACHE = {
+ domain: {}
+ for domain in all_domains
+ }
_CACHE_TIMESTAMP = now
if all_domains:
@@ -905,7 +970,8 @@ def unlock_link_cmdlet(result: Any, args: Sequence[str], config: Dict[str, Any])
0 on success, 1 on failure
"""
- def _extract_link_from_args_or_result(result_obj: Any, argv: Sequence[str]) -> Optional[str]:
+ def _extract_link_from_args_or_result(result_obj: Any,
+ argv: Sequence[str]) -> Optional[str]:
# Prefer an explicit URL in args.
for a in argv or []:
if isinstance(a, str) and a.startswith(("http://", "https://")):
@@ -923,7 +989,9 @@ def unlock_link_cmdlet(result: Any, args: Sequence[str], config: Dict[str, Any])
# Current config format
try:
provider_cfg = cfg.get("provider") if isinstance(cfg, dict) else None
- ad_cfg = provider_cfg.get("alldebrid") if isinstance(provider_cfg, dict) else None
+ ad_cfg = provider_cfg.get("alldebrid"
+ ) if isinstance(provider_cfg,
+ dict) else None
api_key = ad_cfg.get("api_key") if isinstance(ad_cfg, dict) else None
if isinstance(api_key, str) and api_key.strip():
return api_key.strip()
@@ -943,7 +1011,11 @@ def unlock_link_cmdlet(result: Any, args: Sequence[str], config: Dict[str, Any])
return None
- def _add_direct_link_to_result(result_obj: Any, direct_link: str, original_link: str) -> None:
+ def _add_direct_link_to_result(
+ result_obj: Any,
+ direct_link: str,
+ original_link: str
+ ) -> None:
if not isinstance(direct_link, str) or not direct_link.strip():
return
if isinstance(result_obj, dict):
@@ -963,7 +1035,10 @@ def unlock_link_cmdlet(result: Any, args: Sequence[str], config: Dict[str, Any])
api_key = _get_alldebrid_api_key_from_config(config)
if not api_key:
- log("AllDebrid API key not configured (provider.alldebrid.api_key)", file=sys.stderr)
+ log(
+ "AllDebrid API key not configured (provider.alldebrid.api_key)",
+ file=sys.stderr
+ )
return 1
# Try to unlock the link
@@ -995,7 +1070,12 @@ def _register_unlock_link():
from cmdlet import register
@register(["unlock-link"])
- def unlock_link_wrapper(result: Any, args: Sequence[str], config: Dict[str, Any]) -> int:
+ def unlock_link_wrapper(
+ result: Any,
+ args: Sequence[str],
+ config: Dict[str,
+ Any]
+ ) -> int:
"""Wrapper to make unlock_link_cmdlet available as cmdlet."""
import pipeline as ctx
diff --git a/API/folder.py b/API/folder.py
index ef5f3c4..573a82a 100644
--- a/API/folder.py
+++ b/API/folder.py
@@ -82,7 +82,6 @@ MEDIA_EXTENSIONS = {
".doc",
}
-
# ============================================================================
# SIDECAR FILE HANDLING
# ============================================================================
@@ -110,7 +109,10 @@ def read_sidecar(sidecar_path: Path) -> Tuple[Optional[str], List[str], List[str
def write_sidecar(
- media_path: Path, tags: List[str], url: List[str], hash_value: Optional[str] = None
+ media_path: Path,
+ tags: List[str],
+ url: List[str],
+ hash_value: Optional[str] = None
) -> bool:
"""Write metadata to a sidecar file.
@@ -230,7 +232,9 @@ class API_folder_store:
# each thread will get its own cursor
# Set a generous timeout to avoid "database is locked" errors during heavy concurrency
self.connection = sqlite3.connect(
- str(self.db_path), check_same_thread=False, timeout=60.0
+ str(self.db_path),
+ check_same_thread=False,
+ timeout=60.0
)
self.connection.row_factory = sqlite3.Row
@@ -335,7 +339,9 @@ class API_folder_store:
cursor.execute("CREATE INDEX IF NOT EXISTS idx_metadata_ext ON metadata(ext)")
cursor.execute("CREATE INDEX IF NOT EXISTS idx_worker_id ON worker(worker_id)")
cursor.execute("CREATE INDEX IF NOT EXISTS idx_worker_status ON worker(status)")
- cursor.execute("CREATE INDEX IF NOT EXISTS idx_worker_type ON worker(worker_type)")
+ cursor.execute(
+ "CREATE INDEX IF NOT EXISTS idx_worker_type ON worker(worker_type)"
+ )
self._migrate_metadata_schema(cursor)
self._migrate_notes_schema(cursor)
@@ -348,7 +354,9 @@ class API_folder_store:
def _ensure_worker_tables(self, cursor) -> None:
"""Ensure the modern worker tables exist, dropping legacy ones if needed."""
- cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND name='worker'")
+ cursor.execute(
+ "SELECT name FROM sqlite_master WHERE type='table' AND name='worker'"
+ )
has_worker = cursor.fetchone() is not None
if not has_worker:
cursor.execute("DROP TABLE IF EXISTS workers")
@@ -402,7 +410,8 @@ class API_folder_store:
"""Backfill columns for older worker tables during upgrade."""
try:
cursor.execute("PRAGMA table_info(worker)")
- existing_columns = {row[1] for row in cursor.fetchall()}
+ existing_columns = {row[1]
+ for row in cursor.fetchall()}
except Exception as exc:
logger.error(f"Error introspecting worker table: {exc}")
return
@@ -421,7 +430,9 @@ class API_folder_store:
cursor.execute(f"ALTER TABLE worker ADD COLUMN {col_name} {ddl}")
logger.info(f"Added '{col_name}' column to worker table")
except Exception as exc:
- logger.warning(f"Could not add '{col_name}' column to worker table: {exc}")
+ logger.warning(
+ f"Could not add '{col_name}' column to worker table: {exc}"
+ )
def _insert_worker_log_entry(
self,
@@ -439,7 +450,11 @@ class API_folder_store:
INSERT INTO worker_log (worker_id, event_type, step, channel, message)
VALUES (?, ?, ?, ?, ?)
""",
- (worker_id, event_type, step, channel, message),
+ (worker_id,
+ event_type,
+ step,
+ channel,
+ message),
)
self._prune_worker_log_entries(cursor, worker_id)
@@ -454,7 +469,8 @@ class API_folder_store:
ORDER BY id DESC
LIMIT 1 OFFSET ?
""",
- (worker_id, WORKER_LOG_MAX_ENTRIES - 1),
+ (worker_id,
+ WORKER_LOG_MAX_ENTRIES - 1),
)
row = cursor.fetchone()
if not row:
@@ -462,10 +478,14 @@ class API_folder_store:
cutoff_id = row[0]
cursor.execute(
"DELETE FROM worker_log WHERE worker_id = ? AND id < ?",
- (worker_id, cutoff_id),
+ (worker_id,
+ cutoff_id),
)
- def get_worker_events(self, worker_id: str, limit: int = 500) -> List[Dict[str, Any]]:
+ def get_worker_events(self,
+ worker_id: str,
+ limit: int = 500) -> List[Dict[str,
+ Any]]:
"""Return chronological worker log events for timelines."""
try:
cursor = self.connection.cursor()
@@ -477,27 +497,43 @@ class API_folder_store:
ORDER BY id ASC
LIMIT ?
""",
- (worker_id, limit),
+ (worker_id,
+ limit),
)
return [dict(row) for row in cursor.fetchall()]
except Exception as exc:
- logger.error(f"Error retrieving worker events for {worker_id}: {exc}", exc_info=True)
+ logger.error(
+ f"Error retrieving worker events for {worker_id}: {exc}",
+ exc_info=True
+ )
return []
- def clear_worker_events(self, worker_id: str, event_type: Optional[str] = None) -> None:
+ def clear_worker_events(
+ self,
+ worker_id: str,
+ event_type: Optional[str] = None
+ ) -> None:
"""Remove worker log entries, optionally filtered by event type."""
try:
cursor = self.connection.cursor()
if event_type:
cursor.execute(
"DELETE FROM worker_log WHERE worker_id = ? AND event_type = ?",
- (worker_id, event_type),
+ (worker_id,
+ event_type),
)
else:
- cursor.execute("DELETE FROM worker_log WHERE worker_id = ?", (worker_id,))
+ cursor.execute(
+ "DELETE FROM worker_log WHERE worker_id = ?",
+ (worker_id,
+ )
+ )
self.connection.commit()
except Exception as exc:
- logger.error(f"Error clearing worker log for {worker_id}: {exc}", exc_info=True)
+ logger.error(
+ f"Error clearing worker log for {worker_id}: {exc}",
+ exc_info=True
+ )
def _migrate_metadata_schema(self, cursor) -> None:
"""Ensure metadata schema is up-to-date.
@@ -508,7 +544,8 @@ class API_folder_store:
try:
# Check if this is a fresh new database (hash-based schema)
cursor.execute("PRAGMA table_info(metadata)")
- existing_columns = {row[1] for row in cursor.fetchall()}
+ existing_columns = {row[1]
+ for row in cursor.fetchall()}
# Legacy migration: If old schema exists, try to import data.
# Old schema would have had: id (INTEGER PRIMARY KEY), file_hash (TEXT), etc.
@@ -518,7 +555,9 @@ class API_folder_store:
"Detected legacy metadata schema - importing to new hash-based schema"
)
# This would be complex legacy migration - for now just note it.
- logger.info("Legacy metadata table detected but import not yet implemented")
+ logger.info(
+ "Legacy metadata table detected but import not yet implemented"
+ )
return
# Unknown/unsupported schema; nothing we can safely do here.
@@ -542,7 +581,9 @@ class API_folder_store:
for col_name, col_def in column_specs.items():
if col_name not in existing_columns:
try:
- cursor.execute(f"ALTER TABLE metadata ADD COLUMN {col_name} {col_def}")
+ cursor.execute(
+ f"ALTER TABLE metadata ADD COLUMN {col_name} {col_def}"
+ )
existing_columns.add(col_name)
logger.info(f"Added '{col_name}' column to metadata table")
except Exception as e:
@@ -553,15 +594,21 @@ class API_folder_store:
try:
from SYS.utils_constant import get_type_from_ext
- cursor.execute("SELECT hash, ext FROM metadata WHERE type IS NULL OR type = ''")
+ cursor.execute(
+ "SELECT hash, ext FROM metadata WHERE type IS NULL OR type = ''"
+ )
rows = cursor.fetchall()
for file_hash, ext in rows:
file_type = get_type_from_ext(ext or "")
cursor.execute(
- "UPDATE metadata SET type = ? WHERE hash = ?", (file_type, file_hash)
+ "UPDATE metadata SET type = ? WHERE hash = ?",
+ (file_type,
+ file_hash)
)
if rows:
- logger.info(f"Populated type column for {len(rows)} metadata entries")
+ logger.info(
+ f"Populated type column for {len(rows)} metadata entries"
+ )
except Exception as e:
logger.debug(f"Could not populate type column: {e}")
@@ -617,13 +664,20 @@ class API_folder_store:
"""
UPDATE metadata SET time_modified = CURRENT_TIMESTAMP WHERE hash = ?
""",
- (file_hash,),
+ (file_hash,
+ ),
)
self.connection.commit()
except Exception as e:
- logger.debug(f"Could not update metadata modified time for hash {file_hash}: {e}")
+ logger.debug(
+ f"Could not update metadata modified time for hash {file_hash}: {e}"
+ )
- def get_or_create_file_entry(self, file_path: Path, file_hash: Optional[str] = None) -> str:
+ def get_or_create_file_entry(
+ self,
+ file_path: Path,
+ file_hash: Optional[str] = None
+ ) -> str:
"""Get or create a file entry in the database and return the hash.
Args:
@@ -646,7 +700,9 @@ class API_folder_store:
cursor = self.connection.cursor()
# Prefer existing entry by path (file_path is UNIQUE in schema).
- cursor.execute("SELECT hash FROM files WHERE file_path = ?", (db_path,))
+ cursor.execute("SELECT hash FROM files WHERE file_path = ?",
+ (db_path,
+ ))
row = cursor.fetchone()
if row and row[0]:
existing_hash = str(row[0])
@@ -661,14 +717,20 @@ class API_folder_store:
return existing_hash
# Check if file entry exists
- cursor.execute("SELECT hash FROM files WHERE hash = ?", (file_hash,))
+ cursor.execute("SELECT hash FROM files WHERE hash = ?",
+ (file_hash,
+ ))
row = cursor.fetchone()
if row:
- logger.debug(f"[get_or_create_file_entry] Found existing file hash: {file_hash}")
+ logger.debug(
+ f"[get_or_create_file_entry] Found existing file hash: {file_hash}"
+ )
return file_hash
- logger.debug(f"[get_or_create_file_entry] File entry not found, creating new one")
+ logger.debug(
+ f"[get_or_create_file_entry] File entry not found, creating new one"
+ )
stat = abs_path.stat()
try:
cursor.execute(
@@ -676,11 +738,17 @@ class API_folder_store:
INSERT INTO files (hash, file_path, file_modified)
VALUES (?, ?, ?)
""",
- (file_hash, db_path, stat.st_mtime),
+ (file_hash,
+ db_path,
+ stat.st_mtime),
)
except sqlite3.IntegrityError:
# Most likely: UNIQUE constraint on file_path. Re-fetch and return.
- cursor.execute("SELECT hash FROM files WHERE file_path = ?", (db_path,))
+ cursor.execute(
+ "SELECT hash FROM files WHERE file_path = ?",
+ (db_path,
+ )
+ )
row2 = cursor.fetchone()
if row2 and row2[0]:
existing_hash = str(row2[0])
@@ -690,7 +758,9 @@ class API_folder_store:
return existing_hash
raise
- logger.debug(f"[get_or_create_file_entry] Created new file entry for hash: {file_hash}")
+ logger.debug(
+ f"[get_or_create_file_entry] Created new file entry for hash: {file_hash}"
+ )
# Auto-create title tag
filename_without_ext = abs_path.stem
@@ -703,7 +773,8 @@ class API_folder_store:
INSERT OR IGNORE INTO tags (hash, tag)
VALUES (?, ?)
""",
- (file_hash, title_tag),
+ (file_hash,
+ title_tag),
)
logger.debug(
f"[get_or_create_file_entry] Auto-created title tag for hash {file_hash}"
@@ -725,7 +796,9 @@ class API_folder_store:
abs_path = self._normalize_input_path(file_path)
str_path = self._to_db_file_path(abs_path)
cursor = self.connection.cursor()
- cursor.execute("SELECT hash FROM files WHERE file_path = ?", (str_path,))
+ cursor.execute("SELECT hash FROM files WHERE file_path = ?",
+ (str_path,
+ ))
row = cursor.fetchone()
return row[0] if row else None
except Exception as e:
@@ -742,7 +815,8 @@ class API_folder_store:
SELECT m.* FROM metadata m
WHERE m.hash = ?
""",
- (file_hash,),
+ (file_hash,
+ ),
)
row = cursor.fetchone()
@@ -767,7 +841,10 @@ class API_folder_store:
return metadata
except Exception as e:
- logger.error(f"Error getting metadata for hash {file_hash}: {e}", exc_info=True)
+ logger.error(
+ f"Error getting metadata for hash {file_hash}: {e}",
+ exc_info=True
+ )
return None
def set_relationship_by_hash(
@@ -796,20 +873,29 @@ class API_folder_store:
cursor = self.connection.cursor()
# Ensure both hashes exist in files table (metadata has FK to files)
- cursor.execute("SELECT 1 FROM files WHERE hash = ?", (file_hash,))
+ cursor.execute("SELECT 1 FROM files WHERE hash = ?",
+ (file_hash,
+ ))
if not cursor.fetchone():
raise ValueError(f"Hash not found in store DB: {file_hash}")
- cursor.execute("SELECT 1 FROM files WHERE hash = ?", (related_file_hash,))
+ cursor.execute("SELECT 1 FROM files WHERE hash = ?",
+ (related_file_hash,
+ ))
if not cursor.fetchone():
raise ValueError(f"Hash not found in store DB: {related_file_hash}")
# Load current relationships for the main file
- cursor.execute("SELECT relationships FROM metadata WHERE hash = ?", (file_hash,))
+ cursor.execute(
+ "SELECT relationships FROM metadata WHERE hash = ?",
+ (file_hash,
+ )
+ )
row = cursor.fetchone()
relationships_str = row[0] if row else None
try:
- relationships = json.loads(relationships_str) if relationships_str else {}
+ relationships = json.loads(relationships_str
+ ) if relationships_str else {}
except (json.JSONDecodeError, TypeError):
relationships = {}
if not isinstance(relationships, dict):
@@ -830,13 +916,16 @@ class API_folder_store:
time_modified = CURRENT_TIMESTAMP,
updated_at = CURRENT_TIMESTAMP
""",
- (file_hash, json.dumps(relationships)),
+ (file_hash,
+ json.dumps(relationships)),
)
if bidirectional:
# Update the related file as well
cursor.execute(
- "SELECT relationships FROM metadata WHERE hash = ?", (related_file_hash,)
+ "SELECT relationships FROM metadata WHERE hash = ?",
+ (related_file_hash,
+ )
)
row2 = cursor.fetchone()
relationships_str2 = row2[0] if row2 else None
@@ -864,7 +953,8 @@ class API_folder_store:
time_modified = CURRENT_TIMESTAMP,
updated_at = CURRENT_TIMESTAMP
""",
- (related_file_hash, json.dumps(reverse_relationships)),
+ (related_file_hash,
+ json.dumps(reverse_relationships)),
)
self.connection.commit()
@@ -888,7 +978,8 @@ class API_folder_store:
JOIN files f ON m.hash = f.hash
WHERE m.relationships LIKE ?
""",
- (f"%{target_hash}%",),
+ (f"%{target_hash}%",
+ ),
)
results: List[Dict[str, Any]] = []
@@ -915,7 +1006,10 @@ class API_folder_store:
)
return results
except Exception as e:
- logger.error(f"Error finding files pointing to hash {target_hash}: {e}", exc_info=True)
+ logger.error(
+ f"Error finding files pointing to hash {target_hash}: {e}",
+ exc_info=True
+ )
return []
def save_metadata(self, file_path: Path, metadata: Dict[str, Any]) -> None:
@@ -979,11 +1073,18 @@ class API_folder_store:
logger.debug(f"[save_metadata] Committed metadata for hash {file_hash}")
except Exception as e:
logger.error(
- f"[save_metadata] ❌ Error saving metadata for {file_path}: {e}", exc_info=True
+ f"[save_metadata] ❌ Error saving metadata for {file_path}: {e}",
+ exc_info=True
)
raise
- def save_file_info(self, file_path: Path, metadata: Dict[str, Any], tags: List[str]) -> None:
+ def save_file_info(
+ self,
+ file_path: Path,
+ metadata: Dict[str,
+ Any],
+ tags: List[str]
+ ) -> None:
"""Save metadata and tags for a file in a single transaction."""
try:
abs_path = self._normalize_input_path(file_path)
@@ -1042,7 +1143,9 @@ class API_folder_store:
# 2. Save Tags
# We assume the tags list is complete and includes the title if needed
- cursor.execute("DELETE FROM tags WHERE hash = ?", (file_hash,))
+ cursor.execute("DELETE FROM tags WHERE hash = ?",
+ (file_hash,
+ ))
for tag in tags:
tag = tag.strip()
@@ -1052,15 +1155,19 @@ class API_folder_store:
INSERT OR IGNORE INTO tags (hash, tag)
VALUES (?, ?)
""",
- (file_hash, tag),
+ (file_hash,
+ tag),
)
self.connection.commit()
- logger.debug(f"[save_file_info] Committed metadata and tags for hash {file_hash}")
+ logger.debug(
+ f"[save_file_info] Committed metadata and tags for hash {file_hash}"
+ )
except Exception as e:
logger.error(
- f"[save_file_info] ❌ Error saving file info for {file_path}: {e}", exc_info=True
+ f"[save_file_info] ❌ Error saving file info for {file_path}: {e}",
+ exc_info=True
)
raise
@@ -1075,7 +1182,8 @@ class API_folder_store:
WHERE t.hash = ?
ORDER BY t.tag
""",
- (file_hash,),
+ (file_hash,
+ ),
)
return [row[0] for row in cursor.fetchall()]
@@ -1099,22 +1207,28 @@ class API_folder_store:
"""
SELECT tag FROM tags WHERE hash = ? AND tag LIKE 'title:%'
""",
- (file_hash,),
+ (file_hash,
+ ),
)
existing_title = cursor.fetchone()
- cursor.execute("DELETE FROM tags WHERE hash = ?", (file_hash,))
+ cursor.execute("DELETE FROM tags WHERE hash = ?",
+ (file_hash,
+ ))
logger.debug(f"[save_tags] Deleted existing tags for hash {file_hash}")
# Check if new tags provide a title
- new_title_provided = any(str(t).strip().lower().startswith("title:") for t in tags)
+ new_title_provided = any(
+ str(t).strip().lower().startswith("title:") for t in tags
+ )
if existing_title and not new_title_provided:
cursor.execute(
"""
INSERT INTO tags (hash, tag) VALUES (?, ?)
""",
- (file_hash, existing_title[0]),
+ (file_hash,
+ existing_title[0]),
)
logger.debug(f"[save_tags] Preserved existing title tag")
elif not existing_title and not new_title_provided:
@@ -1127,7 +1241,8 @@ class API_folder_store:
"""
INSERT INTO tags (hash, tag) VALUES (?, ?)
""",
- (file_hash, title_tag),
+ (file_hash,
+ title_tag),
)
logger.debug(f"[save_tags] Created auto-title tag: {title_tag}")
@@ -1139,14 +1254,17 @@ class API_folder_store:
INSERT OR IGNORE INTO tags (hash, tag)
VALUES (?, ?)
""",
- (file_hash, tag),
+ (file_hash,
+ tag),
)
self.connection.commit()
logger.debug(f"[save_tags] Committed {len(tags)} tags for hash {file_hash}")
# Verify they were actually saved
- cursor.execute("SELECT COUNT(*) FROM tags WHERE hash = ?", (file_hash,))
+ cursor.execute("SELECT COUNT(*) FROM tags WHERE hash = ?",
+ (file_hash,
+ ))
saved_count = cursor.fetchone()[0]
logger.debug(
f"[save_tags] Verified: {saved_count} tags in database for hash {file_hash}"
@@ -1154,7 +1272,10 @@ class API_folder_store:
self._update_metadata_modified_time(file_hash)
except Exception as e:
- logger.error(f"[save_tags] ❌ Error saving tags for {file_path}: {e}", exc_info=True)
+ logger.error(
+ f"[save_tags] ❌ Error saving tags for {file_path}: {e}",
+ exc_info=True
+ )
raise
def add_tags(self, file_path: Path, tags: List[str]) -> None:
@@ -1164,7 +1285,11 @@ class API_folder_store:
cursor = self.connection.cursor()
user_title_tag = next(
- (tag.strip() for tag in tags if tag.strip().lower().startswith("title:")), None
+ (
+ tag.strip()
+ for tag in tags if tag.strip().lower().startswith("title:")
+ ),
+ None
)
if user_title_tag:
@@ -1172,14 +1297,16 @@ class API_folder_store:
"""
DELETE FROM tags WHERE hash = ? AND tag LIKE 'title:%'
""",
- (file_hash,),
+ (file_hash,
+ ),
)
else:
cursor.execute(
"""
SELECT COUNT(*) FROM tags WHERE hash = ? AND tag LIKE 'title:%'
""",
- (file_hash,),
+ (file_hash,
+ ),
)
has_title = cursor.fetchone()[0] > 0
@@ -1194,7 +1321,8 @@ class API_folder_store:
INSERT OR IGNORE INTO tags (hash, tag)
VALUES (?, ?)
""",
- (file_hash, title_tag),
+ (file_hash,
+ title_tag),
)
for tag in tags:
@@ -1205,7 +1333,8 @@ class API_folder_store:
INSERT OR IGNORE INTO tags (hash, tag)
VALUES (?, ?)
""",
- (file_hash, tag),
+ (file_hash,
+ tag),
)
self.connection.commit()
@@ -1230,7 +1359,8 @@ class API_folder_store:
WHERE hash = ?
AND tag = ?
""",
- (file_hash, tag),
+ (file_hash,
+ tag),
)
self.connection.commit()
@@ -1245,7 +1375,11 @@ class API_folder_store:
cursor = self.connection.cursor()
user_title_tag = next(
- (tag.strip() for tag in tags if tag.strip().lower().startswith("title:")), None
+ (
+ tag.strip()
+ for tag in tags if tag.strip().lower().startswith("title:")
+ ),
+ None
)
if user_title_tag:
@@ -1253,7 +1387,8 @@ class API_folder_store:
"""
DELETE FROM tags WHERE hash = ? AND tag LIKE 'title:%'
""",
- (file_hash,),
+ (file_hash,
+ ),
)
for tag in tags:
@@ -1264,7 +1399,8 @@ class API_folder_store:
INSERT OR IGNORE INTO tags (hash, tag)
VALUES (?, ?)
""",
- (file_hash, tag),
+ (file_hash,
+ tag),
)
self.connection.commit()
@@ -1288,16 +1424,25 @@ class API_folder_store:
WHERE hash = ?
AND tag = ?
""",
- (file_hash, tag),
+ (file_hash,
+ tag),
)
self.connection.commit()
logger.debug(f"Removed {len(tags)} tags for hash {file_hash}")
except Exception as e:
- logger.error(f"Error removing tags for hash {file_hash}: {e}", exc_info=True)
+ logger.error(
+ f"Error removing tags for hash {file_hash}: {e}",
+ exc_info=True
+ )
raise
- def update_metadata_by_hash(self, file_hash: str, metadata_updates: Dict[str, Any]) -> None:
+ def update_metadata_by_hash(
+ self,
+ file_hash: str,
+ metadata_updates: Dict[str,
+ Any]
+ ) -> None:
"""Update metadata for a file by hash."""
try:
cursor = self.connection.cursor()
@@ -1319,7 +1464,8 @@ class API_folder_store:
# This can happen for older DBs or entries created without explicit metadata.
cursor.execute(
"INSERT OR IGNORE INTO metadata (hash) VALUES (?)",
- (file_hash,),
+ (file_hash,
+ ),
)
values.append(file_hash)
@@ -1329,7 +1475,10 @@ class API_folder_store:
self.connection.commit()
logger.debug(f"Updated metadata for hash {file_hash}")
except Exception as e:
- logger.error(f"Error updating metadata for hash {file_hash}: {e}", exc_info=True)
+ logger.error(
+ f"Error updating metadata for hash {file_hash}: {e}",
+ exc_info=True
+ )
raise
def set_relationship(
@@ -1361,7 +1510,8 @@ class API_folder_store:
"""
SELECT relationships FROM metadata WHERE hash = ?
""",
- (file_hash,),
+ (file_hash,
+ ),
)
row = cursor.fetchone()
@@ -1397,7 +1547,8 @@ class API_folder_store:
relationships = excluded.relationships,
time_modified = CURRENT_TIMESTAMP
""",
- (file_hash, json.dumps(relationships)),
+ (file_hash,
+ json.dumps(relationships)),
)
logger.debug(
@@ -1416,7 +1567,8 @@ class API_folder_store:
"""
SELECT relationships FROM metadata WHERE hash = ?
""",
- (related_file_hash,),
+ (related_file_hash,
+ ),
)
row = cursor.fetchone()
@@ -1448,7 +1600,8 @@ class API_folder_store:
relationships = excluded.relationships,
time_modified = CURRENT_TIMESTAMP
""",
- (related_file_hash, json.dumps(reverse_relationships)),
+ (related_file_hash,
+ json.dumps(reverse_relationships)),
)
self.connection.commit()
@@ -1488,7 +1641,10 @@ class API_folder_store:
return self.find_files_pointing_to_hash(target_hash)
except Exception as e:
- logger.error(f"Error finding files pointing to {target_path}: {e}", exc_info=True)
+ logger.error(
+ f"Error finding files pointing to {target_path}: {e}",
+ exc_info=True
+ )
return []
def get_note(self, file_hash: str) -> Optional[str]:
@@ -1508,16 +1664,21 @@ class API_folder_store:
cursor = self.connection.cursor()
cursor.execute(
"SELECT name, note FROM notes WHERE hash = ? ORDER BY name ASC",
- (file_hash,),
+ (file_hash,
+ ),
)
- out: Dict[str, str] = {}
+ out: Dict[str,
+ str] = {}
for name, note in cursor.fetchall() or []:
if not name:
continue
out[str(name)] = str(note or "")
return out
except Exception as e:
- logger.error(f"Error getting notes for hash {file_hash}: {e}", exc_info=True)
+ logger.error(
+ f"Error getting notes for hash {file_hash}: {e}",
+ exc_info=True
+ )
return {}
def save_note(self, file_path: Path, note: str) -> None:
@@ -1541,7 +1702,9 @@ class API_folder_store:
note = excluded.note,
updated_at = CURRENT_TIMESTAMP
""",
- (file_hash, note_name, note),
+ (file_hash,
+ note_name,
+ note),
)
self.connection.commit()
logger.debug(f"Saved note '{note_name}' for {file_path}")
@@ -1558,11 +1721,15 @@ class API_folder_store:
cursor = self.connection.cursor()
cursor.execute(
"DELETE FROM notes WHERE hash = ? AND name = ?",
- (file_hash, note_name),
+ (file_hash,
+ note_name),
)
self.connection.commit()
except Exception as e:
- logger.error(f"Error deleting note '{name}' for hash {file_hash}: {e}", exc_info=True)
+ logger.error(
+ f"Error deleting note '{name}' for hash {file_hash}: {e}",
+ exc_info=True
+ )
raise
def search_by_tag(self, tag: str, limit: int = 100) -> List[tuple]:
@@ -1577,7 +1744,8 @@ class API_folder_store:
WHERE t.tag = ?
LIMIT ?
""",
- (tag, limit),
+ (tag,
+ limit),
)
rows = cursor.fetchall() or []
@@ -1603,7 +1771,8 @@ class API_folder_store:
"""
SELECT file_path FROM files WHERE hash = ?
""",
- (file_hash,),
+ (file_hash,
+ ),
)
row = cursor.fetchone()
@@ -1635,13 +1804,17 @@ class API_folder_store:
UPDATE files SET file_path = ?, updated_at = CURRENT_TIMESTAMP
WHERE file_path = ?
""",
- (str_new_path, str_old_path),
+ (str_new_path,
+ str_old_path),
)
self.connection.commit()
logger.debug(f"Renamed file in database: {old_path} → {new_path}")
except Exception as e:
- logger.error(f"Error renaming file from {old_path} to {new_path}: {e}", exc_info=True)
+ logger.error(
+ f"Error renaming file from {old_path} to {new_path}: {e}",
+ exc_info=True
+ )
raise
def cleanup_missing_files(self) -> int:
@@ -1657,7 +1830,9 @@ class API_folder_store:
except Exception:
abs_path = Path(file_path)
if not abs_path.exists():
- cursor.execute("DELETE FROM files WHERE hash = ?", (file_hash,))
+ cursor.execute("DELETE FROM files WHERE hash = ?",
+ (file_hash,
+ ))
removed_count += 1
self.connection.commit()
@@ -1680,7 +1855,9 @@ class API_folder_store:
cursor = self.connection.cursor()
# Get the hash first (for logging)
- cursor.execute("SELECT hash FROM files WHERE file_path = ?", (str_path,))
+ cursor.execute("SELECT hash FROM files WHERE file_path = ?",
+ (str_path,
+ ))
row = cursor.fetchone()
if not row:
logger.debug(f"File not found in database: {str_path}")
@@ -1692,7 +1869,8 @@ class API_folder_store:
try:
target_hash = str(file_hash or "").strip().lower()
backlinks = self.find_files_pointing_to_hash(target_hash)
- by_src: Dict[str, set[str]] = {}
+ by_src: Dict[str,
+ set[str]] = {}
for b in backlinks:
src = str((b or {}).get("hash") or "").strip().lower()
rt = str((b or {}).get("type") or "").strip()
@@ -1721,7 +1899,8 @@ class API_folder_store:
continue
new_bucket = [
- h for h in bucket if str(h or "").strip().lower() != target_hash
+ h for h in bucket
+ if str(h or "").strip().lower() != target_hash
]
if len(new_bucket) == len(bucket):
continue
@@ -1745,14 +1924,17 @@ class API_folder_store:
time_modified = CURRENT_TIMESTAMP,
updated_at = CURRENT_TIMESTAMP
""",
- (src_hash, json.dumps(rels if rels else {})),
+ (src_hash,
+ json.dumps(rels if rels else {})),
)
except Exception:
# Best-effort cleanup; deletion should still proceed.
pass
# Delete the file entry (cascades to metadata, tags, notes, etc. via foreign keys)
- cursor.execute("DELETE FROM files WHERE file_path = ?", (str_path,))
+ cursor.execute("DELETE FROM files WHERE file_path = ?",
+ (str_path,
+ ))
self.connection.commit()
logger.debug(f"Deleted file from database: {str_path} (hash: {file_hash})")
@@ -1782,7 +1964,15 @@ class API_folder_store:
INSERT INTO worker (worker_id, worker_type, pipe, status, title, description, total_steps)
VALUES (?, ?, ?, ?, ?, ?, ?)
""",
- (worker_id, worker_type, pipe, "running", title, description, total_steps),
+ (
+ worker_id,
+ worker_type,
+ pipe,
+ "running",
+ title,
+ description,
+ total_steps
+ ),
)
self.connection.commit()
return cursor.lastrowid or 0
@@ -1809,7 +1999,10 @@ class API_folder_store:
"started_at",
"last_stdout_at",
}
- update_fields = {k: v for k, v in kwargs.items() if k in allowed_fields}
+ update_fields = {
+ k: v
+ for k, v in kwargs.items() if k in allowed_fields
+ }
if not update_fields:
return True
@@ -1844,7 +2037,8 @@ class API_folder_store:
SET status = ?, completed_at = CURRENT_TIMESTAMP, last_updated = CURRENT_TIMESTAMP
WHERE worker_id = ?
""",
- (status, worker_id),
+ (status,
+ worker_id),
)
else:
cursor.execute(
@@ -1853,12 +2047,15 @@ class API_folder_store:
SET status = ?, last_updated = CURRENT_TIMESTAMP
WHERE worker_id = ?
""",
- (status, worker_id),
+ (status,
+ worker_id),
)
self.connection.commit()
- cursor.execute("SELECT id FROM worker WHERE worker_id = ?", (worker_id,))
+ cursor.execute("SELECT id FROM worker WHERE worker_id = ?",
+ (worker_id,
+ ))
row = cursor.fetchone()
return row[0] if row else 0
except Exception as e:
@@ -1869,7 +2066,9 @@ class API_folder_store:
"""Retrieve a worker entry by ID."""
try:
cursor = self.connection.cursor()
- cursor.execute("SELECT * FROM worker WHERE worker_id = ?", (worker_id,))
+ cursor.execute("SELECT * FROM worker WHERE worker_id = ?",
+ (worker_id,
+ ))
row = cursor.fetchone()
return dict(row) if row else None
except Exception as e:
@@ -1880,7 +2079,9 @@ class API_folder_store:
"""Get all active (running) workers."""
try:
cursor = self.connection.cursor()
- cursor.execute("SELECT * FROM worker WHERE status = 'running' ORDER BY started_at DESC")
+ cursor.execute(
+ "SELECT * FROM worker WHERE status = 'running' ORDER BY started_at DESC"
+ )
return [dict(row) for row in cursor.fetchall()]
except Exception as e:
logger.error(f"Error retrieving active workers: {e}", exc_info=True)
@@ -1894,7 +2095,8 @@ class API_folder_store:
"""
SELECT * FROM worker ORDER BY started_at DESC LIMIT ?
""",
- (limit,),
+ (limit,
+ ),
)
return [dict(row) for row in cursor.fetchall()]
except Exception as e:
@@ -1905,7 +2107,9 @@ class API_folder_store:
"""Delete a worker entry."""
try:
cursor = self.connection.cursor()
- cursor.execute("DELETE FROM worker WHERE worker_id = ?", (worker_id,))
+ cursor.execute("DELETE FROM worker WHERE worker_id = ?",
+ (worker_id,
+ ))
self.connection.commit()
return cursor.rowcount > 0
except Exception as e:
@@ -1922,7 +2126,8 @@ class API_folder_store:
WHERE status IN ('completed', 'error')
AND completed_at < datetime('now', '-' || ? || ' days')
""",
- (days,),
+ (days,
+ ),
)
self.connection.commit()
return cursor.rowcount
@@ -1968,7 +2173,10 @@ class API_folder_store:
AND worker_id LIKE ?
AND COALESCE(last_updated, started_at, created_at) < datetime('now', ?)
""",
- (status, auto_reason, worker_id_prefix, cutoff),
+ (status,
+ auto_reason,
+ worker_id_prefix,
+ cutoff),
)
else:
cursor.execute(
@@ -1984,7 +2192,9 @@ class API_folder_store:
WHERE status = 'running'
AND COALESCE(last_updated, started_at, created_at) < datetime('now', ?)
""",
- (status, auto_reason, cutoff),
+ (status,
+ auto_reason,
+ cutoff),
)
self.connection.commit()
return cursor.rowcount
@@ -1993,7 +2203,11 @@ class API_folder_store:
return 0
def append_worker_stdout(
- self, worker_id: str, text: str, step: Optional[str] = None, channel: str = "stdout"
+ self,
+ worker_id: str,
+ text: str,
+ step: Optional[str] = None,
+ channel: str = "stdout"
) -> bool:
"""Append text to a worker's stdout log and timeline."""
if not text:
@@ -2001,11 +2215,17 @@ class API_folder_store:
try:
# Check if connection is valid
if not self.connection:
- logger.warning(f"Database connection not available for worker {worker_id}")
+ logger.warning(
+ f"Database connection not available for worker {worker_id}"
+ )
return False
cursor = self.connection.cursor()
- cursor.execute("SELECT stdout FROM worker WHERE worker_id = ?", (worker_id,))
+ cursor.execute(
+ "SELECT stdout FROM worker WHERE worker_id = ?",
+ (worker_id,
+ )
+ )
row = cursor.fetchone()
if not row:
@@ -2014,7 +2234,8 @@ class API_folder_store:
current_stdout = row[0] or ""
separator = (
- "" if not current_stdout else ("" if current_stdout.endswith("\n") else "\n")
+ "" if not current_stdout else
+ ("" if current_stdout.endswith("\n") else "\n")
)
new_stdout = f"{current_stdout}{separator}{text}\n"
@@ -2024,9 +2245,17 @@ class API_folder_store:
last_stdout_at = CURRENT_TIMESTAMP
WHERE worker_id = ?
""",
- (new_stdout, worker_id),
+ (new_stdout,
+ worker_id),
+ )
+ self._insert_worker_log_entry(
+ cursor,
+ worker_id,
+ "stdout",
+ text,
+ step,
+ channel
)
- self._insert_worker_log_entry(cursor, worker_id, "stdout", text, step, channel)
self.connection.commit()
return cursor.rowcount > 0
@@ -2037,21 +2266,34 @@ class API_folder_store:
f"Database connection closed, cannot append stdout for worker {worker_id}"
)
return False
- logger.error(f"Error appending stdout to worker {worker_id}: {e}", exc_info=True)
+ logger.error(
+ f"Error appending stdout to worker {worker_id}: {e}",
+ exc_info=True
+ )
return False
except Exception as e:
- logger.error(f"Error appending stdout to worker {worker_id}: {e}", exc_info=True)
+ logger.error(
+ f"Error appending stdout to worker {worker_id}: {e}",
+ exc_info=True
+ )
return False
def get_worker_stdout(self, worker_id: str) -> str:
"""Get stdout logs for a worker."""
try:
cursor = self.connection.cursor()
- cursor.execute("SELECT stdout FROM worker WHERE worker_id = ?", (worker_id,))
+ cursor.execute(
+ "SELECT stdout FROM worker WHERE worker_id = ?",
+ (worker_id,
+ )
+ )
row = cursor.fetchone()
return row[0] if row and row[0] else ""
except Exception as e:
- logger.error(f"Error getting worker stdout for {worker_id}: {e}", exc_info=True)
+ logger.error(
+ f"Error getting worker stdout for {worker_id}: {e}",
+ exc_info=True
+ )
return ""
def append_worker_steps(self, worker_id: str, step_text: str) -> bool:
@@ -2060,7 +2302,11 @@ class API_folder_store:
return True
try:
cursor = self.connection.cursor()
- cursor.execute("SELECT steps FROM worker WHERE worker_id = ?", (worker_id,))
+ cursor.execute(
+ "SELECT steps FROM worker WHERE worker_id = ?",
+ (worker_id,
+ )
+ )
row = cursor.fetchone()
if not row:
@@ -2078,25 +2324,44 @@ class API_folder_store:
current_step = ?
WHERE worker_id = ?
""",
- (new_steps, step_text, worker_id),
+ (new_steps,
+ step_text,
+ worker_id),
+ )
+ self._insert_worker_log_entry(
+ cursor,
+ worker_id,
+ "step",
+ step_text,
+ step_text,
+ "step"
)
- self._insert_worker_log_entry(cursor, worker_id, "step", step_text, step_text, "step")
self.connection.commit()
return cursor.rowcount > 0
except Exception as e:
- logger.error(f"Error appending step to worker {worker_id}: {e}", exc_info=True)
+ logger.error(
+ f"Error appending step to worker {worker_id}: {e}",
+ exc_info=True
+ )
return False
def get_worker_steps(self, worker_id: str) -> str:
"""Get step logs for a worker."""
try:
cursor = self.connection.cursor()
- cursor.execute("SELECT steps FROM worker WHERE worker_id = ?", (worker_id,))
+ cursor.execute(
+ "SELECT steps FROM worker WHERE worker_id = ?",
+ (worker_id,
+ )
+ )
row = cursor.fetchone()
return row[0] if row and row[0] else ""
except Exception as e:
- logger.error(f"Error getting worker steps for {worker_id}: {e}", exc_info=True)
+ logger.error(
+ f"Error getting worker steps for {worker_id}: {e}",
+ exc_info=True
+ )
return ""
def clear_worker_stdout(self, worker_id: str) -> bool:
@@ -2108,7 +2373,8 @@ class API_folder_store:
UPDATE worker SET stdout = '', last_updated = CURRENT_TIMESTAMP
WHERE worker_id = ?
""",
- (worker_id,),
+ (worker_id,
+ ),
)
self.clear_worker_events(worker_id, event_type="stdout")
self.connection.commit()
@@ -2169,7 +2435,11 @@ class DatabaseAPI:
def get_file_hash_by_hash(self, file_hash: str) -> Optional[str]:
"""Get file hash from the database, or None if not found."""
cursor = self.get_cursor()
- cursor.execute("SELECT hash FROM files WHERE LOWER(hash) = ?", (file_hash.lower(),))
+ cursor.execute(
+ "SELECT hash FROM files WHERE LOWER(hash) = ?",
+ (file_hash.lower(),
+ )
+ )
row = cursor.fetchone()
return row[0] if row else None
@@ -2177,7 +2447,8 @@ class DatabaseAPI:
"""Get all file hashes in the database."""
cursor = self.get_cursor()
cursor.execute("SELECT hash FROM files")
- return {row[0] for row in cursor.fetchall()}
+ return {row[0]
+ for row in cursor.fetchall()}
def get_file_hashes_by_tag_pattern(self, query_pattern: str) -> List[tuple]:
"""Get (hash, tag) tuples matching a tag pattern."""
@@ -2189,7 +2460,8 @@ class DatabaseAPI:
JOIN tags t ON f.hash = t.hash
WHERE LOWER(t.tag) LIKE ?
""",
- (query_pattern,),
+ (query_pattern,
+ ),
)
return cursor.fetchall()
@@ -2197,9 +2469,12 @@ class DatabaseAPI:
"""Get hashes of files matching a path pattern."""
cursor = self.get_cursor()
cursor.execute(
- "SELECT DISTINCT hash FROM files WHERE LOWER(file_path) LIKE ?", (like_pattern,)
+ "SELECT DISTINCT hash FROM files WHERE LOWER(file_path) LIKE ?",
+ (like_pattern,
+ )
)
- return {row[0] for row in cursor.fetchall()}
+ return {row[0]
+ for row in cursor.fetchall()}
def get_file_hashes_by_tag_substring(self, like_pattern: str) -> Set[str]:
"""Get hashes of files matching a tag substring."""
@@ -2211,9 +2486,11 @@ class DatabaseAPI:
JOIN tags t ON f.hash = t.hash
WHERE LOWER(t.tag) LIKE ?
""",
- (like_pattern,),
+ (like_pattern,
+ ),
)
- return {row[0] for row in cursor.fetchall()}
+ return {row[0]
+ for row in cursor.fetchall()}
def get_file_hashes_with_any_url(self, limit: Optional[int] = None) -> Set[str]:
"""Get hashes of files that have any non-empty URL metadata."""
@@ -2228,12 +2505,16 @@ class DatabaseAPI:
AND TRIM(m.url) != '[]'
LIMIT ?
""",
- (limit or 10000,),
+ (limit or 10000,
+ ),
)
- return {row[0] for row in cursor.fetchall()}
+ return {row[0]
+ for row in cursor.fetchall()}
def get_file_hashes_by_url_like(
- self, like_pattern: str, limit: Optional[int] = None
+ self,
+ like_pattern: str,
+ limit: Optional[int] = None
) -> Set[str]:
"""Get hashes of files whose URL metadata contains a substring (case-insensitive)."""
cursor = self.get_cursor()
@@ -2246,11 +2527,15 @@ class DatabaseAPI:
AND LOWER(m.url) LIKE ?
LIMIT ?
""",
- (like_pattern.lower(), limit or 10000),
+ (like_pattern.lower(),
+ limit or 10000),
)
- return {row[0] for row in cursor.fetchall()}
+ return {row[0]
+ for row in cursor.fetchall()}
- def get_file_hashes_by_ext(self, ext_value: str, limit: Optional[int] = None) -> Set[str]:
+ def get_file_hashes_by_ext(self,
+ ext_value: str,
+ limit: Optional[int] = None) -> Set[str]:
"""Get hashes of files whose metadata ext matches the given extension.
Matches case-insensitively and ignores any leading '.' in stored ext.
@@ -2276,7 +2561,8 @@ class DatabaseAPI:
WHERE LOWER(LTRIM(COALESCE(m.ext, ''), '.')) LIKE ? ESCAPE '\\'
LIMIT ?
""",
- (pattern, limit or 10000),
+ (pattern,
+ limit or 10000),
)
else:
cursor.execute(
@@ -2287,11 +2573,15 @@ class DatabaseAPI:
WHERE LOWER(LTRIM(COALESCE(m.ext, ''), '.')) = ?
LIMIT ?
""",
- (ext_clean, limit or 10000),
+ (ext_clean,
+ limit or 10000),
)
- return {row[0] for row in cursor.fetchall()}
+ return {row[0]
+ for row in cursor.fetchall()}
- def get_files_by_ext(self, ext_value: str, limit: Optional[int] = None) -> List[tuple]:
+ def get_files_by_ext(self,
+ ext_value: str,
+ limit: Optional[int] = None) -> List[tuple]:
"""Get files whose metadata ext matches the given extension.
Returns (hash, file_path, size, ext) tuples.
@@ -2313,7 +2603,8 @@ class DatabaseAPI:
ORDER BY f.file_path
LIMIT ?
""",
- (ext_clean, limit or 10000),
+ (ext_clean,
+ limit or 10000),
)
return cursor.fetchall()
@@ -2336,11 +2627,14 @@ class DatabaseAPI:
ORDER BY f.file_path
LIMIT ?
""",
- (limit or 10000,),
+ (limit or 10000,
+ ),
)
return cursor.fetchall()
- def get_files_by_url_like(self, like_pattern: str, limit: Optional[int] = None) -> List[tuple]:
+ def get_files_by_url_like(self,
+ like_pattern: str,
+ limit: Optional[int] = None) -> List[tuple]:
"""Get files whose URL metadata contains a substring (case-insensitive).
Returns (hash, file_path, size, ext) tuples.
@@ -2358,11 +2652,14 @@ class DatabaseAPI:
ORDER BY f.file_path
LIMIT ?
""",
- (like_pattern.lower(), limit or 10000),
+ (like_pattern.lower(),
+ limit or 10000),
)
return cursor.fetchall()
- def get_file_metadata(self, file_hashes: Set[str], limit: Optional[int] = None) -> List[tuple]:
+ def get_file_metadata(self,
+ file_hashes: Set[str],
+ limit: Optional[int] = None) -> List[tuple]:
"""Get metadata for files given their hashes. Returns (hash, file_path, size, extension) tuples."""
if not file_hashes:
return []
@@ -2392,17 +2689,22 @@ class DatabaseAPI:
ORDER BY file_path
LIMIT ?
""",
- (limit or 1000,),
+ (limit or 1000,
+ ),
)
return cursor.fetchall()
def get_tags_for_file(self, file_hash: str) -> List[str]:
"""Get all tags for a file given its hash."""
cursor = self.get_cursor()
- cursor.execute("SELECT tag FROM tags WHERE hash = ?", (file_hash,))
+ cursor.execute("SELECT tag FROM tags WHERE hash = ?",
+ (file_hash,
+ ))
return [row[0] for row in cursor.fetchall()]
- def get_tags_by_namespace_and_file(self, file_hash: str, query_pattern: str) -> List[str]:
+ def get_tags_by_namespace_and_file(self,
+ file_hash: str,
+ query_pattern: str) -> List[str]:
"""Get tags for a file matching a pattern."""
cursor = self.get_cursor()
cursor.execute(
@@ -2411,12 +2713,15 @@ class DatabaseAPI:
WHERE hash = ?
AND LOWER(tag) LIKE ?
""",
- (file_hash, query_pattern),
+ (file_hash,
+ query_pattern),
)
return [row[0] for row in cursor.fetchall()]
def get_files_by_namespace_pattern(
- self, query_pattern: str, limit: Optional[int] = None
+ self,
+ query_pattern: str,
+ limit: Optional[int] = None
) -> List[tuple]:
"""Get files with tags matching a pattern. Returns (hash, file_path, size, ext) tuples."""
cursor = self.get_cursor()
@@ -2431,12 +2736,15 @@ class DatabaseAPI:
ORDER BY f.file_path
LIMIT ?
""",
- (query_pattern, limit or 1000),
+ (query_pattern,
+ limit or 1000),
)
return cursor.fetchall()
def get_files_by_simple_tag_pattern(
- self, query_pattern: str, limit: Optional[int] = None
+ self,
+ query_pattern: str,
+ limit: Optional[int] = None
) -> List[tuple]:
"""Get files with non-namespaced tags matching a pattern. Returns (hash, file_path, size, ext) tuples."""
cursor = self.get_cursor()
@@ -2451,12 +2759,16 @@ class DatabaseAPI:
ORDER BY f.file_path
LIMIT ?
""",
- (query_pattern, limit or 1000),
+ (query_pattern,
+ limit or 1000),
)
return cursor.fetchall()
def get_files_by_multiple_path_conditions(
- self, conditions: List[str], params: List[str], limit: Optional[int] = None
+ self,
+ conditions: List[str],
+ params: List[str],
+ limit: Optional[int] = None
) -> List[tuple]:
"""Get files matching multiple path conditions. Returns (hash, file_path, size, ext) tuples."""
cursor = self.get_cursor()
@@ -2474,7 +2786,9 @@ class DatabaseAPI:
return cursor.fetchall()
def get_files_by_title_tag_pattern(
- self, title_pattern: str, limit: Optional[int] = None
+ self,
+ title_pattern: str,
+ limit: Optional[int] = None
) -> List[tuple]:
"""Get files with title tags matching a pattern. Returns (hash, file_path, size, ext) tuples."""
cursor = self.get_cursor()
@@ -2489,7 +2803,8 @@ class DatabaseAPI:
ORDER BY f.file_path
LIMIT ?
""",
- (title_pattern, limit or 10000),
+ (title_pattern,
+ limit or 10000),
)
return cursor.fetchall()
@@ -2546,7 +2861,9 @@ class LocalLibraryInitializer:
cursor = self.db.connection.cursor()
cursor.execute("SELECT COUNT(*) FROM files")
row = cursor.fetchone()
- self.stats["files_total_db"] = int(row[0]) if row and row[0] is not None else 0
+ self.stats["files_total_db"] = int(
+ row[0]
+ ) if row and row[0] is not None else 0
except Exception:
self.stats["files_total_db"] = 0
@@ -2577,16 +2894,16 @@ class LocalLibraryInitializer:
continue
stem = file_path.stem.lower()
- is_hash_named = len(stem) == 64 and all(ch in "0123456789abcdef" for ch in stem)
+ is_hash_named = len(stem) == 64 and all(
+ ch in "0123456789abcdef" for ch in stem
+ )
if is_hash_named:
continue
# If any sidecars exist for this file, let the sidecar importer handle it.
- if (
- file_path.with_name(file_path.name + ".tag").exists()
- or file_path.with_name(file_path.name + ".metadata").exists()
- or file_path.with_name(file_path.name + ".notes").exists()
- ):
+ if (file_path.with_name(file_path.name + ".tag").exists() or
+ file_path.with_name(file_path.name + ".metadata").exists()
+ or file_path.with_name(file_path.name + ".notes").exists()):
continue
file_hash = sha256_file(file_path)
@@ -2608,7 +2925,8 @@ class LocalLibraryInitializer:
cursor = self.db.connection.cursor()
cursor.execute(
"UPDATE files SET file_path = ?, updated_at = CURRENT_TIMESTAMP WHERE hash = ?",
- (self.db._to_db_file_path(target_path), file_hash),
+ (self.db._to_db_file_path(target_path),
+ file_hash),
)
except Exception as exc:
logger.debug(
@@ -2638,7 +2956,9 @@ class LocalLibraryInitializer:
try:
file_path.rename(target_path)
except Exception as exc:
- logger.warning(f"Failed to rename {file_path} -> {target_path}: {exc}")
+ logger.warning(
+ f"Failed to rename {file_path} -> {target_path}: {exc}"
+ )
self.stats["errors"] += 1
continue
@@ -2647,7 +2967,8 @@ class LocalLibraryInitializer:
cursor = self.db.connection.cursor()
cursor.execute(
"UPDATE files SET file_path = ?, updated_at = CURRENT_TIMESTAMP WHERE hash = ?",
- (self.db._to_db_file_path(target_path), file_hash),
+ (self.db._to_db_file_path(target_path),
+ file_hash),
)
except Exception:
pass
@@ -2673,19 +2994,24 @@ class LocalLibraryInitializer:
if renamed:
self.stats["files_hashed_renamed"] = (
- int(self.stats.get("files_hashed_renamed", 0) or 0) + renamed
+ int(self.stats.get("files_hashed_renamed",
+ 0) or 0) + renamed
)
if skipped_existing_target:
self.stats["files_hashed_skipped_target_exists"] = (
- int(self.stats.get("files_hashed_skipped_target_exists", 0) or 0)
- + skipped_existing_target
+ int(self.stats.get("files_hashed_skipped_target_exists",
+ 0) or 0) + skipped_existing_target
)
if duplicates_quarantined:
self.stats["duplicates_quarantined"] = (
- int(self.stats.get("duplicates_quarantined", 0) or 0) + duplicates_quarantined
+ int(self.stats.get("duplicates_quarantined",
+ 0) or 0) + duplicates_quarantined
)
except Exception as exc:
- logger.error(f"Error hashing/renaming non-sidecar media files: {exc}", exc_info=True)
+ logger.error(
+ f"Error hashing/renaming non-sidecar media files: {exc}",
+ exc_info=True
+ )
self.stats["errors"] += 1
def _find_media_files(self) -> List[Path]:
@@ -2739,7 +3065,9 @@ class LocalLibraryInitializer:
file_hash = sha256_file(file_path)
try:
cursor = self.db.connection.cursor()
- cursor.execute("SELECT 1 FROM files WHERE hash = ?", (file_hash,))
+ cursor.execute("SELECT 1 FROM files WHERE hash = ?",
+ (file_hash,
+ ))
exists_by_hash = cursor.fetchone() is not None
except Exception:
exists_by_hash = False
@@ -2747,7 +3075,8 @@ class LocalLibraryInitializer:
if exists_by_hash:
self.stats["files_existing"] += 1
self.stats["duplicates_found"] = (
- int(self.stats.get("duplicates_found", 0) or 0) + 1
+ int(self.stats.get("duplicates_found",
+ 0) or 0) + 1
)
logger.info(
f"Duplicate content detected during scan (hash={file_hash}): {file_path}"
@@ -2795,19 +3124,26 @@ class LocalLibraryInitializer:
self.stats["sidecars_imported"] += 1
except Exception as e:
- logger.warning(f"Error importing sidecar bundle for {base_path}: {e}")
+ logger.warning(
+ f"Error importing sidecar bundle for {base_path}: {e}"
+ )
self.stats["errors"] += 1
except Exception as e:
logger.error(f"Error batch importing sidecars: {e}", exc_info=True)
def _collect_sidecars(self) -> Dict[Path, Dict[str, List[Path]]]:
"""Collect sidecars grouped by their base media file."""
- sidecar_map: Dict[Path, Dict[str, List[Path]]] = {}
+ sidecar_map: Dict[Path,
+ Dict[str,
+ List[Path]]] = {}
patterns = [
- ("*.tag", "tag"),
- ("*.metadata", "metadata"),
- ("*.notes", "notes"),
+ ("*.tag",
+ "tag"),
+ ("*.metadata",
+ "metadata"),
+ ("*.notes",
+ "notes"),
]
for pattern, key in patterns:
@@ -2820,7 +3156,14 @@ class LocalLibraryInitializer:
if not base.exists():
continue
- bucket = sidecar_map.setdefault(base, {"tag": [], "metadata": [], "notes": []})
+ bucket = sidecar_map.setdefault(
+ base,
+ {
+ "tag": [],
+ "metadata": [],
+ "notes": []
+ }
+ )
bucket[key].append(sidecar)
return sidecar_map
@@ -2840,7 +3183,11 @@ class LocalLibraryInitializer:
return tags
def _read_metadata_sidecar(self, sidecars: Dict[str, List[Path]]) -> Dict[str, Any]:
- metadata: Dict[str, Any] = {"url": [], "relationships": []}
+ metadata: Dict[str,
+ Any] = {
+ "url": [],
+ "relationships": []
+ }
meta_path = sidecars.get("metadata", [])
if not meta_path:
@@ -2885,9 +3232,11 @@ class LocalLibraryInitializer:
return content
return None
- def _ensure_hashed_filename(
- self, file_path: Path, sidecars: Dict[str, List[Path]]
- ) -> Tuple[Path, str]:
+ def _ensure_hashed_filename(self,
+ file_path: Path,
+ sidecars: Dict[str,
+ List[Path]]) -> Tuple[Path,
+ str]:
"""Compute hash, rename file to hash-based name, and move sidecars accordingly."""
file_hash = sha256_file(file_path)
target_name = f"{file_hash}{file_path.suffix}"
@@ -2899,7 +3248,9 @@ class LocalLibraryInitializer:
try:
if target_path.exists():
- logger.warning(f"Hash target already exists, keeping original: {target_path}")
+ logger.warning(
+ f"Hash target already exists, keeping original: {target_path}"
+ )
return file_path, file_hash
file_path.rename(target_path)
@@ -2915,13 +3266,23 @@ class LocalLibraryInitializer:
return file_path, file_hash
def _rename_sidecars(
- self, old_base: Path, new_base: Path, sidecars: Dict[str, List[Path]]
+ self,
+ old_base: Path,
+ new_base: Path,
+ sidecars: Dict[str,
+ List[Path]]
) -> None:
"""Rename sidecars to follow the new hashed filename."""
mappings = [
- (sidecars.get("tag", []), ".tag"),
- (sidecars.get("metadata", []), ".metadata"),
- (sidecars.get("notes", []), ".notes"),
+ (sidecars.get("tag",
+ []),
+ ".tag"),
+ (sidecars.get("metadata",
+ []),
+ ".metadata"),
+ (sidecars.get("notes",
+ []),
+ ".notes"),
]
for candidates, suffix in mappings:
@@ -2963,7 +3324,9 @@ class LocalLibraryInitializer:
sidecar_path.unlink()
self.stats["sidecars_deleted"] += 1
except Exception as e:
- logger.warning(f"Could not delete orphaned sidecar {sidecar_path}: {e}")
+ logger.warning(
+ f"Could not delete orphaned sidecar {sidecar_path}: {e}"
+ )
except Exception as e:
logger.error(f"Error cleaning up orphaned sidecars: {e}", exc_info=True)
@@ -3004,7 +3367,7 @@ def migrate_metadata_to_db(library_root: Path, db: API_folder_store) -> int:
try:
for metadata_file in library_root.rglob("*.metadata"):
try:
- base_path = Path(str(metadata_file)[: -len(".metadata")])
+ base_path = Path(str(metadata_file)[:-len(".metadata")])
metadata_text = metadata_file.read_text(encoding="utf-8")
metadata = _parse_metadata_file(metadata_text)
@@ -3015,7 +3378,9 @@ def migrate_metadata_to_db(library_root: Path, db: API_folder_store) -> int:
metadata_file.unlink()
logger.info(f"Migrated and deleted {metadata_file}")
except Exception as e:
- logger.warning(f"Migrated {metadata_file} but failed to delete: {e}")
+ logger.warning(
+ f"Migrated {metadata_file} but failed to delete: {e}"
+ )
except Exception as e:
logger.warning(f"Failed to migrate {metadata_file}: {e}")
@@ -3035,7 +3400,9 @@ def _parse_metadata_file(content: str) -> Dict[str, Any]:
return {}
-def migrate_all(library_root: Path, db: Optional[API_folder_store] = None) -> Dict[str, int]:
+def migrate_all(library_root: Path,
+ db: Optional[API_folder_store] = None) -> Dict[str,
+ int]:
"""Migrate all sidecar files to database."""
should_close = db is None
@@ -3044,8 +3411,10 @@ def migrate_all(library_root: Path, db: Optional[API_folder_store] = None) -> Di
db = API_folder_store(library_root)
return {
- "tags": migrate_tags_to_db(library_root, db),
- "metadata": migrate_metadata_to_db(library_root, db),
+ "tags": migrate_tags_to_db(library_root,
+ db),
+ "metadata": migrate_metadata_to_db(library_root,
+ db),
}
finally:
if should_close:
@@ -3098,7 +3467,11 @@ class LocalLibrarySearchOptimizer:
except Exception as e:
logger.warning(f"Failed to prefetch {file_path}: {e}")
- def update_search_result_with_cached_data(self, search_result: Any, file_path: Path) -> None:
+ def update_search_result_with_cached_data(
+ self,
+ search_result: Any,
+ file_path: Path
+ ) -> None:
"""Update a search result object with cached database data."""
if not self.db:
return
@@ -3134,7 +3507,8 @@ class LocalLibrarySearchOptimizer:
WHERE t.tag LIKE ?
LIMIT ?
""",
- (f"%{tag}%", limit),
+ (f"%{tag}%",
+ limit),
)
return [Path(row[0]) for row in cursor.fetchall()]
@@ -3157,7 +3531,8 @@ class LocalLibrarySearchOptimizer:
items = excluded.items,
updated_at = CURRENT_TIMESTAMP
""",
- (name, items_json),
+ (name,
+ items_json),
)
self.db.connection.commit()
return True
@@ -3199,7 +3574,9 @@ class LocalLibrarySearchOptimizer:
return None
try:
cursor = self.db.connection.cursor()
- cursor.execute("SELECT items FROM playlists WHERE name = ?", (name,))
+ cursor.execute("SELECT items FROM playlists WHERE name = ?",
+ (name,
+ ))
row = cursor.fetchone()
if row:
try:
@@ -3211,13 +3588,20 @@ class LocalLibrarySearchOptimizer:
logger.error(f"Failed to get playlist {name}: {e}")
return None
- def get_playlist_by_id(self, playlist_id: int) -> Optional[Tuple[str, List[Dict[str, Any]]]]:
+ def get_playlist_by_id(self,
+ playlist_id: int) -> Optional[Tuple[str,
+ List[Dict[str,
+ Any]]]]:
"""Get a specific playlist by ID. Returns (name, items)."""
if not self.db:
return None
try:
cursor = self.db.connection.cursor()
- cursor.execute("SELECT name, items FROM playlists WHERE id = ?", (playlist_id,))
+ cursor.execute(
+ "SELECT name, items FROM playlists WHERE id = ?",
+ (playlist_id,
+ )
+ )
row = cursor.fetchone()
if row:
try:
@@ -3236,7 +3620,9 @@ class LocalLibrarySearchOptimizer:
return False
try:
cursor = self.db.connection.cursor()
- cursor.execute("DELETE FROM playlists WHERE id = ?", (playlist_id,))
+ cursor.execute("DELETE FROM playlists WHERE id = ?",
+ (playlist_id,
+ ))
self.db.connection.commit()
return cursor.rowcount > 0
except Exception as e:
@@ -3272,7 +3658,10 @@ class LocalLibrarySearchOptimizer:
if not self.db:
return
self.db.set_relationship(
- file_path, related_file_path, rel_type, bidirectional=bidirectional
+ file_path,
+ related_file_path,
+ rel_type,
+ bidirectional=bidirectional
)
def find_files_pointing_to(self, target_path: Path) -> List[Dict[str, Any]]:
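The folder-store hunks above keep reformatting one recurring pattern: parameterized sqlite3 statements keyed by a content hash, with tag writes done via INSERT OR IGNORE so repeated imports stay idempotent. A minimal, self-contained sketch of that pattern follows; the reduced two-table schema and the demo values are illustrative only and are not the project's actual schema or API.

import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
# Reduced stand-ins for the files/tags tables that the hunks above keep touching.
cur.execute("CREATE TABLE files (hash TEXT PRIMARY KEY, file_path TEXT UNIQUE)")
cur.execute("CREATE TABLE tags (hash TEXT, tag TEXT, UNIQUE(hash, tag))")

file_hash = "deadbeef" * 8  # placeholder value, not a real SHA-256 of anything
cur.execute(
    "INSERT OR IGNORE INTO files (hash, file_path) VALUES (?, ?)",
    (file_hash, "library/example.mkv"),
)
for tag in ("title:Example", "source:demo"):
    # INSERT OR IGNORE keeps repeated tag writes harmless, as in save_file_info/add_tags.
    cur.execute("INSERT OR IGNORE INTO tags (hash, tag) VALUES (?, ?)", (file_hash, tag))
conn.commit()

# Parameterized lookup, mirroring get_tags_for_file.
cur.execute("SELECT tag FROM tags WHERE hash = ? ORDER BY tag", (file_hash,))
print([row[0] for row in cur.fetchall()])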
diff --git a/API/loc.py b/API/loc.py
index 381658c..833e21c 100644
--- a/API/loc.py
+++ b/API/loc.py
@@ -47,8 +47,10 @@ class LOCClient:
*,
start: int = 1,
count: int = 25,
- extra_params: Optional[Dict[str, Any]] = None,
- ) -> Dict[str, Any]:
+ extra_params: Optional[Dict[str,
+ Any]] = None,
+ ) -> Dict[str,
+ Any]:
"""Search the Chronicling America collection via LoC JSON API.
Args:
@@ -63,14 +65,17 @@ class LOCClient:
q = str(query or "").strip()
if not q:
- return {"results": []}
+ return {
+ "results": []
+ }
- params: Dict[str, Any] = {
- "q": q,
- "fo": "json",
- "c": int(count) if int(count) > 0 else 25,
- "sp": int(start) if int(start) > 0 else 1,
- }
+ params: Dict[str,
+ Any] = {
+ "q": q,
+ "fo": "json",
+ "c": int(count) if int(count) > 0 else 25,
+ "sp": int(start) if int(start) > 0 else 1,
+ }
if extra_params:
for k, v in extra_params.items():
if v is None:
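The LOCClient hunk above only reshapes how the query-string dictionary for the LoC JSON API is built: q for the search text, fo=json for the response format, c for the page size, sp for the page number, plus any extra_params with None values dropped. A rough sketch of that request shape follows; the helper name, the collection URL, and the httpx call are illustrative assumptions and are not taken from this patch.

import httpx

def build_loc_params(query, start=1, count=25, extra_params=None):
    # Same shape as the params dict assembled in the LOCClient search method above.
    params = {
        "q": query.strip(),
        "fo": "json",
        "c": count if count > 0 else 25,
        "sp": start if start > 0 else 1,
    }
    for key, value in (extra_params or {}).items():
        if value is None:  # drop unset extras, as the hunk above does
            continue
        params[key] = value
    return params

# Hypothetical usage; the endpoint URL is an assumption, not part of this diff.
params = build_loc_params("state fair", count=5)
response = httpx.get("https://www.loc.gov/collections/chronicling-america/", params=params)
print(response.status_code, len(response.json().get("results", [])))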
diff --git a/CLI.py b/CLI.py
index 3d6e41b..105f866 100644
--- a/CLI.py
+++ b/CLI.py
@@ -1,5 +1,4 @@
from __future__ import annotations
-
"""Medeia-Macina CLI.
This module intentionally uses a class-based architecture:
@@ -57,7 +56,7 @@ from SYS.background_notifier import ensure_background_notifier
from SYS.logger import debug, set_debug
from SYS.worker_manager import WorkerManager
-from cmdlet_catalog import (
+from SYS.cmdlet_catalog import (
ensure_registry_loaded,
get_cmdlet_arg_choices,
get_cmdlet_arg_flags,
@@ -66,7 +65,7 @@ from cmdlet_catalog import (
list_cmdlet_metadata,
list_cmdlet_names,
)
-from config import get_local_storage_path, load_config
+from SYS.config import get_local_storage_path, load_config
from result_table import ResultTable
@@ -139,7 +138,13 @@ class SelectionSyntax:
class WorkerOutputMirror(io.TextIOBase):
"""Mirror stdout/stderr to worker manager while preserving console output."""
- def __init__(self, original: TextIO, manager: WorkerManager, worker_id: str, channel: str):
+ def __init__(
+ self,
+ original: TextIO,
+ manager: WorkerManager,
+ worker_id: str,
+ channel: str
+ ):
self._original = original
self._manager = manager
self._worker_id = worker_id
@@ -207,7 +212,8 @@ class WorkerStageSession:
orig_stderr: TextIO,
stdout_proxy: WorkerOutputMirror,
stderr_proxy: WorkerOutputMirror,
- config: Optional[Dict[str, Any]],
+ config: Optional[Dict[str,
+ Any]],
logging_enabled: bool,
completion_label: str,
error_label: str,
@@ -246,13 +252,18 @@ class WorkerStageSession:
if status == "completed":
self.manager.log_step(self.worker_id, self._completion_label)
else:
- self.manager.log_step(self.worker_id, f"{self._error_label}: {error_msg or status}")
+ self.manager.log_step(
+ self.worker_id,
+ f"{self._error_label}: {error_msg or status}"
+ )
except Exception:
pass
try:
self.manager.finish_worker(
- self.worker_id, result=status or "completed", error_msg=error_msg or ""
+ self.worker_id,
+ result=status or "completed",
+ error_msg=error_msg or ""
)
except Exception:
pass
@@ -306,7 +317,8 @@ class WorkerManagerRegistry:
manager.expire_running_workers(
older_than_seconds=120,
worker_id_prefix="cli_%",
- reason="CLI session ended unexpectedly; marking worker as failed",
+ reason=
+ "CLI session ended unexpectedly; marking worker as failed",
)
except Exception:
pass
@@ -319,7 +331,10 @@ class WorkerManagerRegistry:
return manager
except Exception as exc:
- print(f"[worker] Could not initialize worker manager: {exc}", file=sys.stderr)
+ print(
+ f"[worker] Could not initialize worker manager: {exc}",
+ file=sys.stderr
+ )
return None
@classmethod
@@ -346,7 +361,8 @@ class WorkerStages:
title: str,
description: str,
pipe_text: str,
- config: Optional[Dict[str, Any]],
+ config: Optional[Dict[str,
+ Any]],
completion_label: str,
error_label: str,
skip_logging_for: Optional[Set[str]] = None,
@@ -386,8 +402,18 @@ class WorkerStages:
orig_stdout = sys.stdout
orig_stderr = sys.stderr
- stdout_proxy = WorkerOutputMirror(orig_stdout, worker_manager, worker_id, "stdout")
- stderr_proxy = WorkerOutputMirror(orig_stderr, worker_manager, worker_id, "stderr")
+ stdout_proxy = WorkerOutputMirror(
+ orig_stdout,
+ worker_manager,
+ worker_id,
+ "stdout"
+ )
+ stderr_proxy = WorkerOutputMirror(
+ orig_stderr,
+ worker_manager,
+ worker_id,
+ "stderr"
+ )
sys.stdout = stdout_proxy
sys.stderr = stderr_proxy
if isinstance(config, dict):
@@ -418,10 +444,12 @@ class WorkerStages:
*,
cmd_name: str,
stage_tokens: Sequence[str],
- config: Optional[Dict[str, Any]],
+ config: Optional[Dict[str,
+ Any]],
command_text: str,
) -> Optional[WorkerStageSession]:
- description = " ".join(stage_tokens[1:]) if len(stage_tokens) > 1 else "(no args)"
+ description = " ".join(stage_tokens[1:]
+ ) if len(stage_tokens) > 1 else "(no args)"
session_worker_ids = None
if isinstance(config, dict):
session_worker_ids = config.get("_session_worker_ids")
@@ -435,7 +463,9 @@ class WorkerStages:
config=config,
completion_label="Stage completed",
error_label="Stage error",
- skip_logging_for={".worker", "worker", "workers"},
+ skip_logging_for={".worker",
+ "worker",
+ "workers"},
session_worker_ids=session_worker_ids,
)
@@ -445,7 +475,8 @@ class WorkerStages:
worker_manager: Optional[WorkerManager],
*,
pipeline_text: str,
- config: Optional[Dict[str, Any]],
+ config: Optional[Dict[str,
+ Any]],
) -> Optional[WorkerStageSession]:
session_worker_ids: Set[str] = set()
if isinstance(config, dict):
@@ -465,6 +496,7 @@ class WorkerStages:
class CmdletIntrospection:
+
@staticmethod
def cmdlet_names() -> List[str]:
try:
@@ -473,7 +505,9 @@ class CmdletIntrospection:
return []
@staticmethod
- def cmdlet_args(cmd_name: str, config: Optional[Dict[str, Any]] = None) -> List[str]:
+ def cmdlet_args(cmd_name: str,
+ config: Optional[Dict[str,
+ Any]] = None) -> List[str]:
try:
return get_cmdlet_arg_flags(cmd_name, config=config) or []
except Exception:
@@ -490,7 +524,12 @@ class CmdletIntrospection:
return []
@classmethod
- def arg_choices(cls, *, cmd_name: str, arg_name: str, config: Dict[str, Any]) -> List[str]:
+ def arg_choices(cls,
+ *,
+ cmd_name: str,
+ arg_name: str,
+ config: Dict[str,
+ Any]) -> List[str]:
try:
normalized_arg = (arg_name or "").lstrip("-").strip().lower()
@@ -509,20 +548,29 @@ class CmdletIntrospection:
provider_choices: List[str] = []
- if canonical_cmd in {"search-provider"} and list_search_providers is not None:
+ if canonical_cmd in {"search-provider"
+ } and list_search_providers is not None:
providers = list_search_providers(config) or {}
- available = [name for name, is_ready in providers.items() if is_ready]
+ available = [
+ name for name, is_ready in providers.items() if is_ready
+ ]
return sorted(available) if available else sorted(providers.keys())
if canonical_cmd in {"add-file"} and list_file_providers is not None:
providers = list_file_providers(config) or {}
- available = [name for name, is_ready in providers.items() if is_ready]
+ available = [
+ name for name, is_ready in providers.items() if is_ready
+ ]
return sorted(available) if available else sorted(providers.keys())
if list_search_providers is not None:
providers = list_search_providers(config) or {}
- available = [name for name, is_ready in providers.items() if is_ready]
- provider_choices = sorted(available) if available else sorted(providers.keys())
+ available = [
+ name for name, is_ready in providers.items() if is_ready
+ ]
+ provider_choices = sorted(available) if available else sorted(
+ providers.keys()
+ )
try:
from Provider.metadata_provider import list_metadata_providers
@@ -530,7 +578,8 @@ class CmdletIntrospection:
meta_providers = list_metadata_providers(config) or {}
meta_available = [n for n, ready in meta_providers.items() if ready]
meta_choices = (
- sorted(meta_available) if meta_available else sorted(meta_providers.keys())
+ sorted(meta_available)
+ if meta_available else sorted(meta_providers.keys())
)
except Exception:
meta_choices = []
@@ -563,7 +612,10 @@ class CmdletCompleter(Completer):
@staticmethod
def _used_arg_logicals(
- cmd_name: str, stage_tokens: List[str], config: Dict[str, Any]
+ cmd_name: str,
+ stage_tokens: List[str],
+ config: Dict[str,
+ Any]
) -> Set[str]:
"""Return logical argument names already used in this cmdlet stage.
@@ -571,7 +623,8 @@ class CmdletCompleter(Completer):
is considered used and should not be suggested again (even as `--url`).
"""
arg_flags = CmdletIntrospection.cmdlet_args(cmd_name, config)
- allowed = {a.lstrip("-").strip().lower() for a in arg_flags if a}
+ allowed = {a.lstrip("-").strip().lower()
+ for a in arg_flags if a}
if not allowed:
return set()
@@ -579,7 +632,8 @@ class CmdletCompleter(Completer):
for tok in stage_tokens[1:]:
if not tok or not tok.startswith("-"):
continue
- if tok in {"-", "--"}:
+ if tok in {"-",
+ "--"}:
continue
# Handle common `-arg=value` form.
raw = tok.split("=", 1)[0]
@@ -589,7 +643,11 @@ class CmdletCompleter(Completer):
return used
- def get_completions(self, document: Document, complete_event): # type: ignore[override]
+ def get_completions(
+ self,
+ document: Document,
+ complete_event
+ ): # type: ignore[override]
text = document.text_before_cursor
tokens = text.split()
ends_with_space = bool(text) and text[-1].isspace()
@@ -598,7 +656,7 @@ class CmdletCompleter(Completer):
for idx, tok in enumerate(tokens):
if tok == "|":
last_pipe = idx
- stage_tokens = tokens[last_pipe + 1 :] if last_pipe >= 0 else tokens
+ stage_tokens = tokens[last_pipe + 1:] if last_pipe >= 0 else tokens
if not stage_tokens:
for cmd in self.cmdlet_names:
@@ -654,7 +712,9 @@ class CmdletCompleter(Completer):
config = self._config_loader.load()
choices = CmdletIntrospection.arg_choices(
- cmd_name=cmd_name, arg_name=prev_token, config=config
+ cmd_name=cmd_name,
+ arg_name=prev_token,
+ config=config
)
if choices:
for choice in choices:
@@ -667,7 +727,8 @@ class CmdletCompleter(Completer):
logical_seen: Set[str] = set()
for arg in arg_names:
arg_low = arg.lower()
- prefer_single_dash = current_token in {"", "-"}
+ prefer_single_dash = current_token in {"",
+ "-"}
if prefer_single_dash and arg_low.startswith("--"):
continue
logical = arg.lstrip("-").lower()
@@ -690,7 +751,9 @@ class CmdletCompleter(Completer):
class MedeiaLexer(Lexer):
+
def lex_document(self, document: Document): # type: ignore[override]
+
def get_line(lineno: int):
line = document.lines[lineno]
tokens: List[tuple[str, str]] = []
@@ -757,7 +820,8 @@ class MedeiaLexer(Lexer):
if quote:
# If the quoted token contains a keyed spec (clip:/item:/hash:),
# highlight the `key:` portion in argument-blue even inside quotes.
- if len(quote) >= 2 and quote[0] == quote[-1] and quote[0] in ('"', "'"):
+ if len(quote) >= 2 and quote[0] == quote[-1] and quote[0] in ('"',
+ "'"):
q = quote[0]
inner = quote[1:-1]
start_index = len(tokens)
@@ -802,6 +866,7 @@ class MedeiaLexer(Lexer):
class ConfigLoader:
+
def __init__(self, *, root: Path) -> None:
self._root = root
@@ -813,6 +878,7 @@ class ConfigLoader:
class CmdletHelp:
+
@staticmethod
def show_cmdlet_list() -> None:
try:
@@ -821,7 +887,12 @@ class CmdletHelp:
from rich.panel import Panel
from rich.table import Table as RichTable
- table = RichTable(show_header=True, header_style="bold", box=SIMPLE, expand=True)
+ table = RichTable(
+ show_header=True,
+ header_style="bold",
+ box=SIMPLE,
+ expand=True
+ )
table.add_column("Cmdlet", no_wrap=True)
table.add_column("Aliases")
table.add_column("Args")
@@ -832,9 +903,12 @@ class CmdletHelp:
aliases = info.get("aliases", [])
args = info.get("args", [])
summary = info.get("summary") or ""
- alias_str = ", ".join([str(a) for a in (aliases or []) if str(a).strip()])
+ alias_str = ", ".join(
+ [str(a) for a in (aliases or []) if str(a).strip()]
+ )
arg_names = [
- a.get("name") for a in (args or []) if isinstance(a, dict) and a.get("name")
+ a.get("name") for a in (args or [])
+ if isinstance(a, dict) and a.get("name")
]
args_str = ", ".join([str(a) for a in arg_names if str(a).strip()])
table.add_row(str(cmd_name), alias_str, args_str, str(summary))
@@ -844,7 +918,11 @@ class CmdletHelp:
from rich.panel import Panel
from rich.text import Text
- stderr_console().print(Panel(Text(f"Error: {exc}"), title="Error", expand=False))
+ stderr_console().print(
+ Panel(Text(f"Error: {exc}"),
+ title="Error",
+ expand=False)
+ )
@staticmethod
def show_cmdlet_help(cmd_name: str) -> None:
@@ -865,7 +943,11 @@ class CmdletHelp:
from rich.text import Text
stderr_console().print(
- Panel(Text(f"Invalid metadata for {cmd_name}"), title="Error", expand=False)
+ Panel(
+ Text(f"Invalid metadata for {cmd_name}"),
+ title="Error",
+ expand=False
+ )
)
return
@@ -884,7 +966,12 @@ class CmdletHelp:
header = Text.assemble((str(name), "bold"))
synopsis = Text(str(usage or name))
- stdout_console().print(Panel(Group(header, synopsis), title="Help", expand=False))
+ stdout_console().print(
+ Panel(Group(header,
+ synopsis),
+ title="Help",
+ expand=False)
+ )
if summary or description:
desc_bits: List[Text] = []
@@ -892,10 +979,19 @@ class CmdletHelp:
desc_bits.append(Text(str(summary)))
if description:
desc_bits.append(Text(str(description)))
- stdout_console().print(Panel(Group(*desc_bits), title="Description", expand=False))
+ stdout_console().print(
+ Panel(Group(*desc_bits),
+ title="Description",
+ expand=False)
+ )
if args and isinstance(args, list):
- param_table = RichTable(show_header=True, header_style="bold", box=SIMPLE, expand=True)
+ param_table = RichTable(
+ show_header=True,
+ header_style="bold",
+ box=SIMPLE,
+ expand=True
+ )
param_table.add_column("Arg", no_wrap=True)
param_table.add_column("Type", no_wrap=True)
param_table.add_column("Required", no_wrap=True)
@@ -913,18 +1009,26 @@ class CmdletHelp:
desc = getattr(arg, "description", "")
param_table.add_row(
- f"-{name_str}", str(typ), "yes" if required else "no", str(desc or "")
+ f"-{name_str}",
+ str(typ),
+ "yes" if required else "no",
+ str(desc or "")
)
stdout_console().print(Panel(param_table, title="Parameters", expand=False))
if details:
stdout_console().print(
- Panel(Group(*[Text(str(x)) for x in details]), title="Remarks", expand=False)
+ Panel(
+ Group(*[Text(str(x)) for x in details]),
+ title="Remarks",
+ expand=False
+ )
)
class CmdletExecutor:
+
def __init__(self, *, config_loader: ConfigLoader) -> None:
self._config_loader = config_loader
@@ -942,15 +1046,18 @@ class CmdletExecutor:
i = 0
while i < len(tokens):
low = tokens[i].lower()
- if low in {"-provider", "--provider"} and i + 1 < len(tokens):
+ if low in {"-provider",
+ "--provider"} and i + 1 < len(tokens):
provider = str(tokens[i + 1]).strip()
i += 2
continue
- if low in {"-query", "--query"} and i + 1 < len(tokens):
+ if low in {"-query",
+ "--query"} and i + 1 < len(tokens):
query = str(tokens[i + 1]).strip()
i += 2
continue
- if low in {"-limit", "--limit"} and i + 1 < len(tokens):
+ if low in {"-limit",
+ "--limit"} and i + 1 < len(tokens):
i += 2
continue
if not str(tokens[i]).startswith("-"):
@@ -1078,19 +1185,20 @@ class CmdletExecutor:
# Some commands render their own Rich UI (tables/panels) and don't
# play nicely with Live cursor control.
if cmd_name_norm in {
- "get-relationship",
- "get-rel",
- ".pipe",
- ".matrix",
- ".telegram",
- "telegram",
- "delete-file",
- "del-file",
+ "get-relationship",
+ "get-rel",
+ ".pipe",
+ ".matrix",
+ ".telegram",
+ "telegram",
+ "delete-file",
+ "del-file",
}:
return
# add-file directory selector mode: show only the selection table, no Live progress.
- if cmd_name_norm in {"add-file", "add_file"}:
+ if cmd_name_norm in {"add-file",
+ "add_file"}:
try:
from pathlib import Path as _Path
@@ -1099,7 +1207,9 @@ class CmdletExecutor:
while i < len(toks):
t = str(toks[i])
low = t.lower().strip()
- if low in {"-path", "--path", "-p"} and i + 1 < len(toks):
+ if low in {"-path",
+ "--path",
+ "-p"} and i + 1 < len(toks):
nxt = str(toks[i + 1])
if nxt and ("," not in nxt):
p = _Path(nxt)
@@ -1114,8 +1224,8 @@ class CmdletExecutor:
try:
quiet_mode = (
bool(config.get("_quiet_background_output"))
- if isinstance(config, dict)
- else False
+ if isinstance(config,
+ dict) else False
)
except Exception:
quiet_mode = False
@@ -1160,15 +1270,18 @@ class CmdletExecutor:
while i < len(toks):
t = str(toks[i])
low = t.lower().strip()
- if (
- cmd_name_norm in {"add-file", "add_file"}
- and low in {"-path", "--path", "-p"}
- and i + 1 < len(toks)
- ):
+ if (cmd_name_norm in {"add-file",
+ "add_file"} and low in {"-path",
+ "--path",
+ "-p"}
+ and i + 1 < len(toks)):
nxt = str(toks[i + 1])
if nxt:
if "," in nxt:
- parts = [p.strip().strip("\"'") for p in nxt.split(",")]
+ parts = [
+ p.strip().strip("\"'")
+ for p in nxt.split(",")
+ ]
parts = [p for p in parts if p]
if parts:
preview.extend(parts)
@@ -1178,15 +1291,17 @@ class CmdletExecutor:
preview.append(nxt)
i += 2
continue
- if low in {"-url", "--url"} and i + 1 < len(toks):
+ if low in {"-url",
+ "--url"} and i + 1 < len(toks):
nxt = str(toks[i + 1])
if nxt and not nxt.startswith("-"):
preview.append(nxt)
i += 2
continue
- if (not t.startswith("-")) and (
- "://" in low or low.startswith(("magnet:", "torrent:"))
- ):
+ if (not t.startswith("-")) and ("://" in low
+ or low.startswith(
+ ("magnet:",
+ "torrent:"))):
preview.append(t)
i += 1
preview_items = preview if preview else None
@@ -1197,7 +1312,9 @@ class CmdletExecutor:
try:
progress_ui.begin_pipe(
- 0, total_items=int(total_items), items_preview=preview_items
+ 0,
+ total_items=int(total_items),
+ items_preview=preview_items
)
except Exception:
pass
@@ -1216,7 +1333,9 @@ class CmdletExecutor:
arg_specs = getattr(raw, "arg", None) if raw is not None else None
if isinstance(arg_specs, list):
for spec in arg_specs:
- spec_type = str(getattr(spec, "type", "string") or "string").strip().lower()
+ spec_type = str(getattr(spec,
+ "type",
+ "string") or "string").strip().lower()
if spec_type == "flag":
continue
spec_name = str(getattr(spec, "name", "") or "")
@@ -1238,7 +1357,8 @@ class CmdletExecutor:
filtered_args.append(arg)
continue
- if len(arg) >= 2 and arg[1] in {'"', "'"}:
+ if len(arg) >= 2 and arg[1] in {'"',
+ "'"}:
filtered_args.append(arg[1:].strip("\"'"))
continue
@@ -1269,16 +1389,19 @@ class CmdletExecutor:
result = piped_items
else:
result = [
- piped_items[idx] for idx in selected_indices if 0 <= idx < len(piped_items)
+ piped_items[idx] for idx in selected_indices
+ if 0 <= idx < len(piped_items)
]
worker_manager = WorkerManagerRegistry.ensure(config)
stage_session = WorkerStages.begin_stage(
worker_manager,
cmd_name=cmd_name,
- stage_tokens=[cmd_name, *filtered_args],
+ stage_tokens=[cmd_name,
+ *filtered_args],
config=config,
- command_text=" ".join([cmd_name, *filtered_args]).strip() or cmd_name,
+ command_text=" ".join([cmd_name,
+ *filtered_args]).strip() or cmd_name,
)
stage_worker_id = stage_session.worker_id if stage_session else None
@@ -1329,8 +1452,8 @@ class CmdletExecutor:
try:
raw_stage = (
ctx.get_current_command_text("")
- if hasattr(ctx, "get_current_command_text")
- else ""
+ if hasattr(ctx,
+ "get_current_command_text") else ""
)
except Exception:
raw_stage = ""
@@ -1338,7 +1461,8 @@ class CmdletExecutor:
ctx.set_current_stage_text(raw_stage)
else:
ctx.set_current_stage_text(
- " ".join([cmd_name, *filtered_args]).strip() or cmd_name
+ " ".join([cmd_name,
+ *filtered_args]).strip() or cmd_name
)
except Exception:
pass
@@ -1354,7 +1478,9 @@ class CmdletExecutor:
from cmdlet import _shared as sh
emits = sh.apply_output_path_from_pipeobjects(
- cmd_name=cmd_name, args=filtered_args, emits=emits
+ cmd_name=cmd_name,
+ args=filtered_args,
+ emits=emits
)
try:
pipeline_ctx.emits = list(emits)
@@ -1373,7 +1499,11 @@ class CmdletExecutor:
if is_format_selection:
ctx.set_last_result_items_only(emits)
else:
- table_title = self._get_table_title_for_command(cmd_name, emits, filtered_args)
+ table_title = self._get_table_title_for_command(
+ cmd_name,
+ emits,
+ filtered_args
+ )
selectable_commands = {
"search-file",
@@ -1434,7 +1564,8 @@ class CmdletExecutor:
try:
if pipe_idx is not None:
progress_ui.finish_pipe(
- int(pipe_idx), force_complete=(stage_status == "completed")
+ int(pipe_idx),
+ force_complete=(stage_status == "completed")
)
except Exception:
pass
@@ -1455,16 +1586,21 @@ class CmdletExecutor:
# If the cmdlet produced a current-stage table without emits (e.g. format selection),
# render it here for parity with REPL pipeline runner.
- if (not getattr(pipeline_ctx, "emits", None)) and hasattr(
- ctx, "get_current_stage_table"
- ):
+ if (not getattr(pipeline_ctx,
+ "emits",
+ None)) and hasattr(ctx,
+ "get_current_stage_table"):
try:
stage_table = ctx.get_current_stage_table()
except Exception:
stage_table = None
if stage_table is not None:
try:
- already_rendered = bool(getattr(stage_table, "_rendered_by_cmdlet", False))
+ already_rendered = bool(
+ getattr(stage_table,
+ "_rendered_by_cmdlet",
+ False)
+ )
except Exception:
already_rendered = False
@@ -1475,7 +1611,8 @@ class CmdletExecutor:
try:
if pipe_idx is not None:
progress_ui.finish_pipe(
- int(pipe_idx), force_complete=(stage_status == "completed")
+ int(pipe_idx),
+ force_complete=(stage_status == "completed")
)
except Exception:
pass
@@ -1506,7 +1643,8 @@ class CmdletExecutor:
try:
if pipe_idx is not None:
progress_ui.finish_pipe(
- int(pipe_idx), force_complete=(stage_status == "completed")
+ int(pipe_idx),
+ force_complete=(stage_status == "completed")
)
except Exception:
pass
@@ -1542,6 +1680,7 @@ class CmdletExecutor:
class PipelineExecutor:
+
def __init__(self, *, config_loader: ConfigLoader) -> None:
self._config_loader = config_loader
self._toolbar_output: Optional[Callable[[str], None]] = None
@@ -1619,17 +1758,19 @@ class PipelineExecutor:
@staticmethod
def _maybe_seed_current_stage_table(ctx: Any) -> None:
try:
- if hasattr(ctx, "get_current_stage_table") and not ctx.get_current_stage_table():
+ if hasattr(ctx,
+ "get_current_stage_table") and not ctx.get_current_stage_table():
display_table = (
- ctx.get_display_table() if hasattr(ctx, "get_display_table") else None
+ ctx.get_display_table() if hasattr(ctx,
+ "get_display_table") else None
)
if display_table:
ctx.set_current_stage_table(display_table)
else:
last_table = (
ctx.get_last_result_table()
- if hasattr(ctx, "get_last_result_table")
- else None
+ if hasattr(ctx,
+ "get_last_result_table") else None
)
if last_table:
ctx.set_current_stage_table(last_table)
@@ -1637,15 +1778,18 @@ class PipelineExecutor:
pass
@staticmethod
- def _maybe_apply_pending_pipeline_tail(ctx: Any, stages: List[List[str]]) -> List[List[str]]:
+ def _maybe_apply_pending_pipeline_tail(ctx: Any,
+ stages: List[List[str]]) -> List[List[str]]:
try:
pending_tail = (
- ctx.get_pending_pipeline_tail() if hasattr(ctx, "get_pending_pipeline_tail") else []
+ ctx.get_pending_pipeline_tail()
+ if hasattr(ctx,
+ "get_pending_pipeline_tail") else []
)
pending_source = (
ctx.get_pending_pipeline_source()
- if hasattr(ctx, "get_pending_pipeline_source")
- else None
+ if hasattr(ctx,
+ "get_pending_pipeline_source") else None
)
except Exception:
pending_tail = []
@@ -1654,8 +1798,8 @@ class PipelineExecutor:
try:
current_source = (
ctx.get_current_stage_table_source_command()
- if hasattr(ctx, "get_current_stage_table_source_command")
- else None
+ if hasattr(ctx,
+ "get_current_stage_table_source_command") else None
)
except Exception:
current_source = None
@@ -1663,17 +1807,18 @@ class PipelineExecutor:
try:
effective_source = current_source or (
ctx.get_last_result_table_source_command()
- if hasattr(ctx, "get_last_result_table_source_command")
- else None
+ if hasattr(ctx,
+ "get_last_result_table_source_command") else None
)
except Exception:
effective_source = current_source
- selection_only = bool(len(stages) == 1 and stages[0] and stages[0][0].startswith("@"))
+ selection_only = bool(
+ len(stages) == 1 and stages[0] and stages[0][0].startswith("@")
+ )
if pending_tail and selection_only:
- if (pending_source is None) or (
- effective_source and pending_source == effective_source
- ):
+ if (pending_source is None) or (effective_source
+ and pending_source == effective_source):
stages = list(stages) + list(pending_tail)
try:
if hasattr(ctx, "clear_pending_pipeline_tail"):
@@ -1699,7 +1844,10 @@ class PipelineExecutor:
@staticmethod
def _extract_first_stage_selection_tokens(
stages: List[List[str]],
- ) -> tuple[List[List[str]], List[int], bool, bool]:
+ ) -> tuple[List[List[str]],
+ List[int],
+ bool,
+ bool]:
first_stage_tokens = stages[0] if stages else []
first_stage_selection_indices: List[int] = []
first_stage_had_extra_args = False
@@ -1711,7 +1859,9 @@ class PipelineExecutor:
if token.startswith("@"): # selection
selection = SelectionSyntax.parse(token)
if selection is not None:
- first_stage_selection_indices = sorted([i - 1 for i in selection])
+ first_stage_selection_indices = sorted(
+ [i - 1 for i in selection]
+ )
continue
if token == "@*":
first_stage_select_all = True
@@ -1735,7 +1885,9 @@ class PipelineExecutor:
)
@staticmethod
- def _apply_select_all_if_requested(ctx: Any, indices: List[int], select_all: bool) -> List[int]:
+ def _apply_select_all_if_requested(ctx: Any,
+ indices: List[int],
+ select_all: bool) -> List[int]:
if not select_all:
return indices
try:
@@ -1748,7 +1900,11 @@ class PipelineExecutor:
@staticmethod
def _maybe_run_class_selector(
- ctx: Any, config: Any, selected_items: list, *, stage_is_last: bool
+ ctx: Any,
+ config: Any,
+ selected_items: list,
+ *,
+ stage_is_last: bool
) -> bool:
if not stage_is_last:
return False
@@ -1768,7 +1924,11 @@ class PipelineExecutor:
try:
current_table = ctx.get_current_stage_table() or ctx.get_last_result_table()
- _add(current_table.table if current_table and hasattr(current_table, "table") else None)
+ _add(
+ current_table.
+ table if current_table and hasattr(current_table,
+ "table") else None
+ )
except Exception:
pass
@@ -1791,7 +1951,8 @@ class PipelineExecutor:
if get_provider is not None:
for key in candidates:
try:
- if is_known_provider_name is not None and (not is_known_provider_name(key)):
+ if is_known_provider_name is not None and (
+ not is_known_provider_name(key)):
continue
except Exception:
# If the predicate fails for any reason, fall back to legacy behavior.
@@ -1804,7 +1965,11 @@ class PipelineExecutor:
if selector is None:
continue
try:
- handled = bool(selector(selected_items, ctx=ctx, stage_is_last=True))
+ handled = bool(
+ selector(selected_items,
+ ctx=ctx,
+ stage_is_last=True)
+ )
except Exception as exc:
print(f"{key} selector failed: {exc}\n")
return True
@@ -1828,7 +1993,8 @@ class PipelineExecutor:
store_registry = StoreRegistry(config, suppress_debug=True)
_backend_names = list(store_registry.list_backends() or [])
_backend_by_lower = {
- str(n).lower(): str(n) for n in _backend_names if str(n).strip()
+ str(n).lower(): str(n)
+ for n in _backend_names if str(n).strip()
}
for name in store_keys:
resolved_name = name
@@ -1840,7 +2006,11 @@ class PipelineExecutor:
selector = getattr(backend, "selector", None)
if selector is None:
continue
- handled = bool(selector(selected_items, ctx=ctx, stage_is_last=True))
+ handled = bool(
+ selector(selected_items,
+ ctx=ctx,
+ stage_is_last=True)
+ )
if handled:
return True
except Exception:
@@ -1850,7 +2020,10 @@ class PipelineExecutor:
@staticmethod
def _maybe_open_url_selection(
- current_table: Any, selected_items: list, *, stage_is_last: bool
+ current_table: Any,
+ selected_items: list,
+ *,
+ stage_is_last: bool
) -> bool:
if not stage_is_last:
return False
@@ -1865,10 +2038,10 @@ class PipelineExecutor:
table_type = ""
try:
source_cmd = (
- str(getattr(current_table, "source_command", "") or "")
- .strip()
- .replace("_", "-")
- .lower()
+ str(getattr(current_table,
+ "source_command",
+ "") or "").strip().replace("_",
+ "-").lower()
)
except Exception:
source_cmd = ""
@@ -1884,7 +2057,11 @@ class PipelineExecutor:
url = get_field(item, "url")
except Exception:
try:
- url = item.get("url") if isinstance(item, dict) else getattr(item, "url", None)
+ url = item.get("url") if isinstance(item,
+ dict
+ ) else getattr(item,
+ "url",
+ None)
except Exception:
url = None
@@ -1901,7 +2078,10 @@ class PipelineExecutor:
return False
def _maybe_enable_background_notifier(
- self, worker_manager: Any, config: Any, pipeline_session: Any
+ self,
+ worker_manager: Any,
+ config: Any,
+ pipeline_session: Any
) -> None:
if not (pipeline_session and worker_manager and isinstance(config, dict)):
return
@@ -1914,11 +2094,12 @@ class PipelineExecutor:
output_fn = self._toolbar_output
quiet_mode = bool(config.get("_quiet_background_output"))
terminal_only = quiet_mode and not output_fn
- kwargs: Dict[str, Any] = {
- "session_worker_ids": session_worker_ids,
- "only_terminal_updates": terminal_only,
- "overlay_mode": bool(output_fn),
- }
+ kwargs: Dict[str,
+ Any] = {
+ "session_worker_ids": session_worker_ids,
+ "only_terminal_updates": terminal_only,
+ "overlay_mode": bool(output_fn),
+ }
if output_fn:
kwargs["output"] = output_fn
ensure_background_notifier(worker_manager, **kwargs)
@@ -1945,14 +2126,16 @@ class PipelineExecutor:
first_stage_had_extra_args: bool,
worker_manager: Any,
pipeline_session: Any,
- ) -> tuple[bool, Any]:
+ ) -> tuple[bool,
+ Any]:
if not selection_indices:
return True, None
try:
if not ctx.get_current_stage_table_source_command():
display_table = (
- ctx.get_display_table() if hasattr(ctx, "get_display_table") else None
+ ctx.get_display_table() if hasattr(ctx,
+ "get_display_table") else None
)
table_for_stage = display_table or ctx.get_last_result_table()
if table_for_stage:
@@ -1982,12 +2165,14 @@ class PipelineExecutor:
except Exception:
current_table = None
table_type = (
- current_table.table if current_table and hasattr(current_table, "table") else None
+ current_table.table if current_table and hasattr(current_table,
+ "table") else None
)
command_expanded = False
- if table_type in {"youtube", "soulseek"}:
+ if table_type in {"youtube",
+ "soulseek"}:
command_expanded = False
elif source_cmd == "search-file" and source_args and "youtube" in source_args:
command_expanded = False
@@ -2005,11 +2190,15 @@ class PipelineExecutor:
row_args_list: List[List[str]] = []
for idx in selection_indices:
try:
- row_args = ctx.get_current_stage_table_row_selection_args(idx)
+ row_args = ctx.get_current_stage_table_row_selection_args(
+ idx
+ )
except Exception:
row_args = None
if isinstance(row_args, list) and row_args:
- row_args_list.append([str(x) for x in row_args if x is not None])
+ row_args_list.append(
+ [str(x) for x in row_args if x is not None]
+ )
# Combine `['-path', ]` from each row into one `-path` arg.
paths: List[str] = []
@@ -2019,9 +2208,9 @@ class PipelineExecutor:
if can_merge:
for ra in row_args_list:
if len(ra) == 2 and str(ra[0]).strip().lower() in {
- "-path",
- "--path",
- "-p",
+ "-path",
+ "--path",
+ "-p",
}:
p = str(ra[1]).strip()
if p:
@@ -2082,7 +2271,8 @@ class PipelineExecutor:
display_table = None
try:
display_table = (
- ctx.get_display_table() if hasattr(ctx, "get_display_table") else None
+ ctx.get_display_table() if hasattr(ctx,
+ "get_display_table") else None
)
except Exception:
display_table = None
@@ -2112,22 +2302,26 @@ class PipelineExecutor:
resolved_items = items_list if items_list else []
if items_list:
filtered = [
- resolved_items[i] for i in selection_indices if 0 <= i < len(resolved_items)
+ resolved_items[i] for i in selection_indices
+ if 0 <= i < len(resolved_items)
]
if not filtered:
print("No items matched selection in pipeline\n")
return False, None
if PipelineExecutor._maybe_run_class_selector(
- ctx, config, filtered, stage_is_last=(not stages)
- ):
+ ctx,
+ config,
+ filtered,
+ stage_is_last=(not stages)):
return False, None
from cmdlet._shared import coerce_to_pipe_object
filtered_pipe_objs = [coerce_to_pipe_object(item) for item in filtered]
piped_result = (
- filtered_pipe_objs if len(filtered_pipe_objs) > 1 else filtered_pipe_objs[0]
+ filtered_pipe_objs
+ if len(filtered_pipe_objs) > 1 else filtered_pipe_objs[0]
)
if pipeline_session and worker_manager:
@@ -2142,13 +2336,14 @@ class PipelineExecutor:
# Auto-insert downloader stages for provider tables.
try:
- current_table = ctx.get_current_stage_table() or ctx.get_last_result_table()
+ current_table = ctx.get_current_stage_table(
+ ) or ctx.get_last_result_table()
except Exception:
current_table = None
table_type = (
current_table.table
- if current_table and hasattr(current_table, "table")
- else None
+ if current_table and hasattr(current_table,
+ "table") else None
)
if not stages:
@@ -2161,48 +2356,52 @@ class PipelineExecutor:
elif table_type == "internetarchive":
print("Auto-loading Internet Archive item via download-file")
stages.append(["download-file"])
- elif table_type in {"soulseek", "openlibrary", "libgen"}:
+ elif table_type in {"soulseek",
+ "openlibrary",
+ "libgen"}:
print("Auto-piping selection to download-file")
stages.append(["download-file"])
else:
first_cmd = stages[0][0] if stages and stages[0] else None
if table_type == "soulseek" and first_cmd not in (
- "download-file",
- "download-media",
- "download_media",
- ".pipe",
+ "download-file",
+ "download-media",
+ "download_media",
+ ".pipe",
):
debug("Auto-inserting download-file after Soulseek selection")
stages.insert(0, ["download-file"])
if table_type == "youtube" and first_cmd not in (
- "download-media",
- "download_media",
- "download-file",
- ".pipe",
+ "download-media",
+ "download_media",
+ "download-file",
+ ".pipe",
):
debug("Auto-inserting download-media after YouTube selection")
stages.insert(0, ["download-media"])
if table_type == "bandcamp" and first_cmd not in (
- "download-media",
- "download_media",
- "download-file",
- ".pipe",
+ "download-media",
+ "download_media",
+ "download-file",
+ ".pipe",
):
print("Auto-inserting download-media after Bandcamp selection")
stages.insert(0, ["download-media"])
if table_type == "internetarchive" and first_cmd not in (
- "download-file",
- "download-media",
- "download_media",
- ".pipe",
+ "download-file",
+ "download-media",
+ "download_media",
+ ".pipe",
):
- debug("Auto-inserting download-file after Internet Archive selection")
+ debug(
+ "Auto-inserting download-file after Internet Archive selection"
+ )
stages.insert(0, ["download-file"])
if table_type == "libgen" and first_cmd not in (
- "download-file",
- "download-media",
- "download_media",
- ".pipe",
+ "download-file",
+ "download-media",
+ "download_media",
+ ".pipe",
):
print("Auto-inserting download-file after Libgen selection")
stages.insert(0, ["download-file"])
@@ -2215,15 +2414,19 @@ class PipelineExecutor:
return True, None
@staticmethod
- def _maybe_start_live_progress(
- config: Any, stages: List[List[str]]
- ) -> tuple[Any, Dict[int, int]]:
+ def _maybe_start_live_progress(config: Any,
+ stages: List[List[str]]) -> tuple[Any,
+ Dict[int,
+ int]]:
progress_ui = None
- pipe_index_by_stage: Dict[int, int] = {}
+ pipe_index_by_stage: Dict[int,
+ int] = {}
try:
quiet_mode = (
- bool(config.get("_quiet_background_output")) if isinstance(config, dict) else False
+ bool(config.get("_quiet_background_output"))
+ if isinstance(config,
+ dict) else False
)
except Exception:
quiet_mode = False
@@ -2231,7 +2434,8 @@ class PipelineExecutor:
try:
import sys as _sys
- if (not quiet_mode) and bool(getattr(_sys.stderr, "isatty", lambda: False)()):
+ if (not quiet_mode) and bool(getattr(_sys.stderr,
+ "isatty", lambda: False)()):
from models import PipelineLiveProgress
pipe_stage_indices: List[int] = []
@@ -2245,7 +2449,8 @@ class PipelineExecutor:
# add-file directory selector stage: avoid Live progress so the
# selection table renders cleanly.
- if name in {"add-file", "add_file"}:
+ if name in {"add-file",
+ "add_file"}:
try:
from pathlib import Path as _Path
@@ -2254,7 +2459,9 @@ class PipelineExecutor:
while i < len(toks):
t = str(toks[i])
low = t.lower().strip()
- if low in {"-path", "--path", "-p"} and i + 1 < len(toks):
+ if low in {"-path",
+ "--path",
+ "-p"} and i + 1 < len(toks):
nxt = str(toks[i + 1])
if nxt and ("," not in nxt):
p = _Path(nxt)
@@ -2271,7 +2478,8 @@ class PipelineExecutor:
# Display-only: avoid Live progress for relationship viewing.
# This keeps `@1 | get-relationship` clean and prevents progress UI
# from interfering with Rich tables/panels.
- if name in {"get-relationship", "get-rel"}:
+ if name in {"get-relationship",
+ "get-rel"}:
continue
# `.pipe` (MPV) is an interactive launcher; disable pipeline Live progress
# for it because it doesn't meaningfully "complete" (mpv may keep running)
@@ -2284,7 +2492,8 @@ class PipelineExecutor:
continue
# `delete-file` prints a Rich table directly; Live progress interferes and
# can truncate/overwrite the output.
- if name in {"delete-file", "del-file"}:
+ if name in {"delete-file",
+ "del-file"}:
continue
pipe_stage_indices.append(idx)
pipe_labels.append(name)
@@ -2300,7 +2509,8 @@ class PipelineExecutor:
except Exception:
pass
pipe_index_by_stage = {
- stage_idx: pipe_idx for pipe_idx, stage_idx in enumerate(pipe_stage_indices)
+ stage_idx: pipe_idx
+ for pipe_idx, stage_idx in enumerate(pipe_stage_indices)
}
except Exception:
progress_ui = None
@@ -2326,7 +2536,8 @@ class PipelineExecutor:
# Preflight (URL-duplicate prompts, etc.) should be cached within a single
# pipeline run, not across independent pipelines.
try:
- ctx.store_value("preflight", {})
+ ctx.store_value("preflight",
+ {})
except Exception:
pass
@@ -2346,23 +2557,32 @@ class PipelineExecutor:
first_stage_select_all,
) = self._extract_first_stage_selection_tokens(stages)
first_stage_selection_indices = self._apply_select_all_if_requested(
- ctx, first_stage_selection_indices, first_stage_select_all
+ ctx,
+ first_stage_selection_indices,
+ first_stage_select_all
)
piped_result: Any = None
worker_manager = WorkerManagerRegistry.ensure(config)
pipeline_text = " | ".join(" ".join(stage) for stage in stages)
pipeline_session = WorkerStages.begin_pipeline(
- worker_manager, pipeline_text=pipeline_text, config=config
+ worker_manager,
+ pipeline_text=pipeline_text,
+ config=config
)
raw_stage_texts = self._get_raw_stage_texts(ctx)
- self._maybe_enable_background_notifier(worker_manager, config, pipeline_session)
+ self._maybe_enable_background_notifier(
+ worker_manager,
+ config,
+ pipeline_session
+ )
pipeline_status = "completed"
pipeline_error = ""
progress_ui = None
- pipe_index_by_stage: Dict[int, int] = {}
+ pipe_index_by_stage: Dict[int,
+ int] = {}
try:
ok, initial_piped = self._maybe_apply_initial_selection(
@@ -2411,10 +2631,13 @@ class PipelineExecutor:
from cmdlet._shared import coerce_to_pipe_object
try:
- pipe_items = [coerce_to_pipe_object(x) for x in list(last_items)]
+ pipe_items = [
+ coerce_to_pipe_object(x) for x in list(last_items)
+ ]
except Exception:
pipe_items = list(last_items)
- piped_result = pipe_items if len(pipe_items) > 1 else pipe_items[0]
+ piped_result = pipe_items if len(pipe_items
+ ) > 1 else pipe_items[0]
try:
ctx.set_last_items(pipe_items)
except Exception:
@@ -2422,7 +2645,8 @@ class PipelineExecutor:
if pipeline_session and worker_manager:
try:
worker_manager.log_step(
- pipeline_session.worker_id, "@ used last result items"
+ pipeline_session.worker_id,
+ "@ used last result items"
)
except Exception:
pass
@@ -2436,14 +2660,16 @@ class PipelineExecutor:
return
piped_result = subject
try:
- subject_items = subject if isinstance(subject, list) else [subject]
+ subject_items = subject if isinstance(subject,
+ list) else [subject]
ctx.set_last_items(subject_items)
except Exception:
pass
if pipeline_session and worker_manager:
try:
worker_manager.log_step(
- pipeline_session.worker_id, "@ used current table subject"
+ pipeline_session.worker_id,
+ "@ used current table subject"
)
except Exception:
pass
@@ -2466,8 +2692,8 @@ class PipelineExecutor:
try:
display_table = (
ctx.get_display_table()
- if hasattr(ctx, "get_display_table")
- else None
+ if hasattr(ctx,
+ "get_display_table") else None
)
except Exception:
display_table = None
@@ -2482,19 +2708,21 @@ class PipelineExecutor:
items_list = ctx.get_last_result_items() or []
else:
if hasattr(ctx, "get_last_selectable_result_items"):
- items_list = ctx.get_last_selectable_result_items() or []
+ items_list = ctx.get_last_selectable_result_items(
+ ) or []
else:
items_list = ctx.get_last_result_items() or []
if is_select_all:
selected_indices = list(range(len(items_list)))
else:
- selected_indices = sorted([i - 1 for i in selection]) # type: ignore[arg-type]
+ selected_indices = sorted(
+ [i - 1 for i in selection]
+ ) # type: ignore[arg-type]
resolved_items = items_list if items_list else []
filtered = [
- resolved_items[i]
- for i in selected_indices
+ resolved_items[i] for i in selected_indices
if 0 <= i < len(resolved_items)
]
if not filtered:
@@ -2506,7 +2734,8 @@ class PipelineExecutor:
# UX: selecting a single URL row from get-url tables should open it.
# Only do this when the selection stage is terminal to avoid surprising
# side-effects in pipelines like `@1 | download-file`.
- current_table = ctx.get_current_stage_table() or ctx.get_last_result_table()
+ current_table = ctx.get_current_stage_table(
+ ) or ctx.get_last_result_table()
if (not is_select_all) and (len(filtered) == 1):
try:
PipelineExecutor._maybe_open_url_selection(
@@ -2518,26 +2747,31 @@ class PipelineExecutor:
pass
if PipelineExecutor._maybe_run_class_selector(
- ctx, config, filtered, stage_is_last=(stage_index + 1 >= len(stages))
- ):
+ ctx,
+ config,
+ filtered,
+ stage_is_last=(stage_index + 1 >= len(stages))):
return
# Special case: selecting multiple tags from get-tag and piping into delete-tag
# should batch into a single operation (one backend call).
next_cmd = None
try:
- if stage_index + 1 < len(stages) and stages[stage_index + 1]:
- next_cmd = str(stages[stage_index + 1][0]).replace("_", "-").lower()
+ if stage_index + 1 < len(stages) and stages[stage_index +
+ 1]:
+ next_cmd = str(stages[stage_index + 1][0]
+ ).replace("_",
+ "-").lower()
except Exception:
next_cmd = None
def _is_tag_row(obj: Any) -> bool:
try:
- if (
- hasattr(obj, "__class__")
- and obj.__class__.__name__ == "TagItem"
- and hasattr(obj, "tag_name")
- ):
+ if (hasattr(obj,
+ "__class__")
+ and obj.__class__.__name__ == "TagItem"
+ and hasattr(obj,
+ "tag_name")):
return True
except Exception:
pass
@@ -2548,11 +2782,9 @@ class PipelineExecutor:
pass
return False
- if (
- next_cmd in {"delete-tag", "delete_tag"}
- and len(filtered) > 1
- and all(_is_tag_row(x) for x in filtered)
- ):
+ if (next_cmd in {"delete-tag",
+ "delete_tag"} and len(filtered) > 1
+ and all(_is_tag_row(x) for x in filtered)):
from cmdlet._shared import get_field
tags: List[str] = []
@@ -2568,9 +2800,11 @@ class PipelineExecutor:
if first_store is None:
first_store = get_field(item, "store")
if first_path is None:
- first_path = get_field(item, "path") or get_field(
- item, "target"
- )
+ first_path = get_field(item,
+ "path") or get_field(
+ item,
+ "target"
+ )
if tags:
grouped = {
@@ -2586,18 +2820,20 @@ class PipelineExecutor:
from cmdlet._shared import coerce_to_pipe_object
- filtered_pipe_objs = [coerce_to_pipe_object(item) for item in filtered]
+ filtered_pipe_objs = [
+ coerce_to_pipe_object(item) for item in filtered
+ ]
piped_result = (
filtered_pipe_objs
- if len(filtered_pipe_objs) > 1
- else filtered_pipe_objs[0]
+ if len(filtered_pipe_objs) > 1 else filtered_pipe_objs[0]
)
- current_table = ctx.get_current_stage_table() or ctx.get_last_result_table()
+ current_table = ctx.get_current_stage_table(
+ ) or ctx.get_last_result_table()
table_type = (
current_table.table
- if current_table and hasattr(current_table, "table")
- else None
+ if current_table and hasattr(current_table,
+ "table") else None
)
if table_type == "youtube" and stage_index + 1 >= len(stages):
print("Auto-running YouTube selection via download-media")
@@ -2642,16 +2878,16 @@ class PipelineExecutor:
while i < len(toks):
t = str(toks[i])
low = t.lower().strip()
- if (
- cmd_name == "add-file"
- and low in {"-path", "--path", "-p"}
- and i + 1 < len(toks)
- ):
+ if (cmd_name == "add-file" and low in {"-path",
+ "--path",
+ "-p"}
+ and i + 1 < len(toks)):
nxt = str(toks[i + 1])
if nxt:
if "," in nxt:
parts = [
- p.strip().strip("\"'") for p in nxt.split(",")
+ p.strip().strip("\"'")
+ for p in nxt.split(",")
]
parts = [p for p in parts if p]
if parts:
@@ -2662,15 +2898,17 @@ class PipelineExecutor:
preview.append(nxt)
i += 2
continue
- if low in {"-url", "--url"} and i + 1 < len(toks):
+ if low in {"-url",
+ "--url"} and i + 1 < len(toks):
nxt = str(toks[i + 1])
if nxt and not nxt.startswith("-"):
preview.append(nxt)
i += 2
continue
- if (not t.startswith("-")) and (
- "://" in low or low.startswith(("magnet:", "torrent:"))
- ):
+ if (not t.startswith("-")) and ("://" in low
+ or low.startswith(
+ ("magnet:",
+ "torrent:"))):
preview.append(t)
i += 1
@@ -2678,7 +2916,9 @@ class PipelineExecutor:
total_items = len(preview) if preview else 1
progress_ui.begin_pipe(
- pipe_idx, total_items=int(total_items), items_preview=preview_items
+ pipe_idx,
+ total_items=int(total_items),
+ items_preview=preview_items
)
except Exception:
pass
@@ -2687,7 +2927,11 @@ class PipelineExecutor:
if progress_ui is not None and pipe_idx is not None:
_ui = cast(Any, progress_ui)
- def _on_emit(obj: Any, _idx: int = int(pipe_idx), _progress=_ui) -> None:
+ def _on_emit(
+ obj: Any,
+ _idx: int = int(pipe_idx),
+ _progress=_ui
+ ) -> None:
try:
_progress.on_emit(_idx, obj)
except Exception:
@@ -2725,15 +2969,19 @@ class PipelineExecutor:
try:
if hasattr(ctx, "set_current_stage_text"):
stage_text = ""
- if raw_stage_texts and stage_index < len(raw_stage_texts):
- candidate = str(raw_stage_texts[stage_index] or "").strip()
+ if raw_stage_texts and stage_index < len(raw_stage_texts
+ ):
+ candidate = str(raw_stage_texts[stage_index]
+ or "").strip()
if candidate:
try:
cand_tokens = shlex.split(candidate)
except Exception:
cand_tokens = candidate.split()
if cand_tokens:
- first = str(cand_tokens[0]).replace("_", "-").lower()
+ first = str(cand_tokens[0]
+ ).replace("_",
+ "-").lower()
if first == cmd_name:
stage_text = candidate
if not stage_text:
@@ -2744,11 +2992,8 @@ class PipelineExecutor:
# `.pipe` is typically the terminal interactive stage (MPV UI).
# Stop Live progress before running it so output doesn't get stuck behind Live.
- if (
- cmd_name == ".pipe"
- and progress_ui is not None
- and (stage_index + 1 >= len(stages))
- ):
+ if (cmd_name == ".pipe" and progress_ui is not None
+ and (stage_index + 1 >= len(stages))):
try:
progress_ui.stop()
except Exception:
@@ -2770,8 +3015,8 @@ class PipelineExecutor:
try:
stop_req = (
ctx.get_pipeline_stop()
- if hasattr(ctx, "get_pipeline_stop")
- else None
+ if hasattr(ctx,
+ "get_pipeline_stop") else None
)
except Exception:
stop_req = None
@@ -2808,8 +3053,8 @@ class PipelineExecutor:
try:
has_overlay = (
bool(ctx.get_display_table())
- if hasattr(ctx, "get_display_table")
- else False
+ if hasattr(ctx,
+ "get_display_table") else False
)
except Exception:
has_overlay = False
@@ -2824,59 +3069,58 @@ class PipelineExecutor:
# the table and pause the pipeline so the user can pick @N.
stage_table = (
ctx.get_current_stage_table()
- if hasattr(ctx, "get_current_stage_table")
- else None
+ if hasattr(ctx,
+ "get_current_stage_table") else None
)
stage_table_type = (
- str(getattr(stage_table, "table", "") or "").strip().lower()
- if stage_table
- else ""
+ str(getattr(stage_table,
+ "table",
+ "") or "").strip().lower()
+ if stage_table else ""
)
try:
stage_table_source = (
- str(getattr(stage_table, "source_command", "") or "")
- .strip()
- .replace("_", "-")
- .lower()
- if stage_table
- else ""
+ str(getattr(stage_table,
+ "source_command",
+ "") or "").strip().replace("_",
+ "-").lower()
+ if stage_table else ""
)
except Exception:
stage_table_source = ""
- if (
- (not stage_is_last)
- and (not emits)
- and cmd_name
- in {
+ if ((not stage_is_last) and (not emits) and cmd_name in {
"download-media",
"download_media",
"download-data",
"download_data",
- }
- and stage_table is not None
- and (
- stage_table_type
- in {
+ } and stage_table is not None
+ and (stage_table_type in {
"ytdlp.formatlist",
"download-media",
"download_media",
"bandcamp",
"youtube",
- }
- or stage_table_source in {"download-media", "download_media"}
- or stage_table_type in {"internetarchive.formats"}
- or stage_table_source in {"download-file"}
- )
- ):
+ } or stage_table_source in {"download-media",
+ "download_media"}
+ or stage_table_type in {"internetarchive.formats"}
+ or stage_table_source in {"download-file"})):
try:
- is_selectable = not bool(getattr(stage_table, "no_choice", False))
+ is_selectable = not bool(
+ getattr(stage_table,
+ "no_choice",
+ False)
+ )
except Exception:
is_selectable = True
if is_selectable:
try:
already_rendered = bool(
- getattr(stage_table, "_rendered_by_cmdlet", False)
+ getattr(
+ stage_table,
+ "_rendered_by_cmdlet",
+ False
+ )
)
except Exception:
already_rendered = False
@@ -2888,7 +3132,8 @@ class PipelineExecutor:
try:
if pipe_idx is not None:
progress_ui.finish_pipe(
- int(pipe_idx), force_complete=True
+ int(pipe_idx),
+ force_complete=True
)
except Exception:
pass
@@ -2899,7 +3144,8 @@ class PipelineExecutor:
try:
import pipeline as _pipeline_ctx
- if hasattr(_pipeline_ctx, "set_live_progress"):
+ if hasattr(_pipeline_ctx,
+ "set_live_progress"):
_pipeline_ctx.set_live_progress(None)
except Exception:
pass
@@ -2912,15 +3158,20 @@ class PipelineExecutor:
# pending downstream stages.
try:
- remaining = stages[stage_index + 1 :]
+ remaining = stages[stage_index + 1:]
source_cmd = (
ctx.get_current_stage_table_source_command()
- if hasattr(ctx, "get_current_stage_table_source_command")
- else None
+ if hasattr(
+ ctx,
+ "get_current_stage_table_source_command"
+ ) else None
)
- if remaining and hasattr(ctx, "set_pending_pipeline_tail"):
+ if remaining and hasattr(
+ ctx,
+ "set_pending_pipeline_tail"):
ctx.set_pending_pipeline_tail(
- remaining, source_command=source_cmd or cmd_name
+ remaining,
+ source_command=source_cmd or cmd_name
)
except Exception:
pass
@@ -2937,7 +3188,9 @@ class PipelineExecutor:
if pipe_idx is not None:
progress_ui.finish_pipe(
int(pipe_idx),
- force_complete=(stage_status == "completed"),
+ force_complete=(
+ stage_status == "completed"
+ ),
)
except Exception:
pass
@@ -2958,8 +3211,8 @@ class PipelineExecutor:
try:
final_table = (
ctx.get_display_table()
- if hasattr(ctx, "get_display_table")
- else None
+ if hasattr(ctx,
+ "get_display_table") else None
)
except Exception:
final_table = None
@@ -2970,37 +3223,33 @@ class PipelineExecutor:
# common for `stage_table` to still point at the previous stage's table
# (e.g. add-file's canonical store table). In that case, prefer rendering
# the emitted results so the user sees the actual output of this stage.
- if (
- emits
- and (
- ctx.get_display_table()
- if hasattr(ctx, "get_display_table")
- else None
- )
- is None
- ):
+ if (emits and (ctx.get_display_table() if hasattr(
+ ctx,
+ "get_display_table") else None) is None):
try:
src_cmd = (
- str(getattr(final_table, "source_command", "") or "")
- .strip()
- .lower()
- if final_table
- else ""
+ str(
+ getattr(final_table,
+ "source_command",
+ "") or ""
+ ).strip().lower() if final_table else ""
)
except Exception:
src_cmd = ""
try:
- cur_cmd = str(cmd_name or "").strip().replace("_", "-").lower()
+ cur_cmd = str(cmd_name
+ or "").strip().replace("_",
+ "-").lower()
except Exception:
cur_cmd = ""
- if (
- (final_table is None)
- or (not src_cmd)
- or (src_cmd.replace("_", "-") != cur_cmd)
- ):
+ if ((final_table is None) or (not src_cmd)
+ or (src_cmd.replace("_",
+ "-") != cur_cmd)):
try:
table_title = CmdletExecutor._get_table_title_for_command(
- cmd_name, emits, list(stage_args)
+ cmd_name,
+ emits,
+ list(stage_args)
)
except Exception:
table_title = "Results"
@@ -3008,8 +3257,12 @@ class PipelineExecutor:
for item in emits:
table.add_result(item)
try:
- if hasattr(ctx, "set_last_result_table_overlay"):
- ctx.set_last_result_table_overlay(table, emits)
+ if hasattr(ctx,
+ "set_last_result_table_overlay"):
+ ctx.set_last_result_table_overlay(
+ table,
+ emits
+ )
if hasattr(ctx, "set_current_stage_table"):
ctx.set_current_stage_table(table)
except Exception:
@@ -3019,7 +3272,11 @@ class PipelineExecutor:
if final_table is not None:
try:
already_rendered = bool(
- getattr(final_table, "_rendered_by_cmdlet", False)
+ getattr(
+ final_table,
+ "_rendered_by_cmdlet",
+ False
+ )
)
except Exception:
already_rendered = False
@@ -3048,7 +3305,8 @@ class PipelineExecutor:
if progress_ui is not None and pipe_idx is not None:
try:
progress_ui.finish_pipe(
- int(pipe_idx), force_complete=(stage_status == "completed")
+ int(pipe_idx),
+ force_complete=(stage_status == "completed")
)
except Exception:
pass
@@ -3063,7 +3321,10 @@ class PipelineExecutor:
except Exception:
pass
if stage_session:
- stage_session.close(status=stage_status, error_msg=stage_error)
+ stage_session.close(
+ status=stage_status,
+ error_msg=stage_error
+ )
elif pipeline_session and worker_manager:
try:
worker_manager.log_step(
@@ -3075,7 +3336,8 @@ class PipelineExecutor:
if not stages and piped_result is not None:
table = ResultTable("Selection Result")
- items = piped_result if isinstance(piped_result, list) else [piped_result]
+ items = piped_result if isinstance(piped_result,
+ list) else [piped_result]
for item in items:
table.add_result(item)
ctx.set_last_result_items_only(items)
@@ -3106,7 +3368,10 @@ class PipelineExecutor:
except Exception:
pass
if pipeline_session:
- pipeline_session.close(status=pipeline_status, error_msg=pipeline_error)
+ pipeline_session.close(
+ status=pipeline_status,
+ error_msg=pipeline_error
+ )
except Exception as exc:
print(f"[error] Failed to execute pipeline: {exc}\n")
@@ -3139,9 +3404,13 @@ class MedeiaCLI:
def build_app(self) -> typer.Typer:
app = typer.Typer(help="Medeia-Macina CLI")
- def _validate_pipeline_option(ctx: typer.Context, param: typer.CallbackParam, value: str):
+ def _validate_pipeline_option(
+ ctx: typer.Context,
+ param: typer.CallbackParam,
+ value: str
+ ):
try:
- from cli_syntax import validate_pipeline_text
+ from SYS.cli_syntax import validate_pipeline_text
syntax_error = validate_pipeline_text(value)
if syntax_error:
@@ -3169,7 +3438,8 @@ class MedeiaCLI:
inc = (incomplete or "").lower()
return [
- CompletionItem(name) for name in choices if name and name.lower().startswith(inc)
+ CompletionItem(name) for name in choices
+ if name and name.lower().startswith(inc)
]
@app.command("search-provider")
@@ -3181,11 +3451,22 @@ class MedeiaCLI:
help="Provider name (bandcamp, libgen, soulseek, youtube)",
shell_complete=_complete_search_provider,
),
- query: str = typer.Argument(..., help="Search query (quote for spaces)"),
- limit: int = typer.Option(36, "--limit", "-l", help="Maximum results to return"),
+ query: str = typer.Argument(...,
+ help="Search query (quote for spaces)"),
+ limit: int = typer.Option(
+ 36,
+ "--limit",
+ "-l",
+ help="Maximum results to return"
+ ),
) -> None:
self._cmdlet_executor.execute(
- "search-provider", ["-provider", provider, query, "-limit", str(limit)]
+ "search-provider",
+ ["-provider",
+ provider,
+ query,
+ "-limit",
+ str(limit)]
)
@app.command("pipeline")
@@ -3198,7 +3479,10 @@ class MedeiaCLI:
callback=_validate_pipeline_option,
),
seeds_json: Optional[str] = typer.Option(
- None, "--seeds-json", "-s", help="JSON string of seed items"
+ None,
+ "--seeds-json",
+ "-s",
+ help="JSON string of seed items"
),
) -> None:
import pipeline as ctx
@@ -3218,7 +3502,7 @@ class MedeiaCLI:
return
try:
- from cli_syntax import validate_pipeline_text
+ from SYS.cli_syntax import validate_pipeline_text
syntax_error = validate_pipeline_text(command)
if syntax_error:
@@ -3254,16 +3538,22 @@ class MedeiaCLI:
# the cmdlet system without Typer trying to parse them.
try:
names = list_cmdlet_names()
- skip = {"search-provider", "pipeline", "repl"}
+ skip = {"search-provider",
+ "pipeline",
+ "repl"}
for nm in names:
if not nm or nm in skip:
continue
# create a scoped handler to capture the command name
def _make_handler(cmd_name: str):
+
@app.command(
cmd_name,
- context_settings={"ignore_unknown_options": True, "allow_extra_args": True},
+ context_settings={
+ "ignore_unknown_options": True,
+ "allow_extra_args": True
+ },
)
def _handler(ctx: typer.Context):
try:
@@ -3285,7 +3575,10 @@ class MedeiaCLI:
# Ensure Rich tracebacks are active even when invoking subcommands.
try:
config = self._config_loader.load()
- debug_enabled = bool(config.get("debug", False)) if isinstance(config, dict) else False
+ debug_enabled = bool(config.get("debug",
+ False)
+ ) if isinstance(config,
+ dict) else False
except Exception:
debug_enabled = False
@@ -3321,17 +3614,30 @@ class MedeiaCLI:
# Build root layout
root = Layout(name="root")
root.split_row(
- Layout(name="left", ratio=2),
- Layout(name="center", ratio=8),
- Layout(name="right", ratio=2),
+ Layout(name="left",
+ ratio=2),
+ Layout(name="center",
+ ratio=8),
+ Layout(name="right",
+ ratio=2),
)
# Left pillar → forward rainbow
- root["left"].update(Panel(rainbow_pillar(RAINBOW, height=21, bar_width=36), title="DELTA"))
+ root["left"].update(
+ Panel(rainbow_pillar(RAINBOW,
+ height=21,
+ bar_width=36),
+ title="DELTA")
+ )
# Right pillar → reverse rainbow
root["right"].update(
- Panel(rainbow_pillar(list(reversed(RAINBOW)), height=21, bar_width=36), title="LAMBDA")
+ Panel(
+ rainbow_pillar(list(reversed(RAINBOW)),
+ height=21,
+ bar_width=36),
+ title="LAMBDA"
+ )
)
# Center content
@@ -3442,7 +3748,9 @@ Come to love it when others take what you share, as there is no greater joy
if _has_store_subtype(config, "hydrusnetwork"):
store_cfg = config.get("store")
hydrus_cfg = (
- store_cfg.get("hydrusnetwork", {}) if isinstance(store_cfg, dict) else {}
+ store_cfg.get("hydrusnetwork",
+ {}) if isinstance(store_cfg,
+ dict) else {}
)
if isinstance(hydrus_cfg, dict):
for instance_name, instance_cfg in hydrus_cfg.items():
@@ -3451,7 +3759,10 @@ Come to love it when others take what you share, as there is no greater joy
name_key = str(instance_cfg.get("NAME") or instance_name)
url_val = str(instance_cfg.get("URL") or "").strip()
- ok = bool(store_registry and store_registry.is_available(name_key))
+ ok = bool(
+ store_registry
+ and store_registry.is_available(name_key)
+ )
status = "ENABLED" if ok else "DISABLED"
if ok:
total = None
@@ -3460,28 +3771,40 @@ Come to love it when others take what you share, as there is no greater joy
backend = store_registry[name_key]
total = getattr(backend, "total_count", None)
if total is None:
- getter = getattr(backend, "get_total_count", None)
+ getter = getattr(
+ backend,
+ "get_total_count",
+ None
+ )
if callable(getter):
total = getter()
except Exception:
total = None
detail = url_val
- files = total if isinstance(total, int) and total >= 0 else None
+ files = total if isinstance(
+ total,
+ int
+ ) and total >= 0 else None
else:
err = None
if store_registry:
err = store_registry.get_backend_error(
instance_name
) or store_registry.get_backend_error(name_key)
- detail = (url_val + (" - " if url_val else "")) + (
- err or "Unavailable"
- )
+ detail = (url_val + (" - " if url_val else "")
+ ) + (err or "Unavailable")
files = None
_add_startup_check(
- status, name_key, store="hydrusnetwork", files=files, detail=detail
+ status,
+ name_key,
+ store="hydrusnetwork",
+ files=files,
+ detail=detail
)
- provider_cfg = config.get("provider") if isinstance(config, dict) else None
+ provider_cfg = config.get("provider"
+ ) if isinstance(config,
+ dict) else None
if isinstance(provider_cfg, dict) and provider_cfg:
from Provider.metadata_provider import list_metadata_providers
from ProviderCore.registry import (
@@ -3520,7 +3843,8 @@ Come to love it when others take what you share, as there is no greater joy
from Provider.libgen import MIRRORS
mirrors = [
- str(x).rstrip("/") for x in (MIRRORS or []) if str(x).strip()
+ str(x).rstrip("/") for x in (MIRRORS or [])
+ if str(x).strip()
]
return [m + "/json.php" for m in mirrors]
return []
@@ -3549,11 +3873,18 @@ Come to love it when others take what you share, as there is no greater joy
api_key = _get_debrid_api_key(config)
if not api_key:
_add_startup_check(
- "DISABLED", display, provider=prov, detail="Not configured"
+ "DISABLED",
+ display,
+ provider=prov,
+ detail="Not configured"
)
else:
client = AllDebridClient(api_key)
- base_url = str(getattr(client, "base_url", "") or "").strip()
+ base_url = str(
+ getattr(client,
+ "base_url",
+ "") or ""
+ ).strip()
_add_startup_check(
"ENABLED",
display,
@@ -3562,7 +3893,10 @@ Come to love it when others take what you share, as there is no greater joy
)
except Exception as exc:
_add_startup_check(
- "DISABLED", display, provider=prov, detail=str(exc)
+ "DISABLED",
+ display,
+ provider=prov,
+ detail=str(exc)
)
continue
@@ -3583,7 +3917,10 @@ Come to love it when others take what you share, as there is no greater joy
if not is_known:
_add_startup_check(
- "UNKNOWN", display, provider=prov, detail="Not registered"
+ "UNKNOWN",
+ display,
+ provider=prov,
+ detail="Not registered"
)
else:
detail = "Configured" if ok_val else "Not configured"
@@ -3594,7 +3931,8 @@ Come to love it when others take what you share, as there is no greater joy
detail = ping_detail
else:
detail = (
- (detail + " | " + ping_detail) if ping_detail else detail
+ (detail + " | " +
+ ping_detail) if ping_detail else detail
)
_add_startup_check(
"ENABLED" if ok_val else "DISABLED",
@@ -3605,23 +3943,31 @@ Come to love it when others take what you share, as there is no greater joy
already_checked.add(prov)
- default_search_providers = ["openlibrary", "libgen", "youtube", "bandcamp"]
+ default_search_providers = [
+ "openlibrary",
+ "libgen",
+ "youtube",
+ "bandcamp"
+ ]
for prov in default_search_providers:
if prov in already_checked:
continue
display = _provider_display_name(prov)
ok_val = (
bool(search_availability.get(prov))
- if prov in search_availability
- else False
+ if prov in search_availability else False
)
ping_targets = _default_provider_ping_targets(prov)
ping_ok, ping_detail = (
_ping_first(ping_targets) if ping_targets else (False, "No ping target")
)
- detail = ping_detail or ("Available" if ok_val else "Unavailable")
+ detail = ping_detail or (
+ "Available" if ok_val else "Unavailable"
+ )
if not ok_val:
- detail = "Unavailable" + (f" | {ping_detail}" if ping_detail else "")
+ detail = "Unavailable" + (
+ f" | {ping_detail}" if ping_detail else ""
+ )
_add_startup_check(
"ENABLED" if (ok_val and ping_ok) else "DISABLED",
display,
@@ -3633,13 +3979,14 @@ Come to love it when others take what you share, as there is no greater joy
if "0x0" not in already_checked:
ok_val = (
bool(file_availability.get("0x0"))
- if "0x0" in file_availability
- else False
+ if "0x0" in file_availability else False
)
ping_ok, ping_detail = _ping_url("https://0x0.st")
detail = ping_detail
if not ok_val:
- detail = "Unavailable" + (f" | {ping_detail}" if ping_detail else "")
+ detail = "Unavailable" + (
+ f" | {ping_detail}" if ping_detail else ""
+ )
_add_startup_check(
"ENABLED" if (ok_val and ping_ok) else "DISABLED",
"0x0",
@@ -3653,9 +4000,10 @@ Come to love it when others take what you share, as there is no greater joy
provider = Matrix(config)
matrix_conf = (
- config.get("provider", {}).get("matrix", {})
- if isinstance(config, dict)
- else {}
+ config.get("provider",
+ {}).get("matrix",
+ {}) if isinstance(config,
+ dict) else {}
)
homeserver = str(matrix_conf.get("homeserver") or "").strip()
room_id = str(matrix_conf.get("room_id") or "").strip()
@@ -3663,34 +4011,51 @@ Come to love it when others take what you share, as there is no greater joy
homeserver = f"https://{homeserver}"
target = homeserver.rstrip("/")
if room_id:
- target = (target + (" " if target else "")) + f"room:{room_id}"
+ target = (
+ target + (" " if target else "")
+ ) + f"room:{room_id}"
_add_startup_check(
"ENABLED" if provider.validate() else "DISABLED",
"Matrix",
provider="matrix",
- detail=target
- or ("Connected" if provider.validate() else "Not configured"),
+ detail=target or
+ ("Connected" if provider.validate() else "Not configured"),
)
except Exception as exc:
- _add_startup_check("DISABLED", "Matrix", provider="matrix", detail=str(exc))
+ _add_startup_check(
+ "DISABLED",
+ "Matrix",
+ provider="matrix",
+ detail=str(exc)
+ )
if _has_store_subtype(config, "folder"):
store_cfg = config.get("store")
- folder_cfg = store_cfg.get("folder", {}) if isinstance(store_cfg, dict) else {}
+ folder_cfg = store_cfg.get("folder",
+ {}) if isinstance(store_cfg,
+ dict) else {}
if isinstance(folder_cfg, dict) and folder_cfg:
for instance_name, instance_cfg in folder_cfg.items():
if not isinstance(instance_cfg, dict):
continue
name_key = str(instance_cfg.get("NAME") or instance_name)
path_val = str(
- instance_cfg.get("PATH") or instance_cfg.get("path") or ""
+ instance_cfg.get("PATH") or instance_cfg.get("path")
+ or ""
).strip()
- ok = bool(store_registry and store_registry.is_available(name_key))
+ ok = bool(
+ store_registry
+ and store_registry.is_available(name_key)
+ )
if ok and store_registry:
backend = store_registry[name_key]
scan_ok = bool(getattr(backend, "scan_ok", True))
- scan_detail = str(getattr(backend, "scan_detail", "") or "")
+ scan_detail = str(
+ getattr(backend,
+ "scan_detail",
+ "") or ""
+ )
stats = getattr(backend, "scan_stats", None)
files = None
if isinstance(stats, dict):
@@ -3698,11 +4063,14 @@ Come to love it when others take what you share, as there is no greater joy
if isinstance(total_db, (int, float)):
files = int(total_db)
status = "SCANNED" if scan_ok else "ERROR"
- detail = (path_val + (" - " if path_val else "")) + (
- scan_detail or "Up to date"
- )
+ detail = (path_val + (" - " if path_val else "")
+ ) + (scan_detail or "Up to date")
_add_startup_check(
- status, name_key, store="folder", files=files, detail=detail
+ status,
+ name_key,
+ store="folder",
+ files=files,
+ detail=detail
)
else:
err = None
@@ -3710,29 +4078,46 @@ Come to love it when others take what you share, as there is no greater joy
err = store_registry.get_backend_error(
instance_name
) or store_registry.get_backend_error(name_key)
- detail = (path_val + (" - " if path_val else "")) + (
- err or "Unavailable"
+ detail = (path_val + (" - " if path_val else "")
+ ) + (err or "Unavailable")
+ _add_startup_check(
+ "ERROR",
+ name_key,
+ store="folder",
+ detail=detail
)
- _add_startup_check("ERROR", name_key, store="folder", detail=detail)
if _has_store_subtype(config, "debrid"):
try:
- from config import get_debrid_api_key
+ from SYS.config import get_debrid_api_key
from API.alldebrid import AllDebridClient
api_key = get_debrid_api_key(config)
if not api_key:
_add_startup_check(
- "DISABLED", "Debrid", store="debrid", detail="Not configured"
+ "DISABLED",
+ "Debrid",
+ store="debrid",
+ detail="Not configured"
)
else:
client = AllDebridClient(api_key)
- base_url = str(getattr(client, "base_url", "") or "").strip()
+ base_url = str(getattr(client,
+ "base_url",
+ "") or "").strip()
_add_startup_check(
- "ENABLED", "Debrid", store="debrid", detail=base_url or "Connected"
+ "ENABLED",
+ "Debrid",
+ store="debrid",
+ detail=base_url or "Connected"
)
except Exception as exc:
- _add_startup_check("DISABLED", "Debrid", store="debrid", detail=str(exc))
+ _add_startup_check(
+ "DISABLED",
+ "Debrid",
+ store="debrid",
+ detail=str(exc)
+ )
try:
from tool.ytdlp import YtDlpTool
@@ -3796,21 +4181,25 @@ Come to love it when others take what you share, as there is no greater joy
def clear_toolbar() -> None:
toolbar_state.text = ""
toolbar_state.clear_timer = None
- if session is not None and hasattr(session, "app") and session.app.is_running:
+ if session is not None and hasattr(
+ session,
+ "app") and session.app.is_running:
session.app.invalidate()
toolbar_state.clear_timer = threading.Timer(3.0, clear_toolbar)
toolbar_state.clear_timer.daemon = True
toolbar_state.clear_timer.start()
- if session is not None and hasattr(session, "app") and session.app.is_running:
+ if session is not None and hasattr(session,
+ "app") and session.app.is_running:
session.app.invalidate()
self._pipeline_executor.set_toolbar_output(update_toolbar)
completer = CmdletCompleter(config_loader=self._config_loader)
session = PromptSession(
- completer=cast(Any, completer),
+ completer=cast(Any,
+ completer),
lexer=MedeiaLexer(),
style=style,
bottom_toolbar=get_toolbar,
@@ -3828,10 +4217,13 @@ Come to love it when others take what you share, as there is no greater joy
continue
low = user_input.lower()
- if low in {"exit", "quit", "q"}:
+ if low in {"exit",
+ "quit",
+ "q"}:
print("He who is victorious through deceit is defeated by the truth.")
break
- if low in {"help", "?"}:
+ if low in {"help",
+ "?"}:
CmdletHelp.show_cmdlet_list()
continue
@@ -3845,7 +4237,7 @@ Come to love it when others take what you share, as there is no greater joy
pipeline_ctx_ref = None
try:
- from cli_syntax import validate_pipeline_text
+ from SYS.cli_syntax import validate_pipeline_text
syntax_error = validate_pipeline_text(user_input)
if syntax_error:
@@ -3869,7 +4261,9 @@ Come to love it when others take what you share, as there is no greater joy
if ctx.restore_next_result_table():
last_table = (
- ctx.get_display_table() if hasattr(ctx, "get_display_table") else None
+ ctx.get_display_table()
+ if hasattr(ctx,
+ "get_display_table") else None
)
if last_table is None:
last_table = ctx.get_last_result_table()
@@ -3881,7 +4275,9 @@ Come to love it when others take what you share, as there is no greater joy
items = ctx.get_last_result_items()
if items:
ctx.set_current_stage_table(None)
- print(f"Restored {len(items)} items (no table format available)")
+ print(
+ f"Restored {len(items)} items (no table format available)"
+ )
else:
print("No forward history available", file=sys.stderr)
except Exception as exc:
@@ -3894,7 +4290,9 @@ Come to love it when others take what you share, as there is no greater joy
if ctx.restore_previous_result_table():
last_table = (
- ctx.get_display_table() if hasattr(ctx, "get_display_table") else None
+ ctx.get_display_table()
+ if hasattr(ctx,
+ "get_display_table") else None
)
if last_table is None:
last_table = ctx.get_last_result_table()
@@ -3903,39 +4301,51 @@ Come to love it when others take what you share, as there is no greater joy
# so row payloads (titles/tags) reflect latest store state.
try:
src_cmd = (
- getattr(last_table, "source_command", None) if last_table else None
+ getattr(last_table,
+ "source_command",
+ None) if last_table else None
)
- if (
- isinstance(src_cmd, str)
- and src_cmd.lower().replace("_", "-") == "search-store"
- ):
+ if (isinstance(src_cmd,
+ str)
+ and src_cmd.lower().replace("_",
+ "-") == "search-store"):
src_args = (
- getattr(last_table, "source_args", None) if last_table else None
+ getattr(last_table,
+ "source_args",
+ None) if last_table else None
)
- base_args = list(src_args) if isinstance(src_args, list) else []
+ base_args = list(src_args
+ ) if isinstance(src_args,
+ list) else []
cleaned_args = [
- str(a)
- for a in base_args
- if str(a).strip().lower() not in {"--refresh", "-refresh"}
+ str(a) for a in base_args if str(a).strip().lower()
+ not in {"--refresh", "-refresh"}
]
if hasattr(ctx, "set_current_command_text"):
try:
title_text = (
- getattr(last_table, "title", None)
- if last_table
- else None
+ getattr(last_table,
+ "title",
+ None) if last_table else None
)
- if isinstance(title_text, str) and title_text.strip():
- ctx.set_current_command_text(title_text.strip())
+ if isinstance(title_text,
+ str) and title_text.strip():
+ ctx.set_current_command_text(
+ title_text.strip()
+ )
else:
ctx.set_current_command_text(
- " ".join(["search-store", *cleaned_args]).strip()
+ " ".join(
+ ["search-store",
+ *cleaned_args]
+ ).strip()
)
except Exception:
pass
try:
self._cmdlet_executor.execute(
- "search-store", cleaned_args + ["--refresh"]
+ "search-store",
+ cleaned_args + ["--refresh"]
)
finally:
if hasattr(ctx, "clear_current_command_text"):
@@ -3945,7 +4355,10 @@ Come to love it when others take what you share, as there is no greater joy
pass
continue
except Exception as exc:
- print(f"Error refreshing search-store table: {exc}", file=sys.stderr)
+ print(
+ f"Error refreshing search-store table: {exc}",
+ file=sys.stderr
+ )
if last_table:
stdout_console().print()
@@ -3955,7 +4368,9 @@ Come to love it when others take what you share, as there is no greater joy
items = ctx.get_last_result_items()
if items:
ctx.set_current_stage_table(None)
- print(f"Restored {len(items)} items (no table format available)")
+ print(
+ f"Restored {len(items)} items (no table format available)"
+ )
else:
print("No previous result table in history")
else:
@@ -3969,7 +4384,11 @@ Come to love it when others take what you share, as there is no greater joy
self._pipeline_executor.execute_tokens(tokens)
else:
cmd_name = tokens[0].replace("_", "-").lower()
- is_help = any(arg in {"-help", "--help", "-h"} for arg in tokens[1:])
+ is_help = any(
+ arg in {"-help",
+ "--help",
+ "-h"} for arg in tokens[1:]
+ )
if is_help:
CmdletHelp.show_cmdlet_help(cmd_name)
else:
diff --git a/ENHANCEMENT_SUMMARY.md b/ENHANCEMENT_SUMMARY.md
deleted file mode 100644
index 2e235e5..0000000
--- a/ENHANCEMENT_SUMMARY.md
+++ /dev/null
@@ -1,92 +0,0 @@
-# get-url Command Enhancement Summary
-
-## What Changed
-
-Enhanced the `get-url` command in [cmdlet/get_url.py](cmdlet/get_url.py) to support searching for URLs across all stores with smart pattern matching.
-
-## Key Features Added
-
-### 1. URL Normalization (`_normalize_url_for_search`)
-- Strips protocol prefixes: `https://`, `http://`, `ftp://`, etc.
-- Removes `www.` prefix (case-insensitive)
-- Converts to lowercase for case-insensitive matching
-
-**Examples:**
-- `https://www.youtube.com/watch?v=xx` → `youtube.com/watch?v=xx`
-- `http://www.google.com` → `google.com`
-- `FTP://cdn.example.com` → `cdn.example.com`
-
-### 2. Wildcard Pattern Matching (`_match_url_pattern`)
-- Supports `*` (matches any sequence) and `?` (matches single character)
-- Case-insensitive matching
-- Uses Python's `fnmatch` for robust pattern support
-
-**Examples:**
-- `youtube.com*` matches `youtube.com/watch`, `youtube.com/shorts`, etc.
-- `*.example.com*` matches `cdn.example.com`, `api.example.com`, etc.
-- `google.com/search*` matches `google.com/search?q=term`, etc.
-
-### 3. Cross-Store URL Search (`_search_urls_across_stores`)
-- Searches all configured stores (hydrus, folder, etc.)
-- Finds matching URLs across all files in all stores
-- Returns results grouped by store
-- Emits `UrlItem` objects for pipelining
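
For illustration only, a minimal sketch of the normalization and wildcard matching described above; the helper names and the exact regexes are assumptions for this note, not the project's actual implementation:

```python
import fnmatch
import re


def normalize_url_for_search(url: str) -> str:
    """Strip the scheme and a leading 'www.', then lowercase (assumed behaviour)."""
    u = re.sub(r"^[a-z][a-z0-9+.-]*://", "", url.strip(), flags=re.IGNORECASE)
    u = re.sub(r"^www\.", "", u, flags=re.IGNORECASE)
    return u.lower()


def match_url_pattern(url: str, pattern: str) -> bool:
    """Case-insensitive wildcard match after normalizing both URL and pattern."""
    return fnmatch.fnmatch(normalize_url_for_search(url), normalize_url_for_search(pattern))


assert normalize_url_for_search("https://www.YouTube.com/watch?v=xx") == "youtube.com/watch?v=xx"
assert match_url_pattern("https://www.youtube.com/watch?v=123", "youtube.com*")
assert not match_url_pattern("https://example.org", "example.com")
```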
-
-## Command Usage
-
-### Search for URLs matching a pattern
-```bash
-get-url -url "www.google.com"
-get-url -url "youtube.com*"
-get-url -url "*.example.com*"
-```
-
-### Original usage (unchanged)
-```bash
-@1 | get-url
-# Requires hash and store from piped result
-```
-
-## Implementation Details
-
-### New Methods
-- `_normalize_url_for_search(url)` - Static method to normalize URLs
-- `_match_url_pattern(url, pattern)` - Static method to match with wildcards
-- `_search_urls_across_stores(pattern, config)` - Search across all stores
-
-### Modified Method
-- `run()` - Enhanced to support `-url` flag for searching, fallback to original behavior
-
-### Return Values
-- **Search mode**: List of `UrlItem` objects grouped by store, exit code 0 if found, 1 if no matches
-- **Original mode**: URLs for specific file, exit code 0 if found, 1 if not found
-
-## Testing
-
-A test script is included: [test_get_url_search.py](test_get_url_search.py)
-
-**All tests pass:**
-- ✓ URL normalization (protocol/www stripping)
-- ✓ Wildcard pattern matching
-- ✓ Case-insensitive matching
-- ✓ Complex patterns with subdomains and paths
-
-## Files Modified
-
-- [cmdlet/get_url.py](cmdlet/get_url.py) - Enhanced with URL search functionality
-- [docs/GET_URL_SEARCH.md](docs/GET_URL_SEARCH.md) - User documentation
-- [test_get_url_search.py](test_get_url_search.py) - Test suite
-
-## Backward Compatibility
-
-✓ Fully backward compatible - original usage unchanged:
-- `@1 | get-url` still works as before
-- `-query` flag still works for hash lookups
-- `-store` flag still required for direct lookups
-
-## Error Handling
-
-- Returns exit code 1 if no matches found (search mode)
-- Returns exit code 1 if no store configured
-- Gracefully handles store backend errors
-- Logs errors to stderr without crashing
diff --git a/GET_URL_IMPLEMENTATION.txt b/GET_URL_IMPLEMENTATION.txt
deleted file mode 100644
index 020405f..0000000
--- a/GET_URL_IMPLEMENTATION.txt
+++ /dev/null
@@ -1,195 +0,0 @@
-✅ IMPLEMENTATION COMPLETE: get-url URL Search Enhancement
-
-═══════════════════════════════════════════════════════════════════════════════
-
-WHAT WAS IMPLEMENTED
-────────────────────────────────────────────────────────────────────────────────
-
-Enhanced the `get-url` command to search for URLs across all stores with:
-
-1. PROTOCOL STRIPPING
- - Removes: https://, http://, ftp://, and other scheme prefixes
- - Removes: www. prefix (case-insensitive)
- - Example: https://www.youtube.com/watch?v=abc → youtube.com/watch?v=abc
-
-2. WILDCARD PATTERN MATCHING
- - Asterisk (*): matches any sequence of characters
- - Question mark (?): matches exactly one character
- - Case-insensitive matching
- - Example: youtube.com* matches all YouTube URLs
-
-3. CROSS-STORE SEARCHING
- - Searches all configured stores (Hydrus, Folder, etc.)
- - Finds matching URLs for all files in all stores
- - Returns results grouped by store
- - Emits UrlItem objects for pipelining
-
-═══════════════════════════════════════════════════════════════════════════════
-
-COMMAND USAGE
-────────────────────────────────────────────────────────────────────────────────
-
-SEARCH MODE (NEW):
- get-url -url "www.google.com"
- get-url -url "youtube.com*"
- get-url -url "*.example.com*"
-
-ORIGINAL MODE (UNCHANGED):
- @1 | get-url
-
-═══════════════════════════════════════════════════════════════════════════════
-
-PRACTICAL EXAMPLES
-────────────────────────────────────────────────────────────────────────────────
-
-1. Find all YouTube video URLs:
- $ get-url -url "youtube.com*"
- Results show all files with YouTube URLs
-
-2. Find specific video by URL:
- $ get-url -url "https://www.youtube.com/watch?v=xx_88TDWmEs"
- Returns: youtube.com/watch?v=xx_88tdwmes (normalized pattern)
-
-3. Find by domain:
- $ get-url -url "google.com"
- Matches: google.com, www.google.com/search, google.com/maps
-
-4. Find by subdomain pattern:
- $ get-url -url "*.example.com*"
- Matches: cdn.example.com, api.example.com, www.example.com
-
-5. Find by path pattern:
- $ get-url -url "youtube.com/watch*"
- Matches: youtube.com/watch?v=123 (NOT youtube.com/shorts/abc)
-
-═══════════════════════════════════════════════════════════════════════════════
-
-FILES MODIFIED / CREATED
-────────────────────────────────────────────────────────────────────────────────
-
-MAIN IMPLEMENTATION:
- ✓ cmdlet/get_url.py
- - Added: _normalize_url_for_search() method
- - Added: _match_url_pattern() method
- - Added: _search_urls_across_stores() method
- - Modified: run() method to handle -url flag
- - Lines: 281 total (was 127)
-
-DOCUMENTATION:
- ✓ docs/GET_URL_SEARCH.md - Full feature documentation
- ✓ docs/GET_URL_QUICK_REF.md - Quick reference guide
- ✓ ENHANCEMENT_SUMMARY.md - Technical summary
-
-TESTING:
- ✓ test_get_url_search.py - Comprehensive test suite
- - URL normalization tests: 6/6 passed ✓
- - Pattern matching tests: 9/9 passed ✓
-
-═══════════════════════════════════════════════════════════════════════════════
-
-IMPLEMENTATION DETAILS
-────────────────────────────────────────────────────────────────────────────────
-
-NEW METHODS (Static):
-
- _normalize_url_for_search(url: str) -> str
- Strips protocol and www prefix, returns lowercase
- Examples:
- "https://www.youtube.com/watch?v=xx" → "youtube.com/watch?v=xx"
- "http://www.google.com" → "google.com"
- "ftp://files.example.com" → "files.example.com"
-
- _match_url_pattern(url: str, pattern: str) -> bool
- Normalizes both URL and pattern, uses fnmatch for wildcard matching
- Returns True if URL matches pattern, False otherwise
-
-NEW METHODS (Instance):
-
- _search_urls_across_stores(pattern: str, config: Dict) -> Tuple[List[UrlItem], List[str]]
- Searches all stores for matching URLs
- Returns: (matched_items, stores_searched)
-
-MODIFIED METHOD:
-
- run(result, args, config) -> int
- Now handles:
- 1. If -url flag provided: Search mode
- 2. Otherwise: Original mode (hash+store lookup)
- Maintains full backward compatibility
-
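As a hedged sketch of the dispatch described above: the store registry shape, the `iter_urls()` accessor, and the trimmed `run()` signature below are assumptions for illustration, not the real backend API (the actual method takes `(result, args, config)` as documented above):

```python
import fnmatch
import re
from typing import Any, Dict, List, Tuple


def _normalize(s: str) -> str:
    # Strip an optional scheme and leading "www.", then lowercase.
    return re.sub(r"^(?:[a-z][a-z0-9+.-]*://)?(?:www\.)?", "", s.strip(),
                  flags=re.IGNORECASE).lower()


def search_urls_across_stores(
    pattern: str,
    stores: Dict[str, Any],
) -> Tuple[List[dict], List[str]]:
    # `stores` maps store name -> backend; backend.iter_urls() is a stand-in
    # yielding (file_hash, url) pairs for every file the store knows about.
    matched: List[dict] = []
    searched: List[str] = []
    for name, backend in stores.items():
        searched.append(name)
        for file_hash, url in backend.iter_urls():
            if fnmatch.fnmatch(_normalize(url), _normalize(pattern)):
                matched.append({"store": name, "hash": file_hash, "url": url})
    return matched, searched


def run(args: List[str], stores: Dict[str, Any]) -> int:
    # Search mode if -url is present; otherwise the original hash+store lookup.
    if "-url" in args:
        i = args.index("-url")
        if i + 1 >= len(args):
            return 1  # no pattern supplied
        matched, _searched = search_urls_across_stores(args[i + 1], stores)
        return 0 if matched else 1
    return 0  # original mode omitted in this sketch
```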
-═══════════════════════════════════════════════════════════════════════════════
-
-BACKWARD COMPATIBILITY
-────────────────────────────────────────────────────────────────────────────────
-
-✓ FULLY COMPATIBLE
- - Original usage: @1 | get-url (unchanged)
- - -query flag: Still works for hash lookups
- - -store flag: Still required for direct lookups
- - Return codes: Unchanged (0 = success, 1 = not found/error)
-
-═══════════════════════════════════════════════════════════════════════════════
-
-TEST RESULTS
-────────────────────────────────────────────────────────────────────────────────
-
-All 15 tests passed ✓
-
-URL Normalization (6 tests):
- ✓ https://www.youtube.com/watch?v=xx_88TDWmEs
- ✓ http://www.google.com
- ✓ ftp://files.example.com/path
- ✓ HTTPS://WWW.EXAMPLE.COM
- ✓ www.example.com
- ✓ example.com
-
-Pattern Matching (9 tests):
- ✓ youtube.com* matches youtube.com/watch
- ✓ youtube.com/watch* matches youtube.com/watch?v=123
- ✓ youtube.com/shorts* does NOT match watch?v=123
- ✓ google.com matches google.com
- ✓ google.com* matches google.com/search
- ✓ *.example.com* matches cdn.example.com
- ✓ *example.com* matches cdn.example.com
- ✓ example.com does NOT match example.org
- ✓ reddit.com* matches reddit.com/r/videos
-
-═══════════════════════════════════════════════════════════════════════════════
-
-NEXT STEPS (OPTIONAL)
-────────────────────────────────────────────────────────────────────────────────
-
-Future enhancements could include:
- 1. Performance optimization: Cache results from stores
- 2. Regex support: --regex flag for complex patterns
- 3. Limit flag: --limit N to cap results
- 4. Filter by store: --store NAME to search specific stores only
- 5. Exclude duplicates: --unique flag to deduplicate URLs
- 6. Export options: --json, --csv output formats
-
-═══════════════════════════════════════════════════════════════════════════════
-
-VERIFICATION
-────────────────────────────────────────────────────────────────────────────────
-
-✓ Python syntax: Valid (py_compile passed)
-✓ Imports: All dependencies available
-✓ Command registration: Successful
-✓ Test suite: All 15 tests pass
-✓ Backward compatibility: Fully maintained
-✓ Error handling: Graceful with stderr logging
-✓ Documentation: Complete with examples
-
-═══════════════════════════════════════════════════════════════════════════════
-
-READY FOR PRODUCTION ✓
-
-The get-url command is now ready to use for URL searching across all stores
-with intelligent pattern matching and normalization.
-
-Usage:
- get-url -url "www.google.com"
- get-url -url "youtube.com*"
- get-url -url "*.example.com*"
-
-═══════════════════════════════════════════════════════════════════════════════
diff --git a/MPV/lyric.py b/MPV/lyric.py
index a5f0403..828a036 100644
--- a/MPV/lyric.py
+++ b/MPV/lyric.py
@@ -41,7 +41,6 @@ from urllib.parse import urlparse
from MPV.mpv_ipc import MPV, MPVIPCClient
-
_TIMESTAMP_RE = re.compile(r"\[(?P<m>\d+):(?P<s>\d{2})(?:\.(?P<ms>\d{1,3}))?\]")
_OFFSET_RE = re.compile(r"^\[offset:(?P<ms>[+-]?\d+)\]$", re.IGNORECASE)
_HASH_RE = re.compile(r"[0-9a-f]{64}", re.IGNORECASE)
@@ -50,11 +49,9 @@ _HYDRUS_HASH_QS_RE = re.compile(r"hash=([0-9a-f]{64})", re.IGNORECASE)
_WIN_DRIVE_RE = re.compile(r"^[a-zA-Z]:[\\/]")
_WIN_UNC_RE = re.compile(r"^\\\\")
-
_LOG_FH: Optional[TextIO] = None
_SINGLE_INSTANCE_LOCK_FH: Optional[TextIO] = None
-
_LYRIC_VISIBLE_PROP = "user-data/medeia-lyric-visible"
# mpv osd-overlay IDs are scoped to the IPC client connection.
@@ -151,7 +148,13 @@ def _osd_overlay_set_ass(client: MPVIPCClient, ass_text: str) -> Optional[dict]:
def _osd_overlay_clear(client: MPVIPCClient) -> None:
client.send_command(
- {"command": {"name": "osd-overlay", "id": _LYRIC_OSD_OVERLAY_ID, "format": "none"}}
+ {
+ "command": {
+ "name": "osd-overlay",
+ "id": _LYRIC_OSD_OVERLAY_ID,
+ "format": "none"
+ }
+ }
)
@@ -175,7 +178,10 @@ def _ipc_get_property(
*,
raise_on_disconnect: bool = False,
) -> object:
- resp = client.send_command({"command": ["get_property", name]})
+ resp = client.send_command({
+ "command": ["get_property",
+ name]
+ })
if resp is None:
if raise_on_disconnect:
raise ConnectionError("Lost mpv IPC connection")
@@ -234,7 +240,10 @@ def _sanitize_query(s: Optional[str]) -> Optional[str]:
return t if t else None
-def _infer_artist_title_from_tags(tags: List[str]) -> tuple[Optional[str], Optional[str]]:
+def _infer_artist_title_from_tags(
+ tags: List[str]
+) -> tuple[Optional[str],
+ Optional[str]]:
artist = None
title = None
for t in tags or []:
@@ -267,7 +276,10 @@ def _wrap_plain_lyrics_as_lrc(text: str) -> str:
def _fetch_lrclib(
- *, artist: Optional[str], title: Optional[str], duration_s: Optional[float] = None
+ *,
+ artist: Optional[str],
+ title: Optional[str],
+ duration_s: Optional[float] = None
) -> Optional[str]:
base = "https://lrclib.net/api"
@@ -276,10 +288,11 @@ def _fetch_lrclib(
return None
# Try direct get.
- q: Dict[str, str] = {
- "artist_name": artist,
- "track_name": title,
- }
+ q: Dict[str,
+ str] = {
+ "artist_name": artist,
+ "track_name": title,
+ }
if isinstance(duration_s, (int, float)) and duration_s and duration_s > 0:
q["duration"] = str(int(duration_s))
url = f"{base}/get?{urlencode(q)}"
@@ -386,7 +399,7 @@ def parse_lrc(text: str) -> List[LrcLine]:
# Ignore non-timestamp metadata lines like [ar:], [ti:], etc.
continue
- lyric_text = line[matches[-1].end() :].strip()
+ lyric_text = line[matches[-1].end():].strip()
for m in matches:
mm = int(m.group("m"))
ss = int(m.group("s"))
@@ -445,10 +458,11 @@ def _extract_hash_from_target(target: str) -> Optional[str]:
def _load_config_best_effort() -> dict:
try:
- from config import load_config
+ from SYS.config import load_config
cfg = load_config()
- return cfg if isinstance(cfg, dict) else {}
+ return cfg if isinstance(cfg,
+ dict) else {}
except Exception:
return {}
@@ -512,10 +526,11 @@ def _write_temp_sub_file(*, key: str, text: str) -> Path:
tmp_dir.mkdir(parents=True, exist_ok=True)
ext = _infer_sub_extension(text)
- digest = hashlib.sha1((key + "\n" + (text or "")).encode("utf-8", errors="ignore")).hexdigest()[
- :16
- ]
- safe_key = hashlib.sha1((key or "").encode("utf-8", errors="ignore")).hexdigest()[:12]
+ digest = hashlib.sha1((key + "\n" + (text or "")).encode("utf-8",
+ errors="ignore")
+ ).hexdigest()[:16]
+ safe_key = hashlib.sha1((key or "").encode("utf-8",
+ errors="ignore")).hexdigest()[:12]
path = (tmp_dir / f"sub-{safe_key}-{digest}{ext}").resolve()
path.write_text(text or "", encoding="utf-8", errors="replace")
return path
@@ -523,14 +538,23 @@ def _write_temp_sub_file(*, key: str, text: str) -> Path:
def _try_remove_selected_external_sub(client: MPVIPCClient) -> None:
try:
- client.send_command({"command": ["sub-remove"]})
+ client.send_command({
+ "command": ["sub-remove"]
+ })
except Exception:
return
def _try_add_external_sub(client: MPVIPCClient, path: Path) -> None:
try:
- client.send_command({"command": ["sub-add", str(path), "select", "medeia-sub"]})
+ client.send_command(
+ {
+ "command": ["sub-add",
+ str(path),
+ "select",
+ "medeia-sub"]
+ }
+ )
except Exception:
return
@@ -658,7 +682,8 @@ def _resolve_store_backend_for_target(
target: str,
file_hash: str,
config: dict,
-) -> tuple[Optional[str], Any]:
+) -> tuple[Optional[str],
+ Any]:
"""Resolve a store backend for a local mpv target using the store DB.
A target is considered valid only when:
@@ -756,7 +781,10 @@ def _infer_store_for_target(*, target: str, config: dict) -> Optional[str]:
root = None
try:
root = (
- getattr(backend, "_location", None) or getattr(backend, "location", lambda: None)()
+ getattr(backend,
+ "_location",
+ None) or getattr(backend,
+ "location", lambda: None)()
)
except Exception:
root = None
@@ -795,7 +823,12 @@ def _infer_hash_for_target(target: str) -> Optional[str]:
return None
-def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] = None) -> int:
+def run_auto_overlay(
+ *,
+ mpv: MPV,
+ poll_s: float = 0.15,
+ config: Optional[dict] = None
+) -> int:
"""Auto mode: track mpv's current file and render lyrics (note: 'lyric') or load subtitles (note: 'sub')."""
cfg = config or {}
@@ -827,7 +860,10 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
try:
# Toggle support (mpv Lua script sets this property; default to visible).
visible_raw = _ipc_get_property(
- client, _LYRIC_VISIBLE_PROP, True, raise_on_disconnect=True
+ client,
+ _LYRIC_VISIBLE_PROP,
+ True,
+ raise_on_disconnect=True
)
raw_path = _ipc_get_property(client, "path", None, raise_on_disconnect=True)
except ConnectionError:
@@ -872,7 +908,9 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
else:
last_visible = visible
- target = _unwrap_memory_m3u(str(raw_path)) if isinstance(raw_path, str) else None
+ target = _unwrap_memory_m3u(str(raw_path)
+ ) if isinstance(raw_path,
+ str) else None
if isinstance(target, str):
target = _normalize_file_uri_target(target)
@@ -928,7 +966,8 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
# HTTP/HTTPS targets are only valid if they map to a store backend.
store_from_url = _extract_store_from_url_target(target)
store_name = store_from_url or _infer_hydrus_store_from_url_target(
- target=target, config=cfg
+ target=target,
+ config=cfg
)
if not store_name:
_log("HTTP target has no store mapping; lyrics disabled")
@@ -954,7 +993,9 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
current_backend = reg[store_name]
current_store_name = store_name
except Exception:
- _log(f"HTTP target store {store_name!r} not available; lyrics disabled")
+ _log(
+ f"HTTP target store {store_name!r} not available; lyrics disabled"
+ )
current_store_name = None
current_backend = None
current_key = None
@@ -995,7 +1036,9 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
continue
current_key = f"{current_store_name}:{current_file_hash}"
- _log(f"Resolved store={current_store_name!r} hash={current_file_hash!r} valid=True")
+ _log(
+ f"Resolved store={current_store_name!r} hash={current_file_hash!r} valid=True"
+ )
else:
# Local files: resolve store item via store DB. If not resolvable, lyrics are disabled.
@@ -1006,8 +1049,7 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
)
current_key = (
f"{current_store_name}:{current_file_hash}"
- if current_store_name and current_file_hash
- else None
+ if current_store_name and current_file_hash else None
)
_log(
@@ -1032,16 +1074,15 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
# Load/reload lyrics when we have a resolvable key and it differs from what we loaded.
# This is important for the autofetch path: the note can appear without the mpv target changing.
- if (
- current_key
- and current_key != last_loaded_key
- and current_store_name
- and current_file_hash
- and current_backend
- ):
- notes: Dict[str, str] = {}
+ if (current_key and current_key != last_loaded_key and current_store_name
+ and current_file_hash and current_backend):
+ notes: Dict[str,
+ str] = {}
try:
- notes = current_backend.get_note(current_file_hash, config=cfg) or {}
+ notes = current_backend.get_note(
+ current_file_hash,
+ config=cfg
+ ) or {}
except Exception:
notes = {}
@@ -1092,11 +1133,8 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
# Throttle attempts per key to avoid hammering APIs.
autofetch_enabled = bool(cfg.get("lyric_autofetch", True))
now = time.time()
- if (
- autofetch_enabled
- and current_key != last_fetch_attempt_key
- and (now - last_fetch_attempt_at) > 2.0
- ):
+ if (autofetch_enabled and current_key != last_fetch_attempt_key
+ and (now - last_fetch_attempt_at) > 2.0):
last_fetch_attempt_key = current_key
last_fetch_attempt_at = now
@@ -1128,7 +1166,10 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
artist=artist,
title=title,
duration_s=(
- float(duration_s) if isinstance(duration_s, (int, float)) else None
+ float(duration_s)
+ if isinstance(duration_s,
+ (int,
+ float)) else None
),
)
if not fetched or not fetched.strip():
@@ -1137,7 +1178,10 @@ def run_auto_overlay(*, mpv: MPV, poll_s: float = 0.15, config: Optional[dict] =
try:
ok = bool(
current_backend.set_note(
- current_file_hash, "lyric", fetched, config=cfg
+ current_file_hash,
+ "lyric",
+ fetched,
+ config=cfg
)
)
_log(f"Autofetch stored lyric note ok={ok}")
@@ -1230,7 +1274,8 @@ def run_overlay(*, mpv: MPV, entries: List[LrcLine], poll_s: float = 0.15) -> in
client = mpv.client()
if not client.connect():
print(
- "mpv IPC is not reachable (is mpv running with --input-ipc-server?).", file=sys.stderr
+ "mpv IPC is not reachable (is mpv running with --input-ipc-server?).",
+ file=sys.stderr
)
return 3
diff --git a/MPV/mpv_ipc.py b/MPV/mpv_ipc.py
index 77b62d4..c4efc7f 100644
--- a/MPV/mpv_ipc.py
+++ b/MPV/mpv_ipc.py
@@ -20,16 +20,13 @@ from typing import Any, Dict, Optional, List, BinaryIO, Tuple, cast
from SYS.logger import debug
-
# Fixed pipe name for persistent MPV connection across all Python sessions
FIXED_IPC_PIPE_NAME = "mpv-medeia-macina"
MPV_LUA_SCRIPT_PATH = str(Path(__file__).resolve().parent / "LUA" / "main.lua")
-
_LYRIC_PROCESS: Optional[subprocess.Popen] = None
_LYRIC_LOG_FH: Optional[Any] = None
-
_MPV_AVAILABILITY_CACHE: Optional[Tuple[bool, Optional[str]]] = None
@@ -64,7 +61,8 @@ def _windows_hidden_subprocess_kwargs() -> Dict[str, Any]:
if platform.system() != "Windows":
return {}
- kwargs: Dict[str, Any] = {}
+ kwargs: Dict[str,
+ Any] = {}
try:
create_no_window = getattr(subprocess, "CREATE_NO_WINDOW", 0x08000000)
kwargs["creationflags"] = int(create_no_window)
@@ -103,7 +101,8 @@ def _check_mpv_availability() -> Tuple[bool, Optional[str]]:
try:
result = subprocess.run(
- [mpv_path, "--version"],
+ [mpv_path,
+ "--version"],
capture_output=True,
text=True,
timeout=2,
@@ -112,7 +111,10 @@ def _check_mpv_availability() -> Tuple[bool, Optional[str]]:
if result.returncode == 0:
_MPV_AVAILABILITY_CACHE = (True, None)
return _MPV_AVAILABILITY_CACHE
- _MPV_AVAILABILITY_CACHE = (False, f"MPV returned non-zero exit code: {result.returncode}")
+ _MPV_AVAILABILITY_CACHE = (
+ False,
+ f"MPV returned non-zero exit code: {result.returncode}"
+ )
return _MPV_AVAILABILITY_CACHE
except Exception as exc:
_MPV_AVAILABILITY_CACHE = (False, f"Error running MPV: {exc}")
@@ -141,7 +143,10 @@ def _windows_list_lyric_helper_pids(ipc_path: str) -> List[int]:
try:
out = subprocess.check_output(
- ["powershell", "-NoProfile", "-Command", ps_script],
+ ["powershell",
+ "-NoProfile",
+ "-Command",
+ ps_script],
stdin=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
timeout=2,
@@ -186,7 +191,10 @@ def _windows_kill_pids(pids: List[int]) -> None:
for pid in pids or []:
try:
subprocess.run(
- ["taskkill", "/PID", str(int(pid)), "/F"],
+ ["taskkill",
+ "/PID",
+ str(int(pid)),
+ "/F"],
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
@@ -237,7 +245,11 @@ class MPV:
self.lua_script_path = str(lua_path)
def client(self, silent: bool = False) -> "MPVIPCClient":
- return MPVIPCClient(socket_path=self.ipc_path, timeout=self.timeout, silent=bool(silent))
+ return MPVIPCClient(
+ socket_path=self.ipc_path,
+ timeout=self.timeout,
+ silent=bool(silent)
+ )
def is_running(self) -> bool:
client = self.client(silent=True)
@@ -247,9 +259,11 @@ class MPV:
finally:
client.disconnect()
- def send(
- self, command: Dict[str, Any] | List[Any], silent: bool = False
- ) -> Optional[Dict[str, Any]]:
+ def send(self,
+ command: Dict[str,
+ Any] | List[Any],
+ silent: bool = False) -> Optional[Dict[str,
+ Any]]:
client = self.client(silent=bool(silent))
try:
if not client.connect():
@@ -263,13 +277,20 @@ class MPV:
client.disconnect()
def get_property(self, name: str, default: Any = None) -> Any:
- resp = self.send({"command": ["get_property", name]})
+ resp = self.send({
+ "command": ["get_property",
+ name]
+ })
if resp and resp.get("error") == "success":
return resp.get("data", default)
return default
def set_property(self, name: str, value: Any) -> bool:
- resp = self.send({"command": ["set_property", name, value]})
+ resp = self.send({
+ "command": ["set_property",
+ name,
+ value]
+ })
return bool(resp and resp.get("error") == "success")
def download(
@@ -279,7 +300,8 @@ class MPV:
fmt: str,
store: Optional[str] = None,
path: Optional[str] = None,
- ) -> Dict[str, Any]:
+ ) -> Dict[str,
+ Any]:
"""Download a URL using the same pipeline semantics as the MPV UI.
This is intended as a stable Python entrypoint for "button actions".
@@ -291,9 +313,19 @@ class MPV:
path = str(path or "").strip() if path is not None else None
if not url:
- return {"success": False, "stdout": "", "stderr": "", "error": "Missing url"}
+ return {
+ "success": False,
+ "stdout": "",
+ "stderr": "",
+ "error": "Missing url"
+ }
if not fmt:
- return {"success": False, "stdout": "", "stderr": "", "error": "Missing fmt"}
+ return {
+ "success": False,
+ "stdout": "",
+ "stderr": "",
+ "error": "Missing fmt"
+ }
if bool(store) == bool(path):
return {
"success": False,
@@ -323,10 +355,18 @@ class MPV:
executor = PipelineExecutor()
result = executor.run_pipeline(pipeline)
return {
- "success": bool(getattr(result, "success", False)),
- "stdout": getattr(result, "stdout", "") or "",
- "stderr": getattr(result, "stderr", "") or "",
- "error": getattr(result, "error", None),
+ "success": bool(getattr(result,
+ "success",
+ False)),
+ "stdout": getattr(result,
+ "stdout",
+ "") or "",
+ "stderr": getattr(result,
+ "stderr",
+ "") or "",
+ "error": getattr(result,
+ "error",
+ None),
"pipeline": pipeline,
}
except Exception as exc:
@@ -340,7 +380,12 @@ class MPV:
def get_playlist(self, silent: bool = False) -> Optional[List[Dict[str, Any]]]:
resp = self.send(
- {"command": ["get_property", "playlist"], "request_id": 100}, silent=silent
+ {
+ "command": ["get_property",
+ "playlist"],
+ "request_id": 100
+ },
+ silent=silent
)
if resp is None:
return None
@@ -383,7 +428,14 @@ class MPV:
if not script_path or not os.path.exists(script_path):
return
# Safe to call repeatedly; mpv will reload the script.
- self.send({"command": ["load-script", script_path], "request_id": 12}, silent=True)
+ self.send(
+ {
+ "command": ["load-script",
+ script_path],
+ "request_id": 12
+ },
+ silent=True
+ )
except Exception:
return
@@ -465,11 +517,12 @@ class MPV:
except Exception:
_LYRIC_LOG_FH = None
- kwargs: Dict[str, Any] = {
- "stdin": subprocess.DEVNULL,
- "stdout": _LYRIC_LOG_FH or subprocess.DEVNULL,
- "stderr": _LYRIC_LOG_FH or subprocess.DEVNULL,
- }
+ kwargs: Dict[str,
+ Any] = {
+ "stdin": subprocess.DEVNULL,
+ "stdout": _LYRIC_LOG_FH or subprocess.DEVNULL,
+ "stderr": _LYRIC_LOG_FH or subprocess.DEVNULL,
+ }
# Ensure immediate flushing to the log file.
env = os.environ.copy()
@@ -477,9 +530,8 @@ class MPV:
try:
existing_pp = env.get("PYTHONPATH")
env["PYTHONPATH"] = (
- str(repo_root)
- if not existing_pp
- else (str(repo_root) + os.pathsep + str(existing_pp))
+ str(repo_root) if not existing_pp else
+ (str(repo_root) + os.pathsep + str(existing_pp))
)
except Exception:
pass
@@ -528,7 +580,10 @@ class MPV:
return
try:
subprocess.run(
- ["taskkill", "/IM", "mpv.exe", "/F"],
+ ["taskkill",
+ "/IM",
+ "mpv.exe",
+ "/F"],
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
@@ -613,12 +668,17 @@ class MPV:
if extra_args:
cmd.extend([str(a) for a in extra_args if a])
- kwargs: Dict[str, Any] = {}
+ kwargs: Dict[str,
+ Any] = {}
if platform.system() == "Windows":
# Ensure we don't flash a console window when spawning mpv.
flags = 0
try:
- flags |= int(getattr(subprocess, "DETACHED_PROCESS", 0x00000008)) if detached else 0
+ flags |= int(
+ getattr(subprocess,
+ "DETACHED_PROCESS",
+ 0x00000008)
+ ) if detached else 0
except Exception:
flags |= 0x00000008 if detached else 0
try:
@@ -666,22 +726,30 @@ class MPV:
try:
existing_pp = helper_env.get("PYTHONPATH")
helper_env["PYTHONPATH"] = (
- str(repo_root)
- if not existing_pp
- else (str(repo_root) + os.pathsep + str(existing_pp))
+ str(repo_root) if not existing_pp else
+ (str(repo_root) + os.pathsep + str(existing_pp))
)
except Exception:
pass
- helper_kwargs: Dict[str, Any] = {}
+ helper_kwargs: Dict[str,
+ Any] = {}
if platform.system() == "Windows":
flags = 0
try:
- flags |= int(getattr(subprocess, "DETACHED_PROCESS", 0x00000008))
+ flags |= int(
+ getattr(subprocess,
+ "DETACHED_PROCESS",
+ 0x00000008)
+ )
except Exception:
flags |= 0x00000008
try:
- flags |= int(getattr(subprocess, "CREATE_NO_WINDOW", 0x08000000))
+ flags |= int(
+ getattr(subprocess,
+ "CREATE_NO_WINDOW",
+ 0x08000000)
+ )
except Exception:
flags |= 0x08000000
helper_kwargs["creationflags"] = flags
@@ -750,7 +818,10 @@ class MPVIPCClient:
"""
def __init__(
- self, socket_path: Optional[str] = None, timeout: float = 5.0, silent: bool = False
+ self,
+ socket_path: Optional[str] = None,
+ timeout: float = 5.0,
+ silent: bool = False
):
"""Initialize MPV IPC client.
@@ -798,8 +869,8 @@ class MPVIPCClient:
while True:
nl = self._recv_buffer.find(b"\n")
if nl != -1:
- line = self._recv_buffer[: nl + 1]
- self._recv_buffer = self._recv_buffer[nl + 1 :]
+ line = self._recv_buffer[:nl + 1]
+ self._recv_buffer = self._recv_buffer[nl + 1:]
return line
remaining = deadline - _time.time()
@@ -824,7 +895,10 @@ class MPVIPCClient:
return b""
self._recv_buffer += chunk
- def read_message(self, *, timeout: Optional[float] = None) -> Optional[Dict[str, Any]]:
+ def read_message(self,
+ *,
+ timeout: Optional[float] = None) -> Optional[Dict[str,
+ Any]]:
"""Read the next JSON message/event from MPV.
Returns:
@@ -836,13 +910,17 @@ class MPVIPCClient:
if raw is None:
return None
if raw == b"":
- return {"event": "__eof__"}
+ return {
+ "event": "__eof__"
+ }
try:
return json.loads(raw.decode("utf-8", errors="replace").strip())
except Exception:
return None
- def send_command_no_wait(self, command_data: Dict[str, Any] | List[Any]) -> Optional[int]:
+ def send_command_no_wait(self,
+ command_data: Dict[str,
+ Any] | List[Any]) -> Optional[int]:
"""Send a command to mpv without waiting for its response.
This is important for long-running event loops (helpers) so we don't
@@ -851,7 +929,9 @@ class MPVIPCClient:
try:
request: Dict[str, Any]
if isinstance(command_data, list):
- request = {"command": command_data}
+ request = {
+ "command": command_data
+ }
else:
request = dict(command_data)
@@ -910,7 +990,10 @@ class MPVIPCClient:
self.sock = None
return False
- def send_command(self, command_data: Dict[str, Any] | List[Any]) -> Optional[Dict[str, Any]]:
+ def send_command(self,
+ command_data: Dict[str,
+ Any] | List[Any]) -> Optional[Dict[str,
+ Any]]:
"""Send a command to mpv and get response.
Args:
@@ -927,7 +1010,9 @@ class MPVIPCClient:
# Format command as JSON (mpv IPC protocol)
request: Dict[str, Any]
if isinstance(command_data, list):
- request = {"command": command_data}
+ request = {
+ "command": command_data
+ }
else:
request = command_data
@@ -958,7 +1043,10 @@ class MPVIPCClient:
break
try:
- lines = response_data.decode("utf-8", errors="replace").strip().split("\n")
+ lines = response_data.decode(
+ "utf-8",
+ errors="replace"
+ ).strip().split("\n")
for line in lines:
if not line:
continue
diff --git a/MPV/mpv_lua_api.py b/MPV/mpv_lua_api.py
index 14ab421..9a501a9 100644
--- a/MPV/mpv_lua_api.py
+++ b/MPV/mpv_lua_api.py
@@ -32,7 +32,8 @@ def setup_logging(log_file: Optional[Path] = None) -> logging.Logger:
handler = logging.StreamHandler(sys.stderr)
formatter = logging.Formatter(
- "[%(asctime)s][%(levelname)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
+ "[%(asctime)s][%(levelname)s] %(message)s",
+ datefmt="%Y-%m-%d %H:%M:%S"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
@@ -51,7 +52,8 @@ def execute_pipeline(
pipeline_cmd: str,
log_file: Optional[Path] = None,
dry_run: bool = False,
-) -> Dict[str, Any]:
+) -> Dict[str,
+ Any]:
"""Execute a pipeline command and return result as JSON.
Args:
@@ -84,11 +86,16 @@ def execute_pipeline(
cmd_args = shlex.split(pipeline_cmd)
result = subprocess.run(
- [sys.executable, "-m", "CLI"] + cmd_args,
+ [sys.executable,
+ "-m",
+ "CLI"] + cmd_args,
capture_output=True,
text=True,
cwd=str(_ROOT_DIR),
- env={**dict(__import__("os").environ), "MEDEIA_MPV_CALLER": "lua"},
+ env={
+ **dict(__import__("os").environ),
+ "MEDEIA_MPV_CALLER": "lua"
+ },
)
if log_file:
@@ -145,12 +152,10 @@ def handle_api_request(request_json: str, log_file: Optional[Path] = None) -> st
return json.dumps(result)
else:
- return json.dumps(
- {
- "success": False,
- "error": f"Unknown command: {cmd}",
- }
- )
+ return json.dumps({
+ "success": False,
+ "error": f"Unknown command: {cmd}",
+ })
except Exception as exc:
return json.dumps(
@@ -166,7 +171,10 @@ if __name__ == "__main__":
# python mpv_lua_api.py
if len(sys.argv) < 2:
- print(json.dumps({"success": False, "error": "No request provided"}))
+ print(json.dumps({
+ "success": False,
+ "error": "No request provided"
+ }))
sys.exit(1)
request_json = sys.argv[1]
diff --git a/MPV/pipeline_helper.py b/MPV/pipeline_helper.py
index e437ded..0f24da2 100644
--- a/MPV/pipeline_helper.py
+++ b/MPV/pipeline_helper.py
@@ -61,12 +61,10 @@ _ROOT = str(_repo_root())
if _ROOT not in sys.path:
sys.path.insert(0, _ROOT)
-
from MPV.mpv_ipc import MPVIPCClient # noqa: E402
-from config import load_config # noqa: E402
+from SYS.config import load_config # noqa: E402
from SYS.logger import set_debug, debug, set_thread_stream # noqa: E402
-
REQUEST_PROP = "user-data/medeia-pipeline-request"
RESPONSE_PROP = "user-data/medeia-pipeline-response"
READY_PROP = "user-data/medeia-pipeline-ready"
@@ -103,8 +101,12 @@ def _run_pipeline(pipeline_text: str, *, seeds: Any = None) -> Dict[str, Any]:
try:
cols_payload.append(
{
- "name": getattr(c, "name", ""),
- "value": getattr(c, "value", ""),
+ "name": getattr(c,
+ "name",
+ ""),
+ "value": getattr(c,
+ "value",
+ ""),
}
)
except Exception:
@@ -118,10 +120,18 @@ def _run_pipeline(pipeline_text: str, *, seeds: Any = None) -> Dict[str, Any]:
except Exception:
sel_args = None
- rows_payload.append({"columns": cols_payload, "selection_args": sel_args})
+ rows_payload.append(
+ {
+ "columns": cols_payload,
+ "selection_args": sel_args
+ }
+ )
# Only return JSON-serializable data (Lua only needs title + rows).
- return {"title": str(title or ""), "rows": rows_payload}
+ return {
+ "title": str(title or ""),
+ "rows": rows_payload
+ }
executor = PipelineExecutor()
result = executor.run_pipeline(pipeline_text, seeds=seeds)
@@ -150,7 +160,10 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
"""
op_name = str(op or "").strip().lower()
- if op_name in {"run-detached", "run_detached", "pipeline-detached", "pipeline_detached"}:
+ if op_name in {"run-detached",
+ "run_detached",
+ "pipeline-detached",
+ "pipeline_detached"}:
pipeline_text = ""
seeds = None
if isinstance(data, dict):
@@ -194,12 +207,13 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
# Best-effort; seeds are optional.
pass
- popen_kwargs: Dict[str, Any] = {
- "stdin": subprocess.DEVNULL,
- "stdout": subprocess.DEVNULL,
- "stderr": subprocess.DEVNULL,
- "cwd": str(_repo_root()),
- }
+ popen_kwargs: Dict[str,
+ Any] = {
+ "stdin": subprocess.DEVNULL,
+ "stdout": subprocess.DEVNULL,
+ "stderr": subprocess.DEVNULL,
+ "cwd": str(_repo_root()),
+ }
if platform.system() == "Windows":
flags = 0
try:
@@ -213,7 +227,11 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
popen_kwargs["creationflags"] = int(flags)
try:
si = subprocess.STARTUPINFO()
- si.dwFlags |= int(getattr(subprocess, "STARTF_USESHOWWINDOW", 0x00000001))
+ si.dwFlags |= int(
+ getattr(subprocess,
+ "STARTF_USESHOWWINDOW",
+ 0x00000001)
+ )
si.wShowWindow = subprocess.SW_HIDE
popen_kwargs["startupinfo"] = si
except Exception:
@@ -228,7 +246,8 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
"success": False,
"stdout": "",
"stderr": "",
- "error": f"Failed to spawn detached pipeline: {type(exc).__name__}: {exc}",
+ "error":
+ f"Failed to spawn detached pipeline: {type(exc).__name__}: {exc}",
"table": None,
}
@@ -238,16 +257,21 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
"stderr": "",
"error": None,
"table": None,
- "pid": int(getattr(proc, "pid", 0) or 0),
+ "pid": int(getattr(proc,
+ "pid",
+ 0) or 0),
}
# Provide store backend choices using the same source as CLI/Typer autocomplete.
- if op_name in {"store-choices", "store_choices", "get-store-choices", "get_store_choices"}:
+ if op_name in {"store-choices",
+ "store_choices",
+ "get-store-choices",
+ "get_store_choices"}:
# IMPORTANT:
# - Prefer runtime cwd for config discovery (mpv spawns us with cwd=repo_root).
# - Avoid returning a cached empty result if config was loaded before it existed.
try:
- from config import reload_config # noqa: WPS433
+ from SYS.config import reload_config # noqa: WPS433
from Store import Store # noqa: WPS433
config_root = _runtime_config_root()
@@ -255,7 +279,8 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
storage = Store(config=cfg, suppress_debug=True)
backends = storage.list_backends() or []
- choices = sorted({str(n) for n in backends if str(n).strip()})
+ choices = sorted({str(n)
+ for n in backends if str(n).strip()})
# Fallback: if initialization gated all backends (e.g., missing deps or offline stores),
# still return configured instance names so the UI can present something.
@@ -269,7 +294,8 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
for instance_key, instance_cfg in instances.items():
name = None
if isinstance(instance_cfg, dict):
- name = instance_cfg.get("NAME") or instance_cfg.get("name")
+ name = instance_cfg.get("NAME"
+ ) or instance_cfg.get("name")
candidate = str(name or instance_key or "").strip()
if candidate:
seen.add(candidate)
@@ -297,7 +323,10 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
# Provide yt-dlp format list for a URL (for MPV "Change format" menu).
# Returns a ResultTable-like payload so the Lua UI can render without running cmdlets.
- if op_name in {"ytdlp-formats", "ytdlp_formats", "ytdl-formats", "ytdl_formats"}:
+ if op_name in {"ytdlp-formats",
+ "ytdlp_formats",
+ "ytdl-formats",
+ "ytdl_formats"}:
try:
url = None
if isinstance(data, dict):
@@ -335,7 +364,8 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
"success": False,
"stdout": "",
"stderr": "",
- "error": f"yt-dlp module not available: {type(exc).__name__}: {exc}",
+ "error":
+ f"yt-dlp module not available: {type(exc).__name__}: {exc}",
"table": None,
}
@@ -350,16 +380,17 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
except Exception:
cookiefile = None
- ydl_opts: Dict[str, Any] = {
- "quiet": True,
- "no_warnings": True,
- "socket_timeout": 20,
- "retries": 2,
- "skip_download": True,
- # Avoid accidentally expanding huge playlists on load.
- "noplaylist": True,
- "noprogress": True,
- }
+ ydl_opts: Dict[str,
+ Any] = {
+ "quiet": True,
+ "no_warnings": True,
+ "socket_timeout": 20,
+ "retries": 2,
+ "skip_download": True,
+ # Avoid accidentally expanding huge playlists on load.
+ "noplaylist": True,
+ "noprogress": True,
+ }
if cookiefile:
ydl_opts["cookiefile"] = cookiefile
@@ -386,7 +417,9 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
try:
formats_any = info.get("formats") if isinstance(info, dict) else None
count = len(formats_any) if isinstance(formats_any, list) else 0
- _append_helper_log(f"[ytdlp-formats] extracted formats count={count} url={url}")
+ _append_helper_log(
+ f"[ytdlp-formats] extracted formats count={count} url={url}"
+ )
if isinstance(formats_any, list) and formats_any:
limit = 60
@@ -414,7 +447,9 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
f"[ytdlp-format {i:02d}] id={fid} ext={ext} res={res} note={note} codecs={vcodec}/{acodec} size={size}"
)
if count > limit:
- _append_helper_log(f"[ytdlp-formats] (truncated; total={count})")
+ _append_helper_log(
+ f"[ytdlp-formats] (truncated; total={count})"
+ )
except Exception:
pass
@@ -422,10 +457,13 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
try:
dump = os.environ.get("MEDEIA_MPV_YTDLP_DUMP", "").strip()
if dump and dump != "0" and isinstance(info, dict):
- h = hashlib.sha1(url.encode("utf-8", errors="replace")).hexdigest()[:10]
+ h = hashlib.sha1(url.encode("utf-8",
+ errors="replace")).hexdigest()[:10]
out_path = _repo_root() / "Log" / f"ytdlp-probe-{h}.json"
out_path.write_text(
- json.dumps(info, ensure_ascii=False, indent=2),
+ json.dumps(info,
+ ensure_ascii=False,
+ indent=2),
encoding="utf-8",
errors="replace",
)
@@ -449,7 +487,10 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
"stdout": "",
"stderr": "",
"error": None,
- "table": {"title": "Formats", "rows": []},
+ "table": {
+ "title": "Formats",
+ "rows": []
+ },
}
rows = []
@@ -482,12 +523,25 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
rows.append(
{
"columns": [
- {"name": "ID", "value": format_id},
- {"name": "Resolution", "value": resolution or ""},
- {"name": "Ext", "value": ext or ""},
- {"name": "Size", "value": size or ""},
+ {
+ "name": "ID",
+ "value": format_id
+ },
+ {
+ "name": "Resolution",
+ "value": resolution or ""
+ },
+ {
+ "name": "Ext",
+ "value": ext or ""
+ },
+ {
+ "name": "Size",
+ "value": size or ""
+ },
],
- "selection_args": selection_args,
+ "selection_args":
+ selection_args,
}
)
@@ -496,7 +550,10 @@ def _run_op(op: str, data: Any) -> Dict[str, Any]:
"stdout": "",
"stderr": "",
"error": None,
- "table": {"title": "Formats", "rows": rows},
+ "table": {
+ "title": "Formats",
+ "rows": rows
+ },
}
except Exception as exc:
return {
@@ -622,7 +679,10 @@ def main(argv: Optional[list[str]] = None) -> int:
format="[%(name)s] %(levelname)s: %(message)s",
stream=sys.stderr,
)
- for noisy in ("httpx", "httpcore", "httpcore.http11", "httpcore.connection"):
+ for noisy in ("httpx",
+ "httpcore",
+ "httpcore.http11",
+ "httpcore.connection"):
try:
logging.getLogger(noisy).setLevel(logging.WARNING)
except Exception:
@@ -643,7 +703,9 @@ def main(argv: Optional[list[str]] = None) -> int:
return 0
try:
- _append_helper_log(f"[helper] version={MEDEIA_MPV_HELPER_VERSION} started ipc={args.ipc}")
+ _append_helper_log(
+ f"[helper] version={MEDEIA_MPV_HELPER_VERSION} started ipc={args.ipc}"
+ )
try:
_append_helper_log(
f"[helper] file={Path(__file__).resolve()} cwd={Path.cwd().resolve()}"
@@ -666,6 +728,7 @@ def main(argv: Optional[list[str]] = None) -> int:
try:
class _HelperLogStream:
+
def __init__(self) -> None:
self._pending = ""
@@ -759,7 +822,11 @@ def main(argv: Optional[list[str]] = None) -> int:
if (now - last_ready_ts) < 0.75:
return
try:
- client.send_command_no_wait(["set_property_string", READY_PROP, str(int(now))])
+ client.send_command_no_wait(
+ ["set_property_string",
+ READY_PROP,
+ str(int(now))]
+ )
last_ready_ts = now
except Exception:
return
@@ -789,7 +856,12 @@ def main(argv: Optional[list[str]] = None) -> int:
# Observe request property changes.
try:
- client.send_command_no_wait(["observe_property", OBS_ID_REQUEST, REQUEST_PROP, "string"])
+ client.send_command_no_wait(
+ ["observe_property",
+ OBS_ID_REQUEST,
+ REQUEST_PROP,
+ "string"]
+ )
except Exception:
return 3
@@ -807,8 +879,8 @@ def main(argv: Optional[list[str]] = None) -> int:
startup_choices_payload = _run_op("store-choices", None)
startup_choices = (
startup_choices_payload.get("choices")
- if isinstance(startup_choices_payload, dict)
- else None
+ if isinstance(startup_choices_payload,
+ dict) else None
)
if isinstance(startup_choices, list):
preview = ", ".join(str(x) for x in startup_choices[:50])
@@ -819,10 +891,18 @@ def main(argv: Optional[list[str]] = None) -> int:
# Publish to a cached property for Lua to read without IPC request.
try:
cached_json = json.dumps(
- {"success": True, "choices": startup_choices}, ensure_ascii=False
+ {
+ "success": True,
+ "choices": startup_choices
+ },
+ ensure_ascii=False
)
client.send_command_no_wait(
- ["set_property_string", "user-data/medeia-store-choices-cached", cached_json]
+ [
+ "set_property_string",
+ "user-data/medeia-store-choices-cached",
+ cached_json
+ ]
)
_append_helper_log(
f"[helper] published store-choices to user-data/medeia-store-choices-cached"
@@ -834,23 +914,29 @@ def main(argv: Optional[list[str]] = None) -> int:
else:
_append_helper_log("[helper] startup store-choices unavailable")
except Exception as exc:
- _append_helper_log(f"[helper] startup store-choices failed: {type(exc).__name__}: {exc}")
+ _append_helper_log(
+ f"[helper] startup store-choices failed: {type(exc).__name__}: {exc}"
+ )
# Also publish config temp directory if available
try:
- from config import load_config
+ from SYS.config import load_config
cfg = load_config()
temp_dir = cfg.get("temp", "").strip() or os.getenv("TEMP") or "/tmp"
if temp_dir:
client.send_command_no_wait(
- ["set_property_string", "user-data/medeia-config-temp", temp_dir]
+ ["set_property_string",
+ "user-data/medeia-config-temp",
+ temp_dir]
)
_append_helper_log(
f"[helper] published config temp to user-data/medeia-config-temp={temp_dir}"
)
except Exception as exc:
- _append_helper_log(f"[helper] failed to publish config temp: {type(exc).__name__}: {exc}")
+ _append_helper_log(
+ f"[helper] failed to publish config temp: {type(exc).__name__}: {exc}"
+ )
last_seen_id: Optional[str] = None
@@ -889,9 +975,8 @@ def main(argv: Optional[list[str]] = None) -> int:
if "quic" in lower_prefix and "DEBUG:" in text:
continue
# Suppress progress-bar style lines (keep true errors).
- if ("ETA" in text or "%" in text) and (
- "ERROR:" not in text and "WARNING:" not in text
- ):
+ if ("ETA" in text or "%" in text) and ("ERROR:" not in text
+ and "WARNING:" not in text):
# Typical yt-dlp progress bar line.
if text.lstrip().startswith("["):
continue
@@ -927,7 +1012,9 @@ def main(argv: Optional[list[str]] = None) -> int:
snippet = raw.strip().replace("\r", "").replace("\n", " ")
if len(snippet) > 220:
snippet = snippet[:220] + "…"
- _append_helper_log(f"[request-raw] could not parse request json: {snippet}")
+ _append_helper_log(
+ f"[request-raw] could not parse request json: {snippet}"
+ )
except Exception:
pass
continue
@@ -946,7 +1033,9 @@ def main(argv: Optional[list[str]] = None) -> int:
last_seen_id = req_id
try:
- label = pipeline_text if pipeline_text else (op and ("op=" + op) or "(empty)")
+ label = pipeline_text if pipeline_text else (
+ op and ("op=" + op) or "(empty)"
+ )
_append_helper_log(f"\n[request {req_id}] {label}")
except Exception:
pass
@@ -962,8 +1051,10 @@ def main(argv: Optional[list[str]] = None) -> int:
resp = {
"id": req_id,
"success": bool(run.get("success")),
- "stdout": run.get("stdout", ""),
- "stderr": run.get("stderr", ""),
+ "stdout": run.get("stdout",
+ ""),
+ "stderr": run.get("stderr",
+ ""),
"error": run.get("error"),
"table": run.get("table"),
}
@@ -1004,7 +1095,12 @@ def main(argv: Optional[list[str]] = None) -> int:
# IMPORTANT: don't wait for a response here; waiting would consume
# async events and can drop/skip property-change notifications.
client.send_command_no_wait(
- ["set_property_string", RESPONSE_PROP, json.dumps(resp, ensure_ascii=False)]
+ [
+ "set_property_string",
+ RESPONSE_PROP,
+ json.dumps(resp,
+ ensure_ascii=False)
+ ]
)
except Exception:
# If posting results fails, there's nothing more useful to do.
diff --git a/Provider/alldebrid.py b/Provider/alldebrid.py
index 8d7729b..e55fac1 100644
--- a/Provider/alldebrid.py
+++ b/Provider/alldebrid.py
@@ -37,7 +37,7 @@ def _get_debrid_api_key(config: Dict[str, Any]) -> Optional[str]:
# 2) store.debrid block (canonical for debrid store configuration)
try:
- from config import get_debrid_api_key
+ from SYS.config import get_debrid_api_key
key = get_debrid_api_key(config, service="All-debrid")
return key.strip() if key else None
@@ -97,23 +97,27 @@ class AllDebrid(Provider):
# Quiet mode when download-file is mid-pipeline.
quiet = (
bool(self.config.get("_quiet_background_output"))
- if isinstance(self.config, dict)
- else False
+ if isinstance(self.config,
+ dict) else False
)
unlocked_url = target
try:
unlocked = client.unlock_link(target)
- if isinstance(unlocked, str) and unlocked.strip().startswith(
- ("http://", "https://")
- ):
+ if isinstance(unlocked,
+ str) and unlocked.strip().startswith(("http://",
+ "https://")):
unlocked_url = unlocked.strip()
except Exception as exc:
# Fall back to the raw link, but warn.
log(f"[alldebrid] Failed to unlock link: {exc}", file=sys.stderr)
# Prefer provider title as the output filename.
- suggested = sanitize_filename(str(getattr(result, "title", "") or "").strip())
+ suggested = sanitize_filename(
+ str(getattr(result,
+ "title",
+ "") or "").strip()
+ )
suggested_name = suggested if suggested else None
try:
@@ -142,11 +146,9 @@ class AllDebrid(Provider):
try:
if downloaded_path.exists():
size = downloaded_path.stat().st_size
- if (
- size > 0
- and size <= 250_000
- and downloaded_path.suffix.lower() not in (".html", ".htm")
- ):
+ if (size > 0 and size <= 250_000
+ and downloaded_path.suffix.lower() not in (".html",
+ ".htm")):
head = downloaded_path.read_bytes()[:512]
try:
text = head.decode("utf-8", errors="ignore").lower()
@@ -173,9 +175,10 @@ class AllDebrid(Provider):
return None
@staticmethod
- def _flatten_files(
- items: Any, *, _prefix: Optional[List[str]] = None
- ) -> Iterable[Dict[str, Any]]:
+ def _flatten_files(items: Any,
+ *,
+ _prefix: Optional[List[str]] = None) -> Iterable[Dict[str,
+ Any]]:
"""Flatten AllDebrid magnet file tree into file dicts, preserving relative paths.
API commonly returns:
@@ -211,7 +214,9 @@ class AllDebrid(Provider):
name = node.get("n") or node.get("name")
link = node.get("l") or node.get("link")
- if isinstance(name, str) and name.strip() and isinstance(link, str) and link.strip():
+ if isinstance(name,
+ str) and name.strip() and isinstance(link,
+ str) and link.strip():
rel_parts = prefix + [name.strip()]
relpath = "/".join([p for p in rel_parts if p])
enriched = dict(node)
@@ -222,7 +227,8 @@ class AllDebrid(Provider):
self,
query: str,
limit: int = 50,
- filters: Optional[Dict[str, Any]] = None,
+ filters: Optional[Dict[str,
+ Any]] = None,
**kwargs: Any,
) -> List[SearchResult]:
q = (query or "").strip()
@@ -247,7 +253,9 @@ class AllDebrid(Provider):
return []
q_lower = q.lower()
- needle = "" if q_lower in {"*", "all", "list"} else q_lower
+ needle = "" if q_lower in {"*",
+ "all",
+ "list"} else q_lower
# Second-stage: list files for a specific magnet id.
if view == "files":
@@ -262,17 +270,16 @@ class AllDebrid(Provider):
except Exception:
return []
- magnet_status: Dict[str, Any] = {}
+ magnet_status: Dict[str,
+ Any] = {}
try:
magnet_status = client.magnet_status(magnet_id)
except Exception:
magnet_status = {}
magnet_name = str(
- magnet_status.get("filename")
- or magnet_status.get("name")
- or magnet_status.get("hash")
- or f"magnet-{magnet_id}"
+ magnet_status.get("filename") or magnet_status.get("name")
+ or magnet_status.get("hash") or f"magnet-{magnet_id}"
)
status_code = magnet_status.get("statusCode")
status_text = str(magnet_status.get("status") or "").strip() or "unknown"
@@ -285,25 +292,40 @@ class AllDebrid(Provider):
title=magnet_name,
path=f"alldebrid:magnet:{magnet_id}",
detail=status_text,
- annotations=["folder", "not-ready"],
+ annotations=["folder",
+ "not-ready"],
media_kind="folder",
- tag={"alldebrid", "folder", str(magnet_id), "not-ready"},
+ tag={"alldebrid",
+ "folder",
+ str(magnet_id),
+ "not-ready"},
columns=[
- ("Folder", magnet_name),
- ("ID", str(magnet_id)),
- ("Status", status_text),
- ("Ready", "no"),
+ ("Folder",
+ magnet_name),
+ ("ID",
+ str(magnet_id)),
+ ("Status",
+ status_text),
+ ("Ready",
+ "no"),
],
- full_metadata={"magnet": magnet_status, "magnet_id": magnet_id},
+ full_metadata={
+ "magnet": magnet_status,
+ "magnet_id": magnet_id
+ },
)
]
try:
files_result = client.magnet_links([magnet_id])
magnet_files = (
- files_result.get(str(magnet_id), {}) if isinstance(files_result, dict) else {}
+ files_result.get(str(magnet_id),
+ {}) if isinstance(files_result,
+ dict) else {}
)
- file_tree = magnet_files.get("files", []) if isinstance(magnet_files, dict) else []
+ file_tree = magnet_files.get("files",
+ []) if isinstance(magnet_files,
+ dict) else []
except Exception as exc:
log(
f"[alldebrid] Failed to list files for magnet {magnet_id}: {exc}",
@@ -313,8 +335,10 @@ class AllDebrid(Provider):
results: List[SearchResult] = []
for file_node in self._flatten_files(file_tree):
- file_name = str(file_node.get("n") or file_node.get("name") or "").strip()
- file_url = str(file_node.get("l") or file_node.get("link") or "").strip()
+ file_name = str(file_node.get("n") or file_node.get("name")
+ or "").strip()
+ file_url = str(file_node.get("l") or file_node.get("link")
+ or "").strip()
relpath = str(file_node.get("_relpath") or file_name or "").strip()
file_size = file_node.get("s") or file_node.get("size")
if not file_name or not file_url:
@@ -341,11 +365,16 @@ class AllDebrid(Provider):
annotations=["file"],
media_kind="file",
size_bytes=size_bytes,
- tag={"alldebrid", "file", str(magnet_id)},
+ tag={"alldebrid",
+ "file",
+ str(magnet_id)},
columns=[
- ("File", file_name),
- ("Folder", magnet_name),
- ("ID", str(magnet_id)),
+ ("File",
+ file_name),
+ ("Folder",
+ magnet_name),
+ ("ID",
+ str(magnet_id)),
],
full_metadata={
"magnet": magnet_status,
@@ -386,9 +415,7 @@ class AllDebrid(Provider):
continue
magnet_name = str(
- magnet.get("filename")
- or magnet.get("name")
- or magnet.get("hash")
+ magnet.get("filename") or magnet.get("name") or magnet.get("hash")
or f"magnet-{magnet_id}"
)
magnet_name_lower = magnet_name.lower()
@@ -422,15 +449,24 @@ class AllDebrid(Provider):
annotations=["folder"],
media_kind="folder",
size_bytes=size_bytes,
- tag={"alldebrid", "folder", str(magnet_id)}
+ tag={"alldebrid",
+ "folder",
+ str(magnet_id)}
| ({"ready"} if ready else {"not-ready"}),
columns=[
- ("Folder", magnet_name),
- ("ID", str(magnet_id)),
- ("Status", status_text),
- ("Ready", "yes" if ready else "no"),
+ ("Folder",
+ magnet_name),
+ ("ID",
+ str(magnet_id)),
+ ("Status",
+ status_text),
+ ("Ready",
+ "yes" if ready else "no"),
],
- full_metadata={"magnet": magnet, "magnet_id": magnet_id},
+ full_metadata={
+ "magnet": magnet,
+ "magnet_id": magnet_id
+ },
)
)
diff --git a/Provider/bandcamp.py b/Provider/bandcamp.py
index 064e47d..1598605 100644
--- a/Provider/bandcamp.py
+++ b/Provider/bandcamp.py
@@ -38,9 +38,10 @@ class Bandcamp(Provider):
# Bandcamp discography lives under /music.
return base.rstrip("/") + "/music"
- def _scrape_artist_page(
- self, page: Any, artist_url: str, limit: int = 50
- ) -> List[SearchResult]:
+ def _scrape_artist_page(self,
+ page: Any,
+ artist_url: str,
+ limit: int = 50) -> List[SearchResult]:
"""Scrape an artist page for albums/tracks (discography)."""
base = self._base_url(artist_url)
discography_url = self._discography_url(artist_url)
@@ -75,7 +76,8 @@ class Bandcamp(Provider):
else:
target = base.rstrip("/") + "/" + href
- title_node = item.query_selector("p.title") or item.query_selector(".title")
+ title_node = item.query_selector("p.title"
+ ) or item.query_selector(".title")
title = title_node.inner_text().strip() if title_node else ""
if title:
title = " ".join(title.split())
@@ -83,7 +85,8 @@ class Bandcamp(Provider):
title = target.rsplit("/", 1)[-1]
kind = (
- "album" if "/album/" in target else ("track" if "/track/" in target else "item")
+ "album" if "/album/" in target else
+ ("track" if "/track/" in target else "item")
)
results.append(
@@ -95,9 +98,12 @@ class Bandcamp(Provider):
annotations=[kind],
media_kind="audio",
columns=[
- ("Title", title),
- ("Type", kind),
- ("Url", target),
+ ("Title",
+ title),
+ ("Type",
+ kind),
+ ("Url",
+ target),
],
full_metadata={
"type": kind,
@@ -112,7 +118,12 @@ class Bandcamp(Provider):
return results
def selector(
- self, selected_items: List[Any], *, ctx: Any, stage_is_last: bool = True, **_kwargs: Any
+ self,
+ selected_items: List[Any],
+ *,
+ ctx: Any,
+ stage_is_last: bool = True,
+ **_kwargs: Any
) -> bool:
"""Handle Bandcamp `@N` selection.
@@ -128,7 +139,8 @@ class Bandcamp(Provider):
# Only handle artist selections.
chosen: List[Dict[str, Any]] = []
for item in selected_items or []:
- payload: Dict[str, Any] = {}
+ payload: Dict[str,
+ Any] = {}
if isinstance(item, dict):
payload = item
else:
@@ -140,11 +152,21 @@ class Bandcamp(Provider):
if not payload:
try:
payload = {
- "title": getattr(item, "title", None),
- "url": getattr(item, "url", None),
- "path": getattr(item, "path", None),
- "metadata": getattr(item, "metadata", None),
- "extra": getattr(item, "extra", None),
+ "title": getattr(item,
+ "title",
+ None),
+ "url": getattr(item,
+ "url",
+ None),
+ "path": getattr(item,
+ "path",
+ None),
+ "metadata": getattr(item,
+ "metadata",
+ None),
+ "extra": getattr(item,
+ "extra",
+ None),
}
except Exception:
payload = {}
@@ -154,7 +176,10 @@ class Bandcamp(Provider):
meta = {}
extra = payload.get("extra")
if isinstance(extra, dict):
- meta = {**meta, **extra}
+ meta = {
+ **meta,
+ **extra
+ }
type_val = str(meta.get("type") or "").strip().lower()
if type_val != "artist":
@@ -169,7 +194,11 @@ class Bandcamp(Provider):
continue
chosen.append(
- {"title": title, "url": base, "location": str(meta.get("artist") or "").strip()}
+ {
+ "title": title,
+ "url": base,
+ "location": str(meta.get("artist") or "").strip()
+ }
)
if not chosen:
@@ -211,8 +240,12 @@ class Bandcamp(Provider):
results_payload.append(
{
"table": "bandcamp",
- "title": getattr(r, "title", ""),
- "path": getattr(r, "path", ""),
+ "title": getattr(r,
+ "title",
+ ""),
+ "path": getattr(r,
+ "path",
+ ""),
}
)
@@ -234,7 +267,8 @@ class Bandcamp(Provider):
self,
query: str,
limit: int = 50,
- filters: Optional[Dict[str, Any]] = None,
+ filters: Optional[Dict[str,
+ Any]] = None,
**kwargs: Any,
) -> List[SearchResult]:
if sync_playwright is None:
@@ -305,10 +339,14 @@ class Bandcamp(Provider):
annotations=[media_type],
media_kind="audio",
columns=[
- ("Title", title),
- ("Location", artist),
- ("Type", media_type),
- ("Url", base_url or str(target_url or "")),
+ ("Title",
+ title),
+ ("Location",
+ artist),
+ ("Type",
+ media_type),
+ ("Url",
+ base_url or str(target_url or "")),
],
full_metadata={
"artist": artist,
diff --git a/Provider/fileio.py b/Provider/fileio.py
index ac14013..b43560b 100644
--- a/Provider/fileio.py
+++ b/Provider/fileio.py
@@ -54,7 +54,8 @@ class FileIO(Provider):
def __init__(self, config: Optional[Dict[str, Any]] = None):
super().__init__(config)
conf = _pick_provider_config(self.config)
- self._base_url = str(conf.get("base_url") or "https://file.io").strip().rstrip("/")
+ self._base_url = str(conf.get("base_url")
+ or "https://file.io").strip().rstrip("/")
self._api_key = conf.get("api_key")
self._default_expires = conf.get("expires")
self._default_max_downloads = conf.get("maxDownloads")
@@ -74,12 +75,19 @@ class FileIO(Provider):
if not os.path.exists(file_path):
raise FileNotFoundError(f"File not found: {file_path}")
- data: Dict[str, Any] = {}
+ data: Dict[str,
+ Any] = {}
expires = kwargs.get("expires", self._default_expires)
max_downloads = kwargs.get(
- "maxDownloads", kwargs.get("max_downloads", self._default_max_downloads)
+ "maxDownloads",
+ kwargs.get("max_downloads",
+ self._default_max_downloads)
+ )
+ auto_delete = kwargs.get(
+ "autoDelete",
+ kwargs.get("auto_delete",
+ self._default_auto_delete)
)
- auto_delete = kwargs.get("autoDelete", kwargs.get("auto_delete", self._default_auto_delete))
if expires not in (None, ""):
data["expires"] = expires
@@ -88,7 +96,11 @@ class FileIO(Provider):
if auto_delete not in (None, ""):
data["autoDelete"] = auto_delete
- headers: Dict[str, str] = {"User-Agent": "Medeia-Macina/1.0", "Accept": "application/json"}
+ headers: Dict[str,
+ str] = {
+ "User-Agent": "Medeia-Macina/1.0",
+ "Accept": "application/json"
+ }
if isinstance(self._api_key, str) and self._api_key.strip():
# Some file.io plans use bearer tokens; keep optional.
headers["Authorization"] = f"Bearer {self._api_key.strip()}"
@@ -101,19 +113,28 @@ class FileIO(Provider):
total = os.path.getsize(file_path)
except Exception:
total = None
- wrapped = ProgressFileReader(handle, total_bytes=total, label="upload")
+ wrapped = ProgressFileReader(
+ handle,
+ total_bytes=total,
+ label="upload"
+ )
response = client.request(
"POST",
f"{self._base_url}/upload",
data=data or None,
- files={"file": (filename, wrapped)},
+ files={
+ "file": (filename,
+ wrapped)
+ },
follow_redirects=True,
raise_for_status=False,
)
if response.status_code >= 400:
- location = response.headers.get("location") or response.headers.get("Location")
- ct = response.headers.get("content-type") or response.headers.get("Content-Type")
+ location = response.headers.get("location"
+ ) or response.headers.get("Location")
+ ct = response.headers.get("content-type"
+ ) or response.headers.get("Content-Type")
raise Exception(
f"Upload failed: {response.status_code} (content-type={ct}, location={location}) - {response.text}"
)
@@ -127,7 +148,8 @@ class FileIO(Provider):
# If the server ignored our Accept header and returned HTML, this is almost
# certainly the wrong endpoint or an upstream block.
ct = (
- response.headers.get("content-type") or response.headers.get("Content-Type") or ""
+ response.headers.get("content-type")
+ or response.headers.get("Content-Type") or ""
).lower()
if (payload is None) and ("text/html" in ct):
raise Exception(
@@ -135,7 +157,8 @@ class FileIO(Provider):
)
if isinstance(payload, dict) and payload.get("success") is False:
- reason = payload.get("message") or payload.get("error") or payload.get("status")
+ reason = payload.get("message"
+ ) or payload.get("error") or payload.get("status")
raise Exception(str(reason or "Upload failed"))
uploaded_url = _extract_link(payload)
@@ -166,9 +189,11 @@ class FileIO(Provider):
if pipe_obj is not None:
from Store import Store
- Store(self.config, suppress_debug=True).try_add_url_for_pipe_object(
- pipe_obj, uploaded_url
- )
+ Store(
+ self.config,
+ suppress_debug=True
+ ).try_add_url_for_pipe_object(pipe_obj,
+ uploaded_url)
except Exception:
pass
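For context on the fileio.py changes: the provider posts a multipart body to {base_url}/upload with optional expires/maxDownloads/autoDelete form fields and an optional Bearer header. A self-contained sketch of that request, assuming the JSON response exposes the share URL under "link" (the provider goes through an _extract_link helper not shown here):

    from typing import Optional

    import requests

    def upload_to_fileio(
        path: str,
        api_key: Optional[str] = None,
        base_url: str = "https://file.io",
        expires: Optional[str] = "14d",
    ) -> str:
        headers = {"User-Agent": "Medeia-Macina/1.0", "Accept": "application/json"}
        if api_key and api_key.strip():
            # Some file.io plans use bearer tokens; keep optional.
            headers["Authorization"] = f"Bearer {api_key.strip()}"
        data = {"expires": expires} if expires else {}
        filename = path.rsplit("/", 1)[-1]
        with open(path, "rb") as handle:
            resp = requests.post(
                f"{base_url.rstrip('/')}/upload",
                headers=headers,
                data=data or None,
                files={"file": (filename, handle)},
                timeout=120,
            )
        resp.raise_for_status()
        payload = resp.json()
        if isinstance(payload, dict) and payload.get("success") is False:
            raise RuntimeError(str(payload.get("message") or "Upload failed"))
        return str(payload.get("link") or "")  # "link" key is an assumption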
diff --git a/Provider/internetarchive.py b/Provider/internetarchive.py
index cd96df7..51cb111 100644
--- a/Provider/internetarchive.py
+++ b/Provider/internetarchive.py
@@ -35,7 +35,9 @@ def _pick_provider_config(config: Any) -> Dict[str, Any]:
def _looks_fielded_query(q: str) -> bool:
low = (q or "").lower()
- return (":" in low) or (" and " in low) or (" or " in low) or (" not " in low) or ("(" in low)
+ return (":" in low) or (" and " in low) or (" or "
+ in low) or (" not "
+ in low) or ("(" in low)
def _extract_identifier_from_any(value: str) -> str:
@@ -111,9 +113,7 @@ def is_download_file_url(url: str) -> bool:
return False
     # /download/<identifier>/<filename>
return (
- len(parts) >= 3
- and parts[0].lower() == "download"
- and bool(parts[1].strip())
+ len(parts) >= 3 and parts[0].lower() == "download" and bool(parts[1].strip())
and bool(parts[2].strip())
)
@@ -158,9 +158,15 @@ def list_download_files(identifier: str) -> List[Dict[str, Any]]:
files.append(
{
"name": str(name),
- "size": getattr(f, "size", None),
- "format": getattr(f, "format", None),
- "source": getattr(f, "source", None),
+ "size": getattr(f,
+ "size",
+ None),
+ "format": getattr(f,
+ "format",
+ None),
+ "source": getattr(f,
+ "source",
+ None),
}
)
except Exception:
@@ -179,13 +185,16 @@ def list_download_files(identifier: str) -> List[Dict[str, Any]]:
if source == "metadata":
return True
- if fmt in {"metadata", "archive bittorrent"}:
+ if fmt in {"metadata",
+ "archive bittorrent"}:
return True
if fmt.startswith("thumbnail"):
return True
return False
- candidates = [f for f in files if isinstance(f, dict) and not _is_ia_metadata_file(f)]
+ candidates = [
+ f for f in files if isinstance(f, dict) and not _is_ia_metadata_file(f)
+ ]
if not candidates:
candidates = [f for f in files if isinstance(f, dict)]
@@ -266,7 +275,8 @@ def _best_file_candidate(files: List[Dict[str, Any]]) -> Optional[Dict[str, Any]
fmt = str(f.get("format") or "").strip().lower()
if source == "metadata":
return True
- if fmt in {"metadata", "archive bittorrent"}:
+ if fmt in {"metadata",
+ "archive bittorrent"}:
return True
if fmt.startswith("thumbnail"):
return True
@@ -283,7 +293,10 @@ def _best_file_candidate(files: List[Dict[str, Any]]) -> Optional[Dict[str, Any]
candidates = list(files)
# Prefer originals.
- originals = [f for f in candidates if str(f.get("source") or "").strip().lower() == "original"]
+ originals = [
+ f for f in candidates
+ if str(f.get("source") or "").strip().lower() == "original"
+ ]
pool = originals if originals else candidates
pool = [f for f in pool if str(f.get("name") or "").strip()]
@@ -330,7 +343,8 @@ class InternetArchive(Provider):
mt = str(mediatype or "").strip().lower()
if mt in {"texts"}:
return "book"
- if mt in {"audio", "etree"}:
+ if mt in {"audio",
+ "etree"}:
return "audio"
if mt in {"movies"}:
return "video"
@@ -342,7 +356,8 @@ class InternetArchive(Provider):
self,
query: str,
limit: int = 50,
- filters: Optional[Dict[str, Any]] = None,
+ filters: Optional[Dict[str,
+ Any]] = None,
**_kwargs: Any,
) -> List[SearchResult]:
ia = _ia()
@@ -355,7 +370,8 @@ class InternetArchive(Provider):
return []
# If the user supplied a plain string, default to title search.
- if not _looks_fielded_query(q) and q not in {"*", "*.*"}:
+ if not _looks_fielded_query(q) and q not in {"*",
+ "*.*"}:
q = f'title:("{q}")'
fields = [
@@ -419,10 +435,14 @@ class InternetArchive(Provider):
size_bytes=None,
tag=set(),
columns=[
- ("title", title),
- ("mediatype", mediatype),
- ("date", date),
- ("creator", creator),
+ ("title",
+ title),
+ ("mediatype",
+ mediatype),
+ ("date",
+ date),
+ ("creator",
+ creator),
],
full_metadata=dict(row),
)
@@ -437,7 +457,12 @@ class InternetArchive(Provider):
         - https://archive.org/details/<identifier>
         - https://archive.org/download/<identifier>/<filename>
"""
- sr = SearchResult(table="internetarchive", title=str(url), path=str(url), full_metadata={})
+ sr = SearchResult(
+ table="internetarchive",
+ title=str(url),
+ path=str(url),
+ full_metadata={}
+ )
return self.download(sr, output_dir)
def download(self, result: SearchResult, output_dir: Path) -> Optional[Path]:
@@ -449,7 +474,11 @@ class InternetArchive(Provider):
if not callable(download_fn):
raise Exception("internetarchive.download is not available")
- identifier = _extract_identifier_from_any(str(getattr(result, "path", "") or ""))
+ identifier = _extract_identifier_from_any(
+ str(getattr(result,
+ "path",
+ "") or "")
+ )
if not identifier:
return None
@@ -490,9 +519,15 @@ class InternetArchive(Provider):
files.append(
{
"name": str(name),
- "size": getattr(f, "size", None),
- "format": getattr(f, "format", None),
- "source": getattr(f, "source", None),
+ "size": getattr(f,
+ "size",
+ None),
+ "format": getattr(f,
+ "format",
+ None),
+ "source": getattr(f,
+ "source",
+ None),
}
)
except Exception:
@@ -616,7 +651,8 @@ class InternetArchive(Provider):
if not identifier:
raise Exception("Could not determine Internet Archive identifier")
- meta: Dict[str, Any] = {}
+ meta: Dict[str,
+ Any] = {}
if title:
meta["title"] = title
else:
@@ -628,7 +664,10 @@ class InternetArchive(Provider):
meta["mediatype"] = self._mediatype.strip()
# Build upload options; credentials are optional if the user has internetarchive configured globally.
- upload_kwargs: Dict[str, Any] = {"metadata": meta}
+ upload_kwargs: Dict[str,
+ Any] = {
+ "metadata": meta
+ }
ak = os.getenv("IA_ACCESS_KEY") or self._access_key
sk = os.getenv("IA_SECRET_KEY") or self._secret_key
if isinstance(ak, str) and ak.strip():
@@ -638,7 +677,9 @@ class InternetArchive(Provider):
# Use a friendly uploaded filename.
upload_name = sanitize_filename(p.name)
- files = {upload_name: str(p)}
+ files = {
+ upload_name: str(p)
+ }
try:
resp: Any = upload_fn(identifier, files=files, **upload_kwargs)
@@ -664,9 +705,11 @@ class InternetArchive(Provider):
if pipe_obj is not None:
from Store import Store
- Store(self.config, suppress_debug=True).try_add_url_for_pipe_object(
- pipe_obj, item_url
- )
+ Store(
+ self.config,
+ suppress_debug=True
+ ).try_add_url_for_pipe_object(pipe_obj,
+ item_url)
except Exception:
pass
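The Internet Archive hunks above recognise two URL shapes, details/<identifier> and download/<identifier>/<filename>. A small sketch of the same path-segment rules used in this patch:

    from urllib.parse import urlparse

    def extract_identifier(url: str) -> str:
        """Best-effort identifier extraction; the provider accepts more forms."""
        parts = [p for p in urlparse(url).path.split("/") if p]
        if len(parts) >= 2 and parts[0].lower() in {"details", "download"}:
            return parts[1].strip()
        return ""

    def is_download_file_url(url: str) -> bool:
        # /download/<identifier>/<filename> needs at least three path segments.
        parts = [p for p in urlparse(url).path.split("/") if p]
        return (
            len(parts) >= 3
            and parts[0].lower() == "download"
            and bool(parts[1].strip())
            and bool(parts[2].strip())
        )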
diff --git a/Provider/libgen.py b/Provider/libgen.py
index a2b87ad..8719866 100644
--- a/Provider/libgen.py
+++ b/Provider/libgen.py
@@ -15,7 +15,6 @@ from ProviderCore.download import sanitize_filename
from SYS.logger import log
from models import ProgressBar
-
# Optional dependency for HTML scraping fallbacks
try:
from lxml import html as lxml_html
@@ -111,9 +110,7 @@ def _parse_libgen_ads_tags_html(html: str) -> Dict[str, Any]:
score = 0
for ln in lines:
lo = ln.lower()
- if ":" in ln and any(
- k in lo
- for k in (
+ if ":" in ln and any(k in lo for k in (
"title",
"author",
"publisher",
@@ -121,9 +118,7 @@ def _parse_libgen_ads_tags_html(html: str) -> Dict[str, Any]:
"isbn",
"language",
"series",
- "tags",
- )
- ):
+ "tags", )):
score += 1
if score > best_score:
best_score = score
@@ -133,15 +128,20 @@ def _parse_libgen_ads_tags_html(html: str) -> Dict[str, Any]:
if not best_lines:
best_lines = _strip_html_to_lines(s)
- raw_fields: Dict[str, str] = {}
+ raw_fields: Dict[str,
+ str] = {}
pending_key: Optional[str] = None
def _norm_key(k: str) -> str:
kk = str(k or "").strip().lower()
kk = re.sub(r"\s+", " ", kk)
- if kk in {"authors", "author(s)", "author(s).", "author(s):"}:
+ if kk in {"authors",
+ "author(s)",
+ "author(s).",
+ "author(s):"}:
return "author"
- if kk in {"tag", "tags"}:
+ if kk in {"tag",
+ "tags"}:
return "tags"
return kk
@@ -166,7 +166,10 @@ def _parse_libgen_ads_tags_html(html: str) -> Dict[str, Any]:
raw_fields[pending_key] = line
pending_key = None
- out: Dict[str, Any] = {"_raw_fields": dict(raw_fields)}
+ out: Dict[str,
+ Any] = {
+ "_raw_fields": dict(raw_fields)
+ }
title = str(raw_fields.get("title") or "").strip()
if title:
@@ -272,9 +275,11 @@ def _prefer_isbn(isbns: List[str]) -> str:
return vals[0] if vals else ""
-def _enrich_book_tags_from_isbn(
- isbn: str, *, config: Optional[Dict[str, Any]] = None
-) -> Tuple[List[str], str]:
+def _enrich_book_tags_from_isbn(isbn: str,
+ *,
+ config: Optional[Dict[str,
+ Any]] = None) -> Tuple[List[str],
+ str]:
"""Return (tags, source_name) for the given ISBN.
Priority:
@@ -378,7 +383,8 @@ def _enrich_book_tags_from_isbn(
try:
from Provider.metadata_provider import get_metadata_provider
- provider = get_metadata_provider("isbnsearch", config or {})
+ provider = get_metadata_provider("isbnsearch",
+ config or {})
if provider is None:
return [], ""
items = provider.search(isbn_clean, limit=1)
@@ -393,7 +399,10 @@ def _enrich_book_tags_from_isbn(
def _fetch_libgen_details_html(
- url: str, *, timeout: Optional[Tuple[float, float]] = None
+ url: str,
+ *,
+ timeout: Optional[Tuple[float,
+ float]] = None
) -> Optional[str]:
try:
if timeout is None:
@@ -401,7 +410,8 @@ def _fetch_libgen_details_html(
session = requests.Session()
session.headers.update(
{
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0 Safari/537.36",
+ "User-Agent":
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0 Safari/537.36",
}
)
with session.get(str(url), stream=True, timeout=timeout) as resp:
@@ -420,14 +430,16 @@ def _parse_libgen_details_html(html: str) -> Dict[str, Any]:
Best-effort and intentionally tolerant of mirror variations.
"""
- out: Dict[str, Any] = {}
- raw_fields: Dict[str, str] = {}
+ out: Dict[str,
+ Any] = {}
+ raw_fields: Dict[str,
+ str] = {}
s = str(html or "")
# Fast path: try to pull simple Label/Value table rows.
for m in re.finditer(
-        r"(?is)<tr[^>]*>\s*<td[^>]*>\s*([^<]{1,80}?)\s*:??\s*</td>\s*<td[^>]*>(.*?)\s*</td>\s*</tr>",
-        s,
+        r"(?is)<tr[^>]*>\s*<td[^>]*>\s*([^<]{1,80}?)\s*:??\s*</td>\s*<td[^>]*>(.*?)\s*</td>\s*</tr>",
+        s,
):
label = _strip_html_to_text(m.group(1))
raw_val_html = str(m.group(2) or "")
@@ -467,7 +479,8 @@ def _parse_libgen_details_html(html: str) -> Dict[str, Any]:
chunk_start = m.end()
chunk_end = (
- strong_matches[idx + 1].start() if (idx + 1) < len(strong_matches) else len(s)
+ strong_matches[idx + 1].start() if
+ (idx + 1) < len(strong_matches) else len(s)
)
raw_val_html = s[chunk_start:chunk_end]
@@ -619,17 +632,17 @@ def _libgen_metadata_to_tags(meta: Dict[str, Any]) -> List[str]:
for k, v in raw_fields.items():
lk = str(k or "").strip().lower()
if lk in {
- "title",
- "author(s)",
- "authors",
- "author",
- "publisher",
- "year",
- "isbn",
- "language",
- "oclc/worldcat",
- "tags",
- "edition id",
+ "title",
+ "author(s)",
+ "authors",
+ "author",
+ "publisher",
+ "year",
+ "isbn",
+ "language",
+ "oclc/worldcat",
+ "tags",
+ "edition id",
}:
continue
vv = str(v or "").strip()
@@ -658,13 +671,14 @@ class Libgen(Provider):
self,
query: str,
limit: int = 50,
- filters: Optional[Dict[str, Any]] = None,
+ filters: Optional[Dict[str,
+ Any]] = None,
**kwargs: Any,
) -> List[SearchResult]:
filters = filters or {}
try:
- from cli_syntax import get_field, get_free_text, parse_query
+ from SYS.cli_syntax import get_field, get_free_text, parse_query
from SYS.logger import is_debug_enabled
parsed = parse_query(query)
@@ -701,10 +715,14 @@ class Libgen(Provider):
mirror_url = book.get("mirror_url", "")
columns = [
- ("Title", title),
- ("Author", author),
- ("Pages", str(pages)),
- ("Ext", str(extension)),
+ ("Title",
+ title),
+ ("Author",
+ author),
+ ("Pages",
+ str(pages)),
+ ("Ext",
+ str(extension)),
]
detail = f"By: {author}"
@@ -732,8 +750,10 @@ class Libgen(Provider):
"filesize": filesize,
"pages": pages,
"extension": extension,
- "book_id": book.get("book_id", ""),
- "md5": book.get("md5", ""),
+ "book_id": book.get("book_id",
+ ""),
+ "md5": book.get("md5",
+ ""),
},
)
)
@@ -786,12 +806,9 @@ class Libgen(Provider):
title = ""
base_name = sanitize_filename(
- title
- or md5
- or (
+ title or md5 or (
f"libgen_{_libgen_id_from_url(target)}"
- if _libgen_id_from_url(target)
- else "libgen"
+ if _libgen_id_from_url(target) else "libgen"
)
)
out_path = output_dir / base_name
@@ -819,9 +836,12 @@ class Libgen(Provider):
if now - last_progress_time[0] < 0.5:
return
- total = int(content_length) if content_length and content_length > 0 else None
+ total = int(
+ content_length
+ ) if content_length and content_length > 0 else None
downloaded = (
- int(bytes_downloaded) if bytes_downloaded and bytes_downloaded > 0 else 0
+ int(bytes_downloaded)
+ if bytes_downloaded and bytes_downloaded > 0 else 0
)
elapsed = max(0.001, now - start_time)
speed = downloaded / elapsed
@@ -850,15 +870,19 @@ class Libgen(Provider):
# enrichment (OpenLibrary/isbnsearch) unless the user later chooses to.
if ("/ads.php" in low) or ("/get.php" in low):
ads_url = (
- target if "/ads.php" in low else _libgen_ads_url_for_target(target)
+ target if "/ads.php" in low else
+ _libgen_ads_url_for_target(target)
)
if ads_url:
html = _fetch_libgen_details_html(
- ads_url, timeout=(DEFAULT_CONNECT_TIMEOUT, 4.0)
+ ads_url,
+ timeout=(DEFAULT_CONNECT_TIMEOUT,
+ 4.0)
)
if html:
meta = _parse_libgen_ads_tags_html(html)
- extracted_title = str(meta.get("title") or "").strip()
+ extracted_title = str(meta.get("title")
+ or "").strip()
if extracted_title:
md["title"] = extracted_title
result.tag.add(f"title:{extracted_title}")
@@ -867,8 +891,8 @@ class Libgen(Provider):
authors = (
meta.get("authors")
- if isinstance(meta.get("authors"), list)
- else []
+ if isinstance(meta.get("authors"),
+ list) else []
)
for a in authors or []:
aa = str(a or "").strip()
@@ -892,11 +916,12 @@ class Libgen(Provider):
isbns = (
meta.get("isbn")
- if isinstance(meta.get("isbn"), list)
- else []
+ if isinstance(meta.get("isbn"),
+ list) else []
)
isbns = [
- str(x).strip() for x in (isbns or []) if str(x).strip()
+ str(x).strip() for x in (isbns or [])
+ if str(x).strip()
]
if isbns:
md["isbn"] = isbns
@@ -905,8 +930,8 @@ class Libgen(Provider):
free_tags = (
meta.get("tags")
- if isinstance(meta.get("tags"), list)
- else []
+ if isinstance(meta.get("tags"),
+ list) else []
)
for t in free_tags or []:
tt = str(t or "").strip()
@@ -919,29 +944,28 @@ class Libgen(Provider):
for k, v in raw_fields.items():
lk = str(k or "").strip().lower()
if lk in {
- "title",
- "author",
- "authors",
- "publisher",
- "year",
- "isbn",
- "language",
- "tags",
+ "title",
+ "author",
+ "authors",
+ "publisher",
+ "year",
+ "isbn",
+ "language",
+ "tags",
}:
continue
vv = str(v or "").strip()
if not vv:
continue
- ns = re.sub(r"[^a-z0-9]+", "_", lk).strip("_")
+ ns = re.sub(r"[^a-z0-9]+",
+ "_",
+ lk).strip("_")
if ns:
result.tag.add(f"libgen_{ns}:{vv}")
# Legacy: edition/file/series details pages (title + ISBN) + external enrichment.
- if (
- ("/edition.php" in low)
- or ("/file.php" in low)
- or ("/series.php" in low)
- ):
+ if (("/edition.php" in low) or ("/file.php" in low)
+ or ("/series.php" in low)):
html = _fetch_libgen_details_html(target)
if html:
meta = _parse_libgen_details_html(html)
@@ -953,11 +977,12 @@ class Libgen(Provider):
extracted_title = str(meta.get("title") or "").strip()
extracted_isbns = (
- meta.get("isbn") if isinstance(meta.get("isbn"), list) else []
+ meta.get("isbn")
+ if isinstance(meta.get("isbn"),
+ list) else []
)
extracted_isbns = [
- str(x).strip()
- for x in (extracted_isbns or [])
+ str(x).strip() for x in (extracted_isbns or [])
if str(x).strip()
]
@@ -967,7 +992,9 @@ class Libgen(Provider):
if extracted_isbns:
md["isbn"] = extracted_isbns
for isbn_val in extracted_isbns:
- isbn_norm = str(isbn_val).strip().replace("-", "")
+ isbn_norm = str(isbn_val
+ ).strip().replace("-",
+ "")
if isbn_norm:
result.tag.add(f"isbn:{isbn_norm}")
if meta.get("edition_id"):
@@ -987,7 +1014,8 @@ class Libgen(Provider):
if enriched_source:
md["metadata_enriched_from"] = enriched_source
- if extracted_title and ((not title) or title.startswith("http")):
+ if extracted_title and ((not title)
+ or title.startswith("http")):
title = extracted_title
except Exception:
pass
@@ -1041,7 +1069,8 @@ class LibgenSearch:
self.session = session or requests.Session()
self.session.headers.update(
{
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
+ "User-Agent":
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}
)
@@ -1052,7 +1081,8 @@ class LibgenSearch:
limit: int,
*,
timeout: Any = DEFAULT_TIMEOUT,
- ) -> List[Dict[str, Any]]:
+ ) -> List[Dict[str,
+ Any]]:
"""Search libgen.rs/is/st JSON API when available.
Many LibGen mirrors expose /json.php which is less brittle than scraping.
@@ -1060,7 +1090,9 @@ class LibgenSearch:
url = f"{mirror}/json.php"
params = {
"req": query,
- "res": max(1, min(100, int(limit) if limit else 50)),
+ "res": max(1,
+ min(100,
+ int(limit) if limit else 50)),
"column": "def",
"phrase": 1,
}
@@ -1086,7 +1118,8 @@ class LibgenSearch:
pages = item.get("Pages") or item.get("pages") or ""
language = item.get("Language") or item.get("language") or ""
size = item.get("Size") or item.get("size") or item.get("filesize") or ""
- extension = item.get("Extension") or item.get("extension") or item.get("ext") or ""
+ extension = item.get("Extension") or item.get("extension"
+ ) or item.get("ext") or ""
md5 = item.get("MD5") or item.get("md5") or ""
download_link = f"http://library.lol/main/{md5}" if md5 else ""
@@ -1121,7 +1154,8 @@ class LibgenSearch:
total_timeout: float = DEFAULT_SEARCH_TOTAL_TIMEOUT,
log_info: LogFn = None,
log_error: ErrorFn = None,
- ) -> List[Dict[str, Any]]:
+ ) -> List[Dict[str,
+ Any]]:
"""Search LibGen mirrors.
Uses a total time budget across mirrors to avoid long hangs.
@@ -1135,7 +1169,10 @@ class LibgenSearch:
elapsed = time.monotonic() - started
remaining = total_timeout - elapsed
if remaining <= 0:
- _call(log_error, f"[libgen] Search timed out after {total_timeout:.0f}s")
+ _call(
+ log_error,
+ f"[libgen] Search timed out after {total_timeout:.0f}s"
+ )
break
# Bound each request so we can try multiple mirrors within the budget.
@@ -1152,7 +1189,10 @@ class LibgenSearch:
results: List[Dict[str, Any]] = []
try:
results = self._search_libgen_json(
- mirror, query, limit, timeout=request_timeout
+ mirror,
+ query,
+ limit,
+ timeout=request_timeout
)
except Exception:
results = []
@@ -1163,11 +1203,17 @@ class LibgenSearch:
if "libgen.li" in mirror or "libgen.gl" in mirror:
results = self._search_libgen_li(
- mirror, query, limit, timeout=request_timeout
+ mirror,
+ query,
+ limit,
+ timeout=request_timeout
)
else:
results = self._search_libgen_rs(
- mirror, query, limit, timeout=request_timeout
+ mirror,
+ query,
+ limit,
+ timeout=request_timeout
)
if results:
@@ -1192,7 +1238,8 @@ class LibgenSearch:
limit: int,
*,
timeout: Any = DEFAULT_TIMEOUT,
- ) -> List[Dict[str, Any]]:
+ ) -> List[Dict[str,
+ Any]]:
"""Search libgen.rs/is/st style mirrors."""
url = f"{mirror}/search.php"
params = {
@@ -1211,7 +1258,8 @@ class LibgenSearch:
return []
def _text(el: Any) -> str:
- return " ".join([t.strip() for t in el.itertext() if t and str(t).strip()]).strip()
+ return " ".join([t.strip() for t in el.itertext()
+ if t and str(t).strip()]).strip()
try:
doc = lxml_html.fromstring(resp.content)
@@ -1314,13 +1362,16 @@ class LibgenSearch:
limit: int,
*,
timeout: Any = DEFAULT_TIMEOUT,
- ) -> List[Dict[str, Any]]:
+ ) -> List[Dict[str,
+ Any]]:
"""Search libgen.li/gl style mirrors."""
url = f"{mirror}/index.php"
params = {
"req": query,
# Keep the request lightweight; covers slow the HTML response.
- "res": max(1, min(100, int(limit) if limit else 50)),
+ "res": max(1,
+ min(100,
+ int(limit) if limit else 50)),
"covers": "off",
"filesuns": "all",
}
@@ -1332,7 +1383,8 @@ class LibgenSearch:
return []
def _text(el: Any) -> str:
- return " ".join([t.strip() for t in el.itertext() if t and str(t).strip()]).strip()
+ return " ".join([t.strip() for t in el.itertext()
+ if t and str(t).strip()]).strip()
try:
doc = lxml_html.fromstring(resp.content)
@@ -1414,7 +1466,10 @@ class LibgenSearch:
# Extract ISBNs from meta cell (avoid using them as title)
# Matches 10 or 13-digit ISBN with optional leading 978/979.
- isbn_candidates = re.findall(r"\b(?:97[89])?\d{9}[\dXx]\b", meta_text)
+ isbn_candidates = re.findall(
+ r"\b(?:97[89])?\d{9}[\dXx]\b",
+ meta_text
+ )
if isbn_candidates:
seen: List[str] = []
for s in isbn_candidates:
@@ -1453,7 +1508,8 @@ class LibgenSearch:
best_score: Optional[tuple] = None
for cand in deduped:
low = cand.lower().strip()
- if low in {"cover", "edition"}:
+ if low in {"cover",
+ "edition"}:
continue
if _looks_like_isbn_blob(cand):
continue
@@ -1527,7 +1583,8 @@ def search_libgen(
log_info: LogFn = None,
log_error: ErrorFn = None,
session: Optional[requests.Session] = None,
-) -> List[Dict[str, Any]]:
+) -> List[Dict[str,
+ Any]]:
"""Search Libgen using the robust scraper."""
searcher = LibgenSearch(session=session)
try:
@@ -1572,7 +1629,9 @@ def _resolve_download_url(
# Handle edition -> file links.
m = re.search(
- r'href=["\']([^"\']*file\.php\?id=\d+[^"\']*)["\']', html, flags=re.IGNORECASE
+ r'href=["\']([^"\']*file\.php\?id=\d+[^"\']*)["\']',
+ html,
+ flags=re.IGNORECASE
)
if m:
href = str(m.group(1) or "").strip()
@@ -1581,7 +1640,9 @@ def _resolve_download_url(
# Handle series -> edition links.
m = re.search(
- r'href=["\']([^"\']*edition\.php\?id=\d+[^"\']*)["\']', html, flags=re.IGNORECASE
+ r'href=["\']([^"\']*edition\.php\?id=\d+[^"\']*)["\']',
+ html,
+ flags=re.IGNORECASE
)
if m:
href = str(m.group(1) or "").strip()
@@ -1611,7 +1672,11 @@ def _resolve_download_url(
return urljoin(base_url, href)
# Next: library.lol main links.
- m = re.search(r'href=["\']([^"\']*library\.lol[^"\']*)["\']', html, flags=re.IGNORECASE)
+ m = re.search(
+ r'href=["\']([^"\']*library\.lol[^"\']*)["\']',
+ html,
+ flags=re.IGNORECASE
+ )
if m:
href = str(m.group(1) or "").strip()
if href and not href.lower().startswith("javascript:"):
@@ -1632,7 +1697,8 @@ def _resolve_download_url(
def _find_href_by_text(doc: Any, pattern: str) -> Optional[str]:
for a in doc.xpath("//a[@href]"):
- t = " ".join([s.strip() for s in a.itertext() if s and str(s).strip()]).strip()
+ t = " ".join([s.strip() for s in a.itertext()
+ if s and str(s).strip()]).strip()
if t and re.search(pattern, t, re.IGNORECASE):
href = str(a.get("href") or "").strip()
if href and not href.lower().startswith("javascript:"):
@@ -1646,9 +1712,13 @@ def _resolve_download_url(
_call(log_info, f"[resolve] Checking: {current_url}")
- if current_url.lower().endswith(
- (".pdf", ".epub", ".mobi", ".djvu", ".azw3", ".cbz", ".cbr")
- ):
+ if current_url.lower().endswith((".pdf",
+ ".epub",
+ ".mobi",
+ ".djvu",
+ ".azw3",
+ ".cbz",
+ ".cbr")):
return current_url
try:
@@ -1676,7 +1746,10 @@ def _resolve_download_url(
if next_url:
current_url = next_url
continue
- _call(log_info, "[resolve] lxml not available and regex resolver found no links")
+ _call(
+ log_info,
+ "[resolve] lxml not available and regex resolver found no links"
+ )
return None
get_href = _find_href_by_text(doc, r"^GET$")
@@ -1722,12 +1795,16 @@ def _resolve_download_url(
return None
-def _guess_filename_extension(download_url: str, headers: Dict[str, str]) -> Optional[str]:
+def _guess_filename_extension(download_url: str,
+ headers: Dict[str,
+ str]) -> Optional[str]:
"""Guess the file extension from headers or the download URL."""
content_disposition = headers.get("content-disposition", "")
if content_disposition:
match = re.search(
- r"filename\*?=(?:UTF-8\'\'|\"?)([^\";]+)", content_disposition, flags=re.IGNORECASE
+ r"filename\*?=(?:UTF-8\'\'|\"?)([^\";]+)",
+ content_disposition,
+ flags=re.IGNORECASE
)
if match:
filename = unquote(match.group(1).strip('"'))
@@ -1787,8 +1864,11 @@ def download_from_mirror(
log_info: LogFn = None,
log_error: ErrorFn = None,
session: Optional[requests.Session] = None,
- progress_callback: Optional[Callable[[int, int], None]] = None,
-) -> Tuple[bool, Optional[Path]]:
+ progress_callback: Optional[Callable[[int,
+ int],
+ None]] = None,
+) -> Tuple[bool,
+ Optional[Path]]:
"""Download file from a LibGen mirror URL with optional progress tracking."""
session = session or requests.Session()
output_path = Path(output_path)
@@ -1807,7 +1887,8 @@ def download_from_mirror(
downloaded = 0
total_size = 0
- headers: Dict[str, str] = {}
+ headers: Dict[str,
+ str] = {}
with session.get(download_url, stream=True, timeout=60) as r:
r.raise_for_status()
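The LibGen changes above try a mirror's /json.php (req/res/column/phrase) before falling back to HTML scraping. A minimal sketch of that JSON call; response field casing varies between mirrors, so both spellings are checked, as in the patch:

    from typing import Any, Dict, List

    import requests

    def search_libgen_json(mirror: str, query: str, limit: int = 50) -> List[Dict[str, Any]]:
        params = {
            "req": query,
            "res": max(1, min(100, int(limit) if limit else 50)),
            "column": "def",
            "phrase": 1,
        }
        resp = requests.get(f"{mirror}/json.php", params=params, timeout=(5, 15))
        resp.raise_for_status()
        items = resp.json()
        out: List[Dict[str, Any]] = []
        for item in items if isinstance(items, list) else []:
            md5 = item.get("MD5") or item.get("md5") or ""
            out.append({
                "title": item.get("Title") or item.get("title") or "",
                "extension": item.get("Extension") or item.get("extension") or item.get("ext") or "",
                "md5": md5,
                # library.lol hosts a per-MD5 landing page, as used above.
                "mirror_url": f"http://library.lol/main/{md5}" if md5 else "",
            })
        return out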
diff --git a/Provider/loc.py b/Provider/loc.py
index ea7f1ff..fe57690 100644
--- a/Provider/loc.py
+++ b/Provider/loc.py
@@ -4,7 +4,7 @@ from typing import Any, Dict, List, Optional
from API.loc import LOCClient
from ProviderCore.base import Provider, SearchResult
-from cli_syntax import get_free_text, parse_query
+from SYS.cli_syntax import get_free_text, parse_query
from SYS.logger import log
@@ -23,13 +23,16 @@ class LOC(Provider):
self,
query: str,
limit: int = 50,
- filters: Optional[Dict[str, Any]] = None,
+ filters: Optional[Dict[str,
+ Any]] = None,
**kwargs: Any,
) -> List[SearchResult]:
_ = kwargs
parsed = parse_query(query or "")
text = get_free_text(parsed).strip()
- fields = parsed.get("fields", {}) if isinstance(parsed, dict) else {}
+ fields = parsed.get("fields",
+ {}) if isinstance(parsed,
+ dict) else {}
# Allow explicit q: override.
q = str(fields.get("q") or text or "").strip()
@@ -37,7 +40,8 @@ class LOC(Provider):
return []
# Pass through any extra filters supported by the LoC API.
- extra: Dict[str, Any] = {}
+ extra: Dict[str,
+ Any] = {}
if isinstance(filters, dict):
extra.update(filters)
if isinstance(fields, dict):
@@ -57,7 +61,10 @@ class LOC(Provider):
while len(results) < max(0, int(limit)):
payload = client.search_chronicling_america(
- q, start=start, count=page_size, extra_params=extra
+ q,
+ start=start,
+ count=page_size,
+ extra_params=extra
)
items = payload.get("results")
if not isinstance(items, list) or not items:
@@ -108,10 +115,14 @@ class LOC(Provider):
annotations=annotations,
media_kind="document",
columns=[
- ("Title", title),
- ("Date", date),
- ("Format", fmt_text),
- ("URL", url),
+ ("Title",
+ title),
+ ("Date",
+ date),
+ ("Format",
+ fmt_text),
+ ("URL",
+ url),
],
full_metadata=it,
)
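The LoC hunk above pages through Chronicling America results with start/count until the requested limit is reached or a page comes back empty. A generic sketch of that loop, with fetch(start, count) standing in for client.search_chronicling_america:

    from typing import Any, Callable, Dict, List

    def paged_search(
        fetch: Callable[[int, int], Dict[str, Any]],
        limit: int,
        page_size: int = 50,
    ) -> List[Dict[str, Any]]:
        results: List[Dict[str, Any]] = []
        start = 0
        while len(results) < max(0, int(limit)):
            payload = fetch(start, page_size)
            items = payload.get("results")
            if not isinstance(items, list) or not items:
                break  # no more pages
            results.extend(items)
            start += len(items)
        return results[: max(0, int(limit))]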
diff --git a/Provider/matrix.py b/Provider/matrix.py
index aef219a..5f24523 100644
--- a/Provider/matrix.py
+++ b/Provider/matrix.py
@@ -11,8 +11,9 @@ import requests
from ProviderCore.base import Provider
-
-_MATRIX_INIT_CHECK_CACHE: Dict[str, Tuple[bool, Optional[str]]] = {}
+_MATRIX_INIT_CHECK_CACHE: Dict[str,
+ Tuple[bool,
+ Optional[str]]] = {}
def _sniff_mime_from_header(path: Path) -> Optional[str]:
@@ -79,9 +80,10 @@ def _sniff_mime_from_header(path: Path) -> Optional[str]:
return None
-def _classify_matrix_upload(
- path: Path, *, explicit_mime_type: Optional[str] = None
-) -> Tuple[str, str]:
+def _classify_matrix_upload(path: Path,
+ *,
+ explicit_mime_type: Optional[str] = None) -> Tuple[str,
+ str]:
"""Return (mime_type, msgtype) for Matrix uploads."""
mime_type = str(explicit_mime_type or "").strip() or None
@@ -94,9 +96,11 @@ def _classify_matrix_upload(
# Refinements based on extension for ambiguous containers.
ext = path.suffix.lower()
- if ext in {".m4a", ".aac"}:
+ if ext in {".m4a",
+ ".aac"}:
mime_type = mime_type or "audio/mp4"
- if ext in {".mkv", ".webm"}:
+ if ext in {".mkv",
+ ".webm"}:
mime_type = mime_type or "video/x-matroska"
if ext in {".ogv"}:
mime_type = mime_type or "video/ogg"
@@ -142,7 +146,13 @@ def _classify_matrix_upload(
".3gp",
".ogv",
}
- image_exts = {".jpg", ".jpeg", ".png", ".gif", ".webp", ".bmp", ".tiff"}
+ image_exts = {".jpg",
+ ".jpeg",
+ ".png",
+ ".gif",
+ ".webp",
+ ".bmp",
+ ".tiff"}
if ext in audio_exts:
msgtype = "m.audio"
elif ext in video_exts:
@@ -162,9 +172,10 @@ def _normalize_homeserver(value: str) -> str:
return text.rstrip("/")
-def _matrix_health_check(
- *, homeserver: str, access_token: Optional[str]
-) -> Tuple[bool, Optional[str]]:
+def _matrix_health_check(*,
+ homeserver: str,
+ access_token: Optional[str]) -> Tuple[bool,
+ Optional[str]]:
"""Lightweight Matrix reachability/auth validation.
- Always checks `/versions` (no auth).
@@ -180,9 +191,13 @@ def _matrix_health_check(
return False, f"Homeserver returned {resp.status_code}"
if access_token:
- headers = {"Authorization": f"Bearer {access_token}"}
+ headers = {
+ "Authorization": f"Bearer {access_token}"
+ }
resp = requests.get(
- f"{base}/_matrix/client/v3/account/whoami", headers=headers, timeout=5
+ f"{base}/_matrix/client/v3/account/whoami",
+ headers=headers,
+ timeout=5
)
if resp.status_code != 200:
return False, f"Authentication failed: {resp.status_code}"
@@ -201,9 +216,10 @@ class Matrix(Provider):
self._init_reason: Optional[str] = None
matrix_conf = (
- self.config.get("provider", {}).get("matrix", {})
- if isinstance(self.config, dict)
- else {}
+ self.config.get("provider",
+ {}).get("matrix",
+ {}) if isinstance(self.config,
+ dict) else {}
)
homeserver = matrix_conf.get("homeserver")
access_token = matrix_conf.get("access_token")
@@ -237,14 +253,18 @@ class Matrix(Provider):
return False
if self._init_ok is False:
return False
- matrix_conf = self.config.get("provider", {}).get("matrix", {})
+ matrix_conf = self.config.get("provider",
+ {}).get("matrix",
+ {})
return bool(
matrix_conf.get("homeserver")
and (matrix_conf.get("access_token") or matrix_conf.get("password"))
)
def _get_homeserver_and_token(self) -> Tuple[str, str]:
- matrix_conf = self.config.get("provider", {}).get("matrix", {})
+ matrix_conf = self.config.get("provider",
+ {}).get("matrix",
+ {})
homeserver = matrix_conf.get("homeserver")
access_token = matrix_conf.get("access_token")
if not homeserver:
@@ -262,8 +282,14 @@ class Matrix(Provider):
Uses `GET /_matrix/client/v3/joined_rooms`.
"""
base, token = self._get_homeserver_and_token()
- headers = {"Authorization": f"Bearer {token}"}
- resp = requests.get(f"{base}/_matrix/client/v3/joined_rooms", headers=headers, timeout=10)
+ headers = {
+ "Authorization": f"Bearer {token}"
+ }
+ resp = requests.get(
+ f"{base}/_matrix/client/v3/joined_rooms",
+ headers=headers,
+ timeout=10
+ )
if resp.status_code != 200:
raise Exception(f"Matrix joined_rooms failed: {resp.text}")
data = resp.json() or {}
@@ -275,18 +301,24 @@ class Matrix(Provider):
out.append(rid.strip())
return out
- def list_rooms(self, *, room_ids: Optional[List[str]] = None) -> List[Dict[str, Any]]:
+ def list_rooms(self,
+ *,
+ room_ids: Optional[List[str]] = None) -> List[Dict[str,
+ Any]]:
"""Return joined rooms, optionally limited to a subset.
Performance note: room names require additional per-room HTTP requests.
If `room_ids` is provided, only those rooms will have name lookups.
"""
base, token = self._get_homeserver_and_token()
- headers = {"Authorization": f"Bearer {token}"}
+ headers = {
+ "Authorization": f"Bearer {token}"
+ }
joined = self.list_joined_room_ids()
if room_ids:
- allowed = {str(v).strip().casefold() for v in room_ids if str(v).strip()}
+ allowed = {str(v).strip().casefold()
+ for v in room_ids if str(v).strip()}
if allowed:
# Accept either full IDs (!id:hs) or short IDs (!id).
def _is_allowed(rid: str) -> bool:
@@ -319,7 +351,10 @@ class Matrix(Provider):
name = maybe
except Exception:
pass
- out.append({"room_id": room_id, "name": name})
+ out.append({
+ "room_id": room_id,
+ "name": name
+ })
return out
def upload_to_room(self, file_path: str, room_id: str, **kwargs: Any) -> str:
@@ -349,10 +384,17 @@ class Matrix(Provider):
upload_url = f"{base}/_matrix/media/v3/upload"
with open(path, "rb") as handle:
wrapped = ProgressFileReader(
- handle, total_bytes=int(path.stat().st_size), label="upload"
+ handle,
+ total_bytes=int(path.stat().st_size),
+ label="upload"
)
resp = requests.post(
- upload_url, headers=headers, data=wrapped, params={"filename": filename}
+ upload_url,
+ headers=headers,
+ data=wrapped,
+ params={
+ "filename": filename
+ }
)
if resp.status_code != 200:
raise Exception(f"Matrix upload failed: {resp.text}")
@@ -366,7 +408,7 @@ class Matrix(Provider):
try:
curi = str(content_uri or "").strip()
if curi.startswith("mxc://"):
- rest = curi[len("mxc://") :]
+ rest = curi[len("mxc://"):]
if "/" in rest:
server_name, media_id = rest.split("/", 1)
server_name = str(server_name).strip()
@@ -376,14 +418,24 @@ class Matrix(Provider):
except Exception:
download_url_for_store = ""
- info = {"mimetype": mime_type, "size": path.stat().st_size}
- payload = {"msgtype": msgtype, "body": filename, "url": content_uri, "info": info}
+ info = {
+ "mimetype": mime_type,
+ "size": path.stat().st_size
+ }
+ payload = {
+ "msgtype": msgtype,
+ "body": filename,
+ "url": content_uri,
+ "info": info
+ }
# Correct Matrix client API send endpoint requires a transaction ID.
txn_id = f"mm_{int(time.time())}_{uuid.uuid4().hex[:8]}"
encoded_room = quote(str(room_id), safe="")
send_url = f"{base}/_matrix/client/v3/rooms/{encoded_room}/send/m.room.message/{txn_id}"
- send_headers = {"Authorization": f"Bearer {token}"}
+ send_headers = {
+ "Authorization": f"Bearer {token}"
+ }
send_resp = requests.put(send_url, headers=send_headers, json=payload)
if send_resp.status_code != 200:
raise Exception(f"Matrix send message failed: {send_resp.text}")
@@ -391,8 +443,7 @@ class Matrix(Provider):
event_id = (send_resp.json() or {}).get("event_id")
link = (
f"https://matrix.to/#/{room_id}/{event_id}"
- if event_id
- else f"https://matrix.to/#/{room_id}"
+ if event_id else f"https://matrix.to/#/{room_id}"
)
# Optional: if a PipeObject is provided and it already has store+hash,
@@ -403,7 +454,10 @@ class Matrix(Provider):
from Store import Store
# Prefer the direct media download URL for storage backends.
- Store(self.config, suppress_debug=True).try_add_url_for_pipe_object(
+ Store(
+ self.config,
+ suppress_debug=True
+ ).try_add_url_for_pipe_object(
pipe_obj,
download_url_for_store or link,
)
@@ -424,8 +478,13 @@ class Matrix(Provider):
encoded_room = quote(str(room_id), safe="")
txn_id = f"mm_{int(time.time())}_{uuid.uuid4().hex[:8]}"
send_url = f"{base}/_matrix/client/v3/rooms/{encoded_room}/send/m.room.message/{txn_id}"
- send_headers = {"Authorization": f"Bearer {token}"}
- payload = {"msgtype": "m.text", "body": message}
+ send_headers = {
+ "Authorization": f"Bearer {token}"
+ }
+ payload = {
+ "msgtype": "m.text",
+ "body": message
+ }
send_resp = requests.put(send_url, headers=send_headers, json=payload)
if send_resp.status_code != 200:
raise Exception(f"Matrix send text failed: {send_resp.text}")
@@ -433,19 +492,25 @@ class Matrix(Provider):
event_id = (send_resp.json() or {}).get("event_id")
return (
f"https://matrix.to/#/{room_id}/{event_id}"
- if event_id
- else f"https://matrix.to/#/{room_id}"
+ if event_id else f"https://matrix.to/#/{room_id}"
)
def upload(self, file_path: str, **kwargs: Any) -> str:
- matrix_conf = self.config.get("provider", {}).get("matrix", {})
+ matrix_conf = self.config.get("provider",
+ {}).get("matrix",
+ {})
room_id = matrix_conf.get("room_id")
if not room_id:
raise Exception("Matrix room_id missing")
return self.upload_to_room(file_path, str(room_id))
def selector(
- self, selected_items: List[Any], *, ctx: Any, stage_is_last: bool = True, **_kwargs: Any
+ self,
+ selected_items: List[Any],
+ *,
+ ctx: Any,
+ stage_is_last: bool = True,
+ **_kwargs: Any
) -> bool:
"""Handle Matrix room selection via `@N`.
@@ -501,7 +566,11 @@ class Matrix(Provider):
print(f"Matrix upload file missing: {file_path}")
continue
- link = self.upload_to_room(str(media_path), str(room_id), pipe_obj=pipe_obj)
+ link = self.upload_to_room(
+ str(media_path),
+ str(room_id),
+ pipe_obj=pipe_obj
+ )
if link:
print(link)
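The Matrix hunks above implement a two-step flow: POST the raw bytes to /_matrix/media/v3/upload, then PUT an m.room.message event that references the returned mxc:// URI. A reduced sketch with plain requests (error handling trimmed; the real provider also classifies msgtype and streams uploads with progress):

    import time
    import uuid
    from pathlib import Path
    from urllib.parse import quote

    import requests

    def upload_and_send(homeserver: str, token: str, room_id: str, file_path: str,
                        mime_type: str = "application/octet-stream",
                        msgtype: str = "m.file") -> str:
        base = homeserver.rstrip("/")
        headers = {"Authorization": f"Bearer {token}"}
        path = Path(file_path)

        # 1) Upload the media; the response carries an mxc:// content URI.
        with open(path, "rb") as handle:
            resp = requests.post(
                f"{base}/_matrix/media/v3/upload",
                headers={**headers, "Content-Type": mime_type},
                data=handle,
                params={"filename": path.name},
                timeout=120,
            )
        resp.raise_for_status()
        content_uri = resp.json()["content_uri"]

        # 2) Send a room message event pointing at the uploaded media.
        txn_id = f"mm_{int(time.time())}_{uuid.uuid4().hex[:8]}"
        payload = {
            "msgtype": msgtype,
            "body": path.name,
            "url": content_uri,
            "info": {"mimetype": mime_type, "size": path.stat().st_size},
        }
        send_url = (
            f"{base}/_matrix/client/v3/rooms/{quote(room_id, safe='')}"
            f"/send/m.room.message/{txn_id}"
        )
        send = requests.put(send_url, headers=headers, json=payload, timeout=30)
        send.raise_for_status()
        event_id = (send.json() or {}).get("event_id")
        return (
            f"https://matrix.to/#/{room_id}/{event_id}"
            if event_id else f"https://matrix.to/#/{room_id}"
        )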
diff --git a/Provider/metadata_provider.py b/Provider/metadata_provider.py
index f10bc07..a5e7bb9 100644
--- a/Provider/metadata_provider.py
+++ b/Provider/metadata_provider.py
@@ -16,7 +16,6 @@ try: # Optional dependency
except ImportError: # pragma: no cover - optional
musicbrainzngs = None
-
try: # Optional dependency
import yt_dlp # type: ignore
except ImportError: # pragma: no cover - optional
@@ -62,9 +61,18 @@ class ITunesProvider(MetadataProvider):
"""Metadata provider using the iTunes Search API."""
def search(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
- params = {"term": query, "media": "music", "entity": "song", "limit": limit}
+ params = {
+ "term": query,
+ "media": "music",
+ "entity": "song",
+ "limit": limit
+ }
try:
- resp = requests.get("https://itunes.apple.com/search", params=params, timeout=10)
+ resp = requests.get(
+ "https://itunes.apple.com/search",
+ params=params,
+ timeout=10
+ )
resp.raise_for_status()
results = resp.json().get("results", [])
except Exception as exc:
@@ -77,7 +85,8 @@ class ITunesProvider(MetadataProvider):
"title": r.get("trackName"),
"artist": r.get("artistName"),
"album": r.get("collectionName"),
- "year": str(r.get("releaseDate", ""))[:4],
+ "year": str(r.get("releaseDate",
+ ""))[:4],
"provider": self.name,
"raw": r,
}
@@ -100,17 +109,22 @@ class OpenLibraryMetadataProvider(MetadataProvider):
try:
# Prefer ISBN-specific search when the query looks like one
- if query_clean.replace("-", "").isdigit() and len(query_clean.replace("-", "")) in (
- 10,
- 13,
- ):
+ if query_clean.replace("-",
+ "").isdigit() and len(query_clean.replace("-",
+ "")) in (
+ 10,
+ 13,
+ ):
q = f"isbn:{query_clean.replace('-', '')}"
else:
q = query_clean
resp = requests.get(
"https://openlibrary.org/search.json",
- params={"q": q, "limit": limit},
+ params={
+ "q": q,
+ "limit": limit
+ },
timeout=10,
)
resp.raise_for_status()
@@ -202,7 +216,10 @@ class GoogleBooksMetadataProvider(MetadataProvider):
return []
# Prefer ISBN queries when possible
- if query_clean.replace("-", "").isdigit() and len(query_clean.replace("-", "")) in (10, 13):
+ if query_clean.replace("-",
+ "").isdigit() and len(query_clean.replace("-",
+ "")) in (10,
+ 13):
q = f"isbn:{query_clean.replace('-', '')}"
else:
q = query_clean
@@ -210,7 +227,10 @@ class GoogleBooksMetadataProvider(MetadataProvider):
try:
resp = requests.get(
"https://www.googleapis.com/books/v1/volumes",
- params={"q": q, "maxResults": limit},
+ params={
+ "q": q,
+ "maxResults": limit
+ },
timeout=10,
)
resp.raise_for_status()
@@ -228,7 +248,10 @@ class GoogleBooksMetadataProvider(MetadataProvider):
year = str(published_date)[:4] if published_date else ""
identifiers_raw = info.get("industryIdentifiers") or []
- identifiers: Dict[str, Optional[str]] = {"googlebooks": volume.get("id")}
+ identifiers: Dict[str,
+ Optional[str]] = {
+ "googlebooks": volume.get("id")
+ }
for ident in identifiers_raw:
if not isinstance(ident, dict):
continue
@@ -253,7 +276,8 @@ class GoogleBooksMetadataProvider(MetadataProvider):
"authors": authors,
"publisher": publisher,
"identifiers": identifiers,
- "description": info.get("description", ""),
+ "description": info.get("description",
+ ""),
}
)
@@ -341,7 +365,8 @@ class ISBNsearchMetadataProvider(MetadataProvider):
if m_title:
title = self._strip_html_to_text(m_title.group(1))
- raw_fields: Dict[str, str] = {}
+ raw_fields: Dict[str,
+ str] = {}
         strong_matches = list(re.finditer(r"(?is)<strong[^>]*>(.*?)</strong>", html))
for idx, m in enumerate(strong_matches):
label_raw = self._strip_html_to_text(m.group(1))
@@ -354,13 +379,14 @@ class ISBNsearchMetadataProvider(MetadataProvider):
chunk_start = m.end()
             # Stop at next <strong> or end of document.
chunk_end = (
- strong_matches[idx + 1].start() if (idx + 1) < len(strong_matches) else len(html)
+ strong_matches[idx + 1].start() if
+ (idx + 1) < len(strong_matches) else len(html)
)
chunk = html[chunk_start:chunk_end]
# Prefer stopping within the same paragraph when possible.
             m_end = re.search(r"(?is)(</p>|<br\s*/?>)", chunk)
if m_end:
- chunk = chunk[: m_end.start()]
+ chunk = chunk[:m_end.start()]
val_text = self._strip_html_to_text(chunk)
if not val_text:
@@ -391,7 +417,9 @@ class ISBNsearchMetadataProvider(MetadataProvider):
authors: List[str] = []
if author_text:
# Split on common separators; keep multi-part names intact.
- for part in re.split(r"\s*(?:,|;|\band\b|\&|\|)\s*", author_text, flags=re.IGNORECASE):
+ for part in re.split(r"\s*(?:,|;|\band\b|\&|\|)\s*",
+ author_text,
+ flags=re.IGNORECASE):
p = str(part or "").strip()
if p:
authors.append(p)
@@ -412,23 +440,28 @@ class ISBNsearchMetadataProvider(MetadataProvider):
if t and t not in isbn_tokens:
isbn_tokens.append(t)
- item: Dict[str, Any] = {
- "title": title or "",
- # Keep UI columns compatible with the generic metadata table.
- "artist": ", ".join(authors) if authors else "",
- "album": publisher or "",
- "year": year or "",
- "provider": self.name,
- "authors": authors,
- "publisher": publisher or "",
- "language": language or "",
- "pages": pages or "",
- "identifiers": {
- "isbn_13": next((t for t in isbn_tokens if len(t) == 13), None),
- "isbn_10": next((t for t in isbn_tokens if len(t) == 10), None),
- },
- "raw_fields": raw_fields,
- }
+ item: Dict[str,
+ Any] = {
+ "title": title or "",
+ # Keep UI columns compatible with the generic metadata table.
+ "artist": ", ".join(authors) if authors else "",
+ "album": publisher or "",
+ "year": year or "",
+ "provider": self.name,
+ "authors": authors,
+ "publisher": publisher or "",
+ "language": language or "",
+ "pages": pages or "",
+ "identifiers": {
+ "isbn_13":
+ next((t for t in isbn_tokens if len(t) == 13),
+ None),
+ "isbn_10":
+ next((t for t in isbn_tokens if len(t) == 10),
+ None),
+ },
+ "raw_fields": raw_fields,
+ }
# Only return usable items.
if not item.get("title") and not any(item["identifiers"].values()):
@@ -495,7 +528,10 @@ class MusicBrainzMetadataProvider(MetadataProvider):
def search(self, query: str, limit: int = 10) -> List[Dict[str, Any]]:
if not musicbrainzngs:
- log("musicbrainzngs is not installed; skipping MusicBrainz scrape", file=sys.stderr)
+ log(
+ "musicbrainzngs is not installed; skipping MusicBrainz scrape",
+ file=sys.stderr
+ )
return []
q = (query or "").strip()
@@ -526,12 +562,15 @@ class MusicBrainzMetadataProvider(MetadataProvider):
if isinstance(artist_credit, list) and artist_credit:
first = artist_credit[0]
if isinstance(first, dict):
- artist = first.get("name") or first.get("artist", {}).get("name", "")
+ artist = first.get("name") or first.get("artist",
+ {}).get("name",
+ "")
elif isinstance(first, str):
artist = first
album = ""
- release_list = rec.get("release-list") or rec.get("releases") or rec.get("release")
+ release_list = rec.get("release-list") or rec.get("releases"
+ ) or rec.get("release")
if isinstance(release_list, list) and release_list:
first_rel = release_list[0]
if isinstance(first_rel, dict):
@@ -634,7 +673,8 @@ class YtdlpMetadataProvider(MetadataProvider):
upload_date = str(info.get("upload_date") or "")
release_date = str(info.get("release_date") or "")
- year = (release_date or upload_date)[:4] if (release_date or upload_date) else ""
+ year = (release_date
+ or upload_date)[:4] if (release_date or upload_date) else ""
# Provide basic columns for the standard metadata selection table.
# NOTE: This is best-effort; many extractors don't provide artist/album.
@@ -716,15 +756,16 @@ class YtdlpMetadataProvider(MetadataProvider):
# Registry ---------------------------------------------------------------
-_METADATA_PROVIDERS: Dict[str, Type[MetadataProvider]] = {
- "itunes": ITunesProvider,
- "openlibrary": OpenLibraryMetadataProvider,
- "googlebooks": GoogleBooksMetadataProvider,
- "google": GoogleBooksMetadataProvider,
- "isbnsearch": ISBNsearchMetadataProvider,
- "musicbrainz": MusicBrainzMetadataProvider,
- "ytdlp": YtdlpMetadataProvider,
-}
+_METADATA_PROVIDERS: Dict[str,
+ Type[MetadataProvider]] = {
+ "itunes": ITunesProvider,
+ "openlibrary": OpenLibraryMetadataProvider,
+ "googlebooks": GoogleBooksMetadataProvider,
+ "google": GoogleBooksMetadataProvider,
+ "isbnsearch": ISBNsearchMetadataProvider,
+ "musicbrainz": MusicBrainzMetadataProvider,
+ "ytdlp": YtdlpMetadataProvider,
+ }
def register_provider(name: str, provider_cls: Type[MetadataProvider]) -> None:
@@ -732,7 +773,8 @@ def register_provider(name: str, provider_cls: Type[MetadataProvider]) -> None:
def list_metadata_providers(config: Optional[Dict[str, Any]] = None) -> Dict[str, bool]:
- availability: Dict[str, bool] = {}
+ availability: Dict[str,
+ bool] = {}
for name, cls in _METADATA_PROVIDERS.items():
try:
_ = cls(config)
@@ -743,9 +785,10 @@ def list_metadata_providers(config: Optional[Dict[str, Any]] = None) -> Dict[str
return availability
-def get_metadata_provider(
- name: str, config: Optional[Dict[str, Any]] = None
-) -> Optional[MetadataProvider]:
+def get_metadata_provider(name: str,
+ config: Optional[Dict[str,
+ Any]] = None
+ ) -> Optional[MetadataProvider]:
cls = _METADATA_PROVIDERS.get(name.lower())
if not cls:
return None
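Several metadata providers above promote hyphenated digit strings of length 10 or 13 to an isbn: fielded query before hitting OpenLibrary or Google Books. A tiny sketch of that normalisation:

    def to_book_query(query: str) -> str:
        clean = (query or "").strip().replace("-", "")
        if clean.isdigit() and len(clean) in (10, 13):
            return f"isbn:{clean}"
        return (query or "").strip()

    # Example: to_book_query("978-0-13-468599-1") -> "isbn:9780134685991"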
diff --git a/Provider/openlibrary.py b/Provider/openlibrary.py
index 16c8a5b..ae7e368 100644
--- a/Provider/openlibrary.py
+++ b/Provider/openlibrary.py
@@ -19,7 +19,7 @@ import requests
from API.HTTP import HTTPClient
from ProviderCore.base import Provider, SearchResult
from ProviderCore.download import download_file, sanitize_filename
-from cli_syntax import get_field, get_free_text, parse_query
+from SYS.cli_syntax import get_field, get_free_text, parse_query
from SYS.logger import debug, log
from SYS.utils import unique_path
@@ -52,7 +52,9 @@ def _image_paths_to_pdf_bytes(images: List[str]) -> Optional[bytes]:
continue
with Image.open(img_path) as im: # type: ignore[attr-defined]
# Ensure PDF-compatible mode.
- if im.mode in {"RGBA", "LA", "P"}:
+ if im.mode in {"RGBA",
+ "LA",
+ "P"}:
im = im.convert("RGB")
else:
im = im.convert("RGB")
@@ -125,7 +127,8 @@ def _resolve_edition_id(doc: Dict[str, Any]) -> str:
def _check_lendable(session: requests.Session, edition_id: str) -> Tuple[bool, str]:
"""Return (lendable, status_text) using OpenLibrary volumes API."""
try:
- if not edition_id or not edition_id.startswith("OL") or not edition_id.endswith("M"):
+ if not edition_id or not edition_id.startswith("OL") or not edition_id.endswith(
+ "M"):
return False, "not-an-edition"
url = f"https://openlibrary.org/api/volumes/brief/json/OLID:{edition_id}"
@@ -155,7 +158,9 @@ def _check_lendable(session: requests.Session, edition_id: str) -> Tuple[bool, s
def _resolve_archive_id(
- session: requests.Session, edition_id: str, ia_candidates: List[str]
+ session: requests.Session,
+ edition_id: str,
+ ia_candidates: List[str]
) -> str:
# Prefer IA identifiers already present in search results.
if ia_candidates:
@@ -165,7 +170,10 @@ def _resolve_archive_id(
# Otherwise query the edition JSON.
try:
- resp = session.get(f"https://openlibrary.org/books/{edition_id}.json", timeout=6)
+ resp = session.get(
+ f"https://openlibrary.org/books/{edition_id}.json",
+ timeout=6
+ )
resp.raise_for_status()
data = resp.json() or {}
@@ -206,13 +214,19 @@ def _archive_id_from_url(url: str) -> str:
     # - /details/<identifier>/...
     # - /borrow/<identifier>
     # - /download/<identifier>/...
- if len(parts) >= 2 and parts[0].lower() in {"details", "borrow", "download", "stream"}:
+ if len(parts) >= 2 and parts[0].lower() in {"details",
+ "borrow",
+ "download",
+ "stream"}:
return str(parts[1]).strip()
# Sometimes the identifier is the first segment.
if len(parts) >= 1:
first = str(parts[0]).strip()
- if first and first.lower() not in {"account", "services", "search", "advancedsearch.php"}:
+ if first and first.lower() not in {"account",
+ "services",
+ "search",
+ "advancedsearch.php"}:
return first
return ""
@@ -249,14 +263,17 @@ def _coerce_archive_field_list(value: Any) -> List[str]:
return [s] if s else []
-def _archive_item_metadata_to_tags(archive_id: str, item_metadata: Dict[str, Any]) -> List[str]:
+def _archive_item_metadata_to_tags(archive_id: str,
+ item_metadata: Dict[str,
+ Any]) -> List[str]:
"""Map Archive.org metadata JSON (the `metadata` object) to tag strings.
This is intentionally best-effort and conservative: it focuses on stable,
useful bibliographic fields (title/author/publisher/ISBN/identifier/topics).
"""
archive_id_clean = str(archive_id or "").strip()
- meta = item_metadata if isinstance(item_metadata, dict) else {}
+ meta = item_metadata if isinstance(item_metadata,
+ dict) else {}
tags: List[str] = []
seen: set[str] = set()
@@ -374,7 +391,10 @@ def _archive_item_metadata_to_tags(archive_id: str, item_metadata: Dict[str, Any
return tags
-def _fetch_archive_item_metadata(archive_id: str, *, timeout: int = 8) -> Dict[str, Any]:
+def _fetch_archive_item_metadata(archive_id: str,
+ *,
+ timeout: int = 8) -> Dict[str,
+ Any]:
ident = str(archive_id or "").strip()
if not ident:
return {}
@@ -384,7 +404,8 @@ def _fetch_archive_item_metadata(archive_id: str, *, timeout: int = 8) -> Dict[s
if not isinstance(data, dict):
return {}
meta = data.get("metadata")
- return meta if isinstance(meta, dict) else {}
+ return meta if isinstance(meta,
+ dict) else {}
class OpenLibrary(Provider):
@@ -404,7 +425,9 @@ class OpenLibrary(Provider):
"""Raised when a book is not available for borrowing (waitlisted/in use)."""
@staticmethod
- def _credential_archive(config: Dict[str, Any]) -> Tuple[Optional[str], Optional[str]]:
+ def _credential_archive(config: Dict[str,
+ Any]) -> Tuple[Optional[str],
+ Optional[str]]:
"""Get Archive.org email/password from config.
Supports:
@@ -415,9 +438,11 @@ class OpenLibrary(Provider):
if not isinstance(config, dict):
return None, None
- provider_config = config.get("provider", {})
+ provider_config = config.get("provider",
+ {})
if isinstance(provider_config, dict):
- openlibrary_config = provider_config.get("openlibrary", {})
+ openlibrary_config = provider_config.get("openlibrary",
+ {})
if isinstance(openlibrary_config, dict):
email = openlibrary_config.get("email")
password = openlibrary_config.get("password")
@@ -456,7 +481,10 @@ class OpenLibrary(Provider):
"""Login to archive.org using the token-based services endpoint (matches test-login.py)."""
session = requests.Session()
- token_resp = session.get("https://archive.org/services/account/login/", timeout=30)
+ token_resp = session.get(
+ "https://archive.org/services/account/login/",
+ timeout=30
+ )
try:
token_json = token_resp.json()
except Exception as exc:
@@ -473,8 +501,14 @@ class OpenLibrary(Provider):
if not token:
raise RuntimeError("Archive login token missing")
- headers = {"Content-Type": "application/x-www-form-urlencoded"}
- payload = {"username": email, "password": password, "t": token}
+ headers = {
+ "Content-Type": "application/x-www-form-urlencoded"
+ }
+ payload = {
+ "username": email,
+ "password": password,
+ "t": token
+ }
login_resp = session.post(
"https://archive.org/services/account/login/",
@@ -499,22 +533,34 @@ class OpenLibrary(Provider):
@classmethod
def _archive_loan(
- cls, session: requests.Session, book_id: str, *, verbose: bool = True
+ cls,
+ session: requests.Session,
+ book_id: str,
+ *,
+ verbose: bool = True
) -> requests.Session:
- data = {"action": "grant_access", "identifier": book_id}
+ data = {
+ "action": "grant_access",
+ "identifier": book_id
+ }
session.post(
- "https://archive.org/services/loans/loan/searchInside.php", data=data, timeout=30
+ "https://archive.org/services/loans/loan/searchInside.php",
+ data=data,
+ timeout=30
)
data["action"] = "browse_book"
- response = session.post("https://archive.org/services/loans/loan/", data=data, timeout=30)
+ response = session.post(
+ "https://archive.org/services/loans/loan/",
+ data=data,
+ timeout=30
+ )
if response.status_code == 400:
try:
err = (response.json() or {}).get("error")
- if (
- err
- == "This book is not available to borrow at this time. Please try again later."
- ):
+ if (err ==
+ "This book is not available to borrow at this time. Please try again later."
+ ):
raise cls.BookNotAvailableError("Book is waitlisted or in use")
raise RuntimeError(f"Borrow failed: {err or response.text}")
except cls.BookNotAvailableError:
@@ -523,15 +569,26 @@ class OpenLibrary(Provider):
raise RuntimeError("The book cannot be borrowed")
data["action"] = "create_token"
- response = session.post("https://archive.org/services/loans/loan/", data=data, timeout=30)
+ response = session.post(
+ "https://archive.org/services/loans/loan/",
+ data=data,
+ timeout=30
+ )
if "token" in (response.text or ""):
return session
raise RuntimeError("Something went wrong when trying to borrow the book")
@staticmethod
def _archive_return_loan(session: requests.Session, book_id: str) -> None:
- data = {"action": "return_loan", "identifier": book_id}
- response = session.post("https://archive.org/services/loans/loan/", data=data, timeout=30)
+ data = {
+ "action": "return_loan",
+ "identifier": book_id
+ }
+ response = session.post(
+ "https://archive.org/services/loans/loan/",
+ data=data,
+ timeout=30
+ )
if response.status_code == 200:
try:
if (response.json() or {}).get("success"):
@@ -551,8 +608,8 @@ class OpenLibrary(Provider):
if session is None:
return
for url in (
- "https://archive.org/account/logout",
- "https://archive.org/account/logout.php",
+ "https://archive.org/account/logout",
+ "https://archive.org/account/logout.php",
):
try:
resp = session.get(url, timeout=15, allow_redirects=True)
@@ -579,7 +636,9 @@ class OpenLibrary(Provider):
resp = requests.get(f"https://archive.org/metadata/{ident}", timeout=8)
resp.raise_for_status()
data = resp.json() if resp is not None else {}
- meta = data.get("metadata", {}) if isinstance(data, dict) else {}
+ meta = data.get("metadata",
+ {}) if isinstance(data,
+ dict) else {}
collection = meta.get("collection") if isinstance(meta, dict) else None
values: List[str] = []
@@ -588,16 +647,20 @@ class OpenLibrary(Provider):
elif isinstance(collection, str):
values = [collection.strip().lower()]
- if any(v in {"inlibrary", "printdisabled", "lendinglibrary"} for v in values):
+ if any(v in {"inlibrary",
+ "printdisabled",
+ "lendinglibrary"} for v in values):
return True, "archive-collection"
return False, "archive-not-lendable"
except Exception:
return False, "archive-metadata-error"
@staticmethod
- def _archive_get_book_infos(
- session: requests.Session, url: str
- ) -> Tuple[str, List[str], Dict[str, Any]]:
+ def _archive_get_book_infos(session: requests.Session,
+ url: str) -> Tuple[str,
+ List[str],
+ Dict[str,
+ Any]]:
"""Extract page links from Archive.org book reader."""
r = session.get(url, timeout=30).text
@@ -620,7 +683,8 @@ class OpenLibrary(Provider):
metadata = data.get("metadata") or {}
links: List[str] = []
- br_data = (data.get("brOptions") or {}).get("data", [])
+ br_data = (data.get("brOptions") or {}).get("data",
+ [])
if isinstance(br_data, list):
for item in br_data:
if isinstance(item, list):
@@ -639,7 +703,11 @@ class OpenLibrary(Provider):
return f"{directory}/{(len(str(pages)) - len(str(page))) * '0'}{page}.jpg"
@staticmethod
- def _archive_deobfuscate_image(image_data: bytes, link: str, obf_header: str) -> bytes:
+ def _archive_deobfuscate_image(
+ image_data: bytes,
+ link: str,
+ obf_header: str
+ ) -> bytes:
if not AES or not Counter:
raise RuntimeError("Crypto library not available")
@@ -657,11 +725,18 @@ class OpenLibrary(Provider):
counter_bytes = base64.b64decode(counter_b64)
if len(counter_bytes) != 16:
- raise ValueError(f"Expected counter to be 16 bytes, got {len(counter_bytes)}")
+ raise ValueError(
+ f"Expected counter to be 16 bytes, got {len(counter_bytes)}"
+ )
prefix = counter_bytes[:8]
initial_value = int.from_bytes(counter_bytes[8:], byteorder="big")
- ctr = Counter.new(64, prefix=prefix, initial_value=initial_value, little_endian=False) # type: ignore
+ ctr = Counter.new(
+ 64,
+ prefix=prefix,
+ initial_value=initial_value,
+ little_endian=False
+ ) # type: ignore
cipher = AES.new(key, AES.MODE_CTR, counter=ctr) # type: ignore
decrypted_part = cipher.decrypt(image_data[:1024])
@@ -699,7 +774,11 @@ class OpenLibrary(Provider):
image = cls._archive_image_name(pages, i, directory)
obf_header = response.headers.get("X-Obfuscate")
if obf_header:
- image_content = cls._archive_deobfuscate_image(response.content, link, obf_header)
+ image_content = cls._archive_deobfuscate_image(
+ response.content,
+ link,
+ obf_header
+ )
else:
image_content = response.content
@@ -715,7 +794,9 @@ class OpenLibrary(Provider):
links: List[str],
scale: int,
book_id: str,
- progress_callback: Optional[Callable[[int, int], None]] = None,
+ progress_callback: Optional[Callable[[int,
+ int],
+ None]] = None,
) -> List[str]:
links_scaled = [f"{link}&rotate=0&scale={scale}" for link in links]
pages = len(links_scaled)
@@ -748,7 +829,8 @@ class OpenLibrary(Provider):
except Exception:
pass
elif tqdm:
- for _ in tqdm(futures.as_completed(tasks), total=len(tasks)): # type: ignore
+ for _ in tqdm(futures.as_completed(tasks),
+ total=len(tasks)): # type: ignore
pass
else:
for _ in futures.as_completed(tasks):
@@ -770,11 +852,16 @@ class OpenLibrary(Provider):
if not isinstance(file_info, dict):
continue
filename = str(file_info.get("name", ""))
- if filename.endswith(".pdf") and file_info.get("source") == "original":
+ if filename.endswith(".pdf") and file_info.get("source"
+ ) == "original":
pdf_url = (
f"https://archive.org/download/{book_id}/{filename.replace(' ', '%20')}"
)
- check_response = requests.head(pdf_url, timeout=4, allow_redirects=True)
+ check_response = requests.head(
+ pdf_url,
+ timeout=4,
+ allow_redirects=True
+ )
if check_response.status_code == 200:
return True, pdf_url
return False, ""
@@ -927,7 +1014,8 @@ class OpenLibrary(Provider):
author_key = None
if isinstance(author, dict):
if isinstance(author.get("author"), dict):
- author_key = author.get("author", {}).get("key")
+ author_key = author.get("author",
+ {}).get("key")
if not author_key:
author_key = author.get("key")
@@ -937,7 +1025,9 @@ class OpenLibrary(Provider):
with HTTPClient(timeout=10) as client:
author_resp = client.get(author_url)
author_resp.raise_for_status()
- author_data = json_module.loads(author_resp.content.decode("utf-8"))
+ author_data = json_module.loads(
+ author_resp.content.decode("utf-8")
+ )
if isinstance(author_data, dict) and author_data.get("name"):
new_tags.append(f"author:{author_data['name']}")
continue
@@ -1011,7 +1101,8 @@ class OpenLibrary(Provider):
self,
query: str,
limit: int = 50,
- filters: Optional[Dict[str, Any]] = None,
+ filters: Optional[Dict[str,
+ Any]] = None,
**kwargs: Any,
) -> List[SearchResult]:
filters = filters or {}
@@ -1032,7 +1123,10 @@ class OpenLibrary(Provider):
try:
resp = self._session.get(
"https://openlibrary.org/search.json",
- params={"q": q, "limit": int(limit)},
+ params={
+ "q": q,
+ "limit": int(limit)
+ },
timeout=10,
)
resp.raise_for_status()
@@ -1048,9 +1142,13 @@ class OpenLibrary(Provider):
# Availability enrichment can be slow if done sequentially (it may require multiple
# network calls per row). Do it concurrently to keep the pipeline responsive.
- docs = docs[: int(limit)]
+ docs = docs[:int(limit)]
- def _compute_availability(doc_dict: Dict[str, Any]) -> Tuple[str, str, str, str]:
+ def _compute_availability(doc_dict: Dict[str,
+ Any]) -> Tuple[str,
+ str,
+ str,
+ str]:
edition_id_local = _resolve_edition_id(doc_dict)
if not edition_id_local:
return "no-olid", "", "", ""
@@ -1066,7 +1164,9 @@ class OpenLibrary(Provider):
try:
archive_id_local = _resolve_archive_id(
- session_local, edition_id_local, ia_ids_local
+ session_local,
+ edition_id_local,
+ ia_ids_local
)
except Exception:
archive_id_local = ""
@@ -1089,17 +1189,23 @@ class OpenLibrary(Provider):
return "unavailable", reason_local, archive_id_local, ""
- availability_rows: List[Tuple[str, str, str, str]] = [
- ("unknown", "", "", "") for _ in range(len(docs))
- ]
+ availability_rows: List[Tuple[str,
+ str,
+ str,
+ str]] = [
+ ("unknown",
+ "",
+ "",
+ "") for _ in range(len(docs))
+ ]
if docs:
max_workers = min(8, max(1, len(docs)))
done = 0
with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
future_to_index = {
- executor.submit(_compute_availability, doc_dict): i
- for i, doc_dict in enumerate(docs)
- if isinstance(doc_dict, dict)
+ executor.submit(_compute_availability,
+ doc_dict): i
+ for i, doc_dict in enumerate(docs) if isinstance(doc_dict, dict)
}
for fut in futures.as_completed(list(future_to_index.keys())):
i = future_to_index[fut]
@@ -1145,11 +1251,16 @@ class OpenLibrary(Provider):
isbn_10 = next((str(i) for i in isbn_list if len(str(i)) == 10), "")
columns = [
- ("Title", book_title),
- ("Author", ", ".join(authors_list)),
- ("Year", year),
- ("Avail", ""),
- ("OLID", edition_id),
+ ("Title",
+ book_title),
+ ("Author",
+ ", ".join(authors_list)),
+ ("Year",
+ year),
+ ("Avail",
+ ""),
+ ("OLID",
+ edition_id),
]
# Determine availability using the concurrently computed enrichment.
@@ -1170,7 +1281,8 @@ class OpenLibrary(Provider):
annotations.append(f"isbn_10:{isbn_10}")
if ia_ids:
annotations.append("archive")
- if availability in {"download", "borrow"}:
+ if availability in {"download",
+ "borrow"}:
annotations.append(availability)
results.append(
@@ -1178,17 +1290,17 @@ class OpenLibrary(Provider):
table="openlibrary",
title=book_title,
path=(
- f"https://openlibrary.org/books/{edition_id}"
- if edition_id
- else (
+ f"https://openlibrary.org/books/{edition_id}" if edition_id else
+ (
f"https://openlibrary.org{work_key}"
- if isinstance(work_key, str) and work_key.startswith("/")
- else "https://openlibrary.org"
+ if isinstance(work_key,
+ str) and work_key.startswith("/") else
+ "https://openlibrary.org"
)
),
detail=(
- (f"By: {', '.join(authors_list)}" if authors_list else "")
- + (f" ({year})" if year else "")
+ (f"By: {', '.join(authors_list)}" if authors_list else "") +
+ (f" ({year})" if year else "")
).strip(),
annotations=annotations,
media_kind="book",
@@ -1216,7 +1328,11 @@ class OpenLibrary(Provider):
self,
result: SearchResult,
output_dir: Path,
- progress_callback: Optional[Callable[[str, int, Optional[int], str], None]] = None,
+ progress_callback: Optional[Callable[[str,
+ int,
+ Optional[int],
+ str],
+ None]] = None,
) -> Optional[Path]:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
@@ -1245,7 +1361,10 @@ class OpenLibrary(Provider):
archive_id = _archive_id_from_url(str(getattr(result, "path", "") or ""))
if not archive_id:
- log("[openlibrary] No archive identifier available; cannot download", file=sys.stderr)
+ log(
+ "[openlibrary] No archive identifier available; cannot download",
+ file=sys.stderr
+ )
return None
# Best-effort metadata scrape to attach bibliographic tags for downstream cmdlets.
@@ -1290,12 +1409,9 @@ class OpenLibrary(Provider):
session=self._session,
progress_callback=(
(
- lambda downloaded, total, label: progress_callback(
- "bytes", downloaded, total, label
- )
- )
- if progress_callback is not None
- else None
+ lambda downloaded, total, label:
+ progress_callback("bytes", downloaded, total, label)
+ ) if progress_callback is not None else None
),
)
if ok:
@@ -1307,7 +1423,10 @@ class OpenLibrary(Provider):
try:
email, password = self._credential_archive(self.config or {})
if not email or not password:
- log("[openlibrary] Archive credentials missing; cannot borrow", file=sys.stderr)
+ log(
+ "[openlibrary] Archive credentials missing; cannot borrow",
+ file=sys.stderr
+ )
return None
lendable = True
@@ -1369,7 +1488,10 @@ class OpenLibrary(Provider):
continue
if not links:
- log(f"[openlibrary] Failed to extract pages: {last_exc}", file=sys.stderr)
+ log(
+ f"[openlibrary] Failed to extract pages: {last_exc}",
+ file=sys.stderr
+ )
return None
try:
@@ -1388,9 +1510,10 @@ class OpenLibrary(Provider):
scale=3,
book_id=archive_id,
progress_callback=(
- (lambda done, total: progress_callback("pages", done, total, "pages"))
- if progress_callback is not None
- else None
+ (
+ lambda done, total:
+ progress_callback("pages", done, total, "pages")
+ ) if progress_callback is not None else None
),
)
@@ -1436,7 +1559,10 @@ class OpenLibrary(Provider):
try:
self._archive_return_loan(session, archive_id)
except Exception as exc:
- log(f"[openlibrary] Warning: failed to return loan: {exc}", file=sys.stderr)
+ log(
+ f"[openlibrary] Warning: failed to return loan: {exc}",
+ file=sys.stderr
+ )
try:
self._archive_logout(session)
except Exception:
diff --git a/Provider/soulseek.py b/Provider/soulseek.py
index f015b49..07fe83b 100644
--- a/Provider/soulseek.py
+++ b/Provider/soulseek.py
@@ -15,7 +15,6 @@ from ProviderCore.base import Provider, SearchResult
from SYS.logger import log, debug
from models import ProgressBar
-
_SOULSEEK_NOISE_SUBSTRINGS = (
"search reply ticket does not match any search request",
"failed to receive transfer ticket on file connection",
@@ -82,11 +81,11 @@ def _configure_aioslsk_logging() -> None:
aioslsk to ERROR and stop propagation so it doesn't spam the CLI.
"""
for name in (
- "aioslsk",
- "aioslsk.network",
- "aioslsk.search",
- "aioslsk.transfer",
- "aioslsk.transfer.manager",
+ "aioslsk",
+ "aioslsk.network",
+ "aioslsk.search",
+ "aioslsk.transfer",
+ "aioslsk.transfer.manager",
):
logger = logging.getLogger(name)
logger.setLevel(logging.ERROR)
@@ -237,7 +236,7 @@ class Soulseek(Provider):
def __init__(self, config: Optional[Dict[str, Any]] = None):
super().__init__(config)
try:
- from config import get_soulseek_username, get_soulseek_password
+ from SYS.config import get_soulseek_username, get_soulseek_password
user = get_soulseek_username(self.config)
pwd = get_soulseek_password(self.config)
@@ -257,7 +256,10 @@ class Soulseek(Provider):
filename = full_metadata.get("filename") or result.path
if not username or not filename:
- log(f"[soulseek] Missing metadata for download: {result.title}", file=sys.stderr)
+ log(
+ f"[soulseek] Missing metadata for download: {result.title}",
+ file=sys.stderr
+ )
return None
# This cmdlet stack is synchronous; use asyncio.run for clarity.
@@ -294,9 +296,11 @@ class Soulseek(Provider):
log(f"[soulseek] Download error: {exc}", file=sys.stderr)
return None
- async def perform_search(
- self, query: str, timeout: float = 9.0, limit: int = 50
- ) -> List[Dict[str, Any]]:
+ async def perform_search(self,
+ query: str,
+ timeout: float = 9.0,
+ limit: int = 50) -> List[Dict[str,
+ Any]]:
"""Perform async Soulseek search."""
from aioslsk.client import SoulSeekClient
@@ -305,7 +309,10 @@ class Soulseek(Provider):
os.makedirs(self.DOWNLOAD_DIR, exist_ok=True)
settings = Settings(
- credentials=CredentialsSettings(username=self.USERNAME, password=self.PASSWORD)
+ credentials=CredentialsSettings(
+ username=self.USERNAME,
+ password=self.PASSWORD
+ )
)
client = SoulSeekClient(settings)
@@ -315,7 +322,10 @@ class Soulseek(Provider):
await client.start()
await client.login()
except Exception as exc:
- log(f"[soulseek] Login failed: {type(exc).__name__}: {exc}", file=sys.stderr)
+ log(
+ f"[soulseek] Login failed: {type(exc).__name__}: {exc}",
+ file=sys.stderr
+ )
return []
try:
@@ -323,7 +333,10 @@ class Soulseek(Provider):
await self._collect_results(search_request, timeout=timeout)
return self._flatten_results(search_request)[:limit]
except Exception as exc:
- log(f"[soulseek] Search error: {type(exc).__name__}: {exc}", file=sys.stderr)
+ log(
+ f"[soulseek] Search error: {type(exc).__name__}: {exc}",
+ file=sys.stderr
+ )
return []
finally:
# Best-effort: try to cancel/close the search request before stopping
@@ -356,8 +369,12 @@ class Soulseek(Provider):
{
"file": file_data,
"username": username,
- "filename": getattr(file_data, "filename", "?"),
- "size": getattr(file_data, "filesize", 0),
+ "filename": getattr(file_data,
+ "filename",
+ "?"),
+ "size": getattr(file_data,
+ "filesize",
+ 0),
}
)
@@ -366,14 +383,22 @@ class Soulseek(Provider):
{
"file": file_data,
"username": username,
- "filename": getattr(file_data, "filename", "?"),
- "size": getattr(file_data, "filesize", 0),
+ "filename": getattr(file_data,
+ "filename",
+ "?"),
+ "size": getattr(file_data,
+ "filesize",
+ 0),
}
)
return flat
- async def _collect_results(self, search_request: Any, timeout: float = 75.0) -> None:
+ async def _collect_results(
+ self,
+ search_request: Any,
+ timeout: float = 75.0
+ ) -> None:
end = time.time() + timeout
last_count = 0
while time.time() < end:
@@ -387,20 +412,28 @@ class Soulseek(Provider):
self,
query: str,
limit: int = 50,
- filters: Optional[Dict[str, Any]] = None,
+ filters: Optional[Dict[str,
+ Any]] = None,
**kwargs: Any,
) -> List[SearchResult]:
filters = filters or {}
try:
- flat_results = asyncio.run(self.perform_search(query, timeout=9.0, limit=limit))
+ flat_results = asyncio.run(
+ self.perform_search(query,
+ timeout=9.0,
+ limit=limit)
+ )
if not flat_results:
return []
music_results: List[dict] = []
for item in flat_results:
filename = item["filename"]
- ext = ("." + filename.rsplit(".", 1)[-1].lower()) if "." in filename else ""
+ ext = (
+ "." + filename.rsplit(".",
+ 1)[-1].lower()
+ ) if "." in filename else ""
if ext in self.MUSIC_EXTENSIONS:
music_results.append(item)
@@ -410,18 +443,23 @@ class Soulseek(Provider):
enriched_results: List[dict] = []
for item in music_results:
filename = item["filename"]
- ext = ("." + filename.rsplit(".", 1)[-1].lower()) if "." in filename else ""
+ ext = (
+ "." + filename.rsplit(".",
+ 1)[-1].lower()
+ ) if "." in filename else ""
display_name = filename.replace("\\", "/").split("/")[-1]
path_parts = filename.replace("\\", "/").split("/")
artist = path_parts[-3] if len(path_parts) >= 3 else ""
album = (
- path_parts[-2]
- if len(path_parts) >= 3
- else (path_parts[-2] if len(path_parts) == 2 else "")
+ path_parts[-2] if len(path_parts) >= 3 else
+ (path_parts[-2] if len(path_parts) == 2 else "")
)
- base_name = display_name.rsplit(".", 1)[0] if "." in display_name else display_name
+ base_name = display_name.rsplit(
+ ".",
+ 1
+ )[0] if "." in display_name else display_name
track_num = ""
title = base_name
filename_artist = ""
@@ -457,7 +495,8 @@ class Soulseek(Provider):
if artist_filter or album_filter or track_filter:
filtered: List[dict] = []
for item in enriched_results:
- if artist_filter and artist_filter not in item["artist"].lower():
+ if artist_filter and artist_filter not in item["artist"].lower(
+ ):
continue
if album_filter and album_filter not in item["album"].lower():
continue
@@ -466,7 +505,9 @@ class Soulseek(Provider):
filtered.append(item)
enriched_results = filtered
- enriched_results.sort(key=lambda item: (item["ext"].lower() != ".flac", -item["size"]))
+ enriched_results.sort(
+ key=lambda item: (item["ext"].lower() != ".flac", -item["size"])
+ )
results: List[SearchResult] = []
for item in enriched_results:
@@ -475,11 +516,16 @@ class Soulseek(Provider):
size_mb = int(item["size"] / 1024 / 1024)
columns = [
- ("Track", item["track_num"] or "?"),
- ("Title", item["title"][:40]),
- ("Artist", artist_display[:32]),
- ("Album", album_display[:32]),
- ("Size", f"{size_mb} MB"),
+ ("Track",
+ item["track_num"] or "?"),
+ ("Title",
+ item["title"][:40]),
+ ("Artist",
+ artist_display[:32]),
+ ("Album",
+ album_display[:32]),
+ ("Size",
+ f"{size_mb} MB"),
]
results.append(
@@ -488,7 +534,8 @@ class Soulseek(Provider):
title=item["title"],
path=item["filename"],
detail=f"{artist_display} - {album_display}",
- annotations=[f"{size_mb} MB", item["ext"].lstrip(".").upper()],
+ annotations=[f"{size_mb} MB",
+ item["ext"].lstrip(".").upper()],
media_kind="audio",
size_bytes=item["size"],
columns=columns,
@@ -515,7 +562,7 @@ class Soulseek(Provider):
# Require configured credentials.
try:
- from config import get_soulseek_username, get_soulseek_password
+ from SYS.config import get_soulseek_username, get_soulseek_password
user = get_soulseek_username(self.config)
pwd = get_soulseek_password(self.config)
@@ -570,10 +617,16 @@ async def download_soulseek_file(
)
settings = Settings(
- credentials=CredentialsSettings(username=login_user, password=login_pass)
+ credentials=CredentialsSettings(username=login_user,
+ password=login_pass)
)
- async def _attempt_once(attempt_num: int) -> tuple[Optional[Path], Any, int, float]:
+ async def _attempt_once(
+ attempt_num: int
+ ) -> tuple[Optional[Path],
+ Any,
+ int,
+ float]:
client = SoulSeekClient(settings)
with _suppress_aioslsk_noise():
async with _suppress_aioslsk_asyncio_task_noise():
@@ -586,10 +639,14 @@ async def download_soulseek_file(
f"[soulseek] Download attempt {attempt_num}: {username} :: {local_filename}",
file=sys.stderr,
)
- debug(f"[soulseek] Requesting download from {username}: {filename}")
+ debug(
+ f"[soulseek] Requesting download from {username}: {filename}"
+ )
transfer = await client.transfers.add(
- Transfer(username, filename, TransferDirection.DOWNLOAD)
+ Transfer(username,
+ filename,
+ TransferDirection.DOWNLOAD)
)
transfer.local_path = str(output_path)
await client.transfers.queue(transfer)
@@ -602,14 +659,29 @@ async def download_soulseek_file(
elapsed = time.time() - start_time
if elapsed > timeout:
log(
- f"[soulseek] Download timeout after {timeout}s", file=sys.stderr
+ f"[soulseek] Download timeout after {timeout}s",
+ file=sys.stderr
+ )
+ bytes_done = int(
+ getattr(transfer,
+ "bytes_transfered",
+ 0) or 0
+ )
+ state_val = getattr(
+ getattr(transfer,
+ "state",
+ None),
+ "VALUE",
+ None
)
- bytes_done = int(getattr(transfer, "bytes_transfered", 0) or 0)
- state_val = getattr(getattr(transfer, "state", None), "VALUE", None)
progress_bar.finish()
return None, state_val, bytes_done, elapsed
- bytes_done = int(getattr(transfer, "bytes_transfered", 0) or 0)
+ bytes_done = int(
+ getattr(transfer,
+ "bytes_transfered",
+ 0) or 0
+ )
total_bytes = int(getattr(transfer, "filesize", 0) or 0)
now = time.time()
if now - last_progress_time >= 0.5:
@@ -623,11 +695,18 @@ async def download_soulseek_file(
await asyncio.sleep(1)
- final_state = getattr(getattr(transfer, "state", None), "VALUE", None)
+ final_state = getattr(
+ getattr(transfer,
+ "state",
+ None),
+ "VALUE",
+ None
+ )
downloaded_path = (
Path(transfer.local_path)
- if getattr(transfer, "local_path", None)
- else output_path
+ if getattr(transfer,
+ "local_path",
+ None) else output_path
)
final_elapsed = time.time() - start_time
@@ -636,7 +715,8 @@ async def download_soulseek_file(
# If a file was written, treat it as success even if state is odd.
try:
- if downloaded_path.exists() and downloaded_path.stat().st_size > 0:
+ if downloaded_path.exists() and downloaded_path.stat(
+ ).st_size > 0:
if final_state != TransferState.COMPLETE:
log(
f"[soulseek] Transfer finalized as {final_state}, but file exists ({downloaded_path.stat().st_size} bytes). Keeping file.",
@@ -651,7 +731,8 @@ async def download_soulseek_file(
except Exception:
pass
- if final_state == TransferState.COMPLETE and downloaded_path.exists():
+ if final_state == TransferState.COMPLETE and downloaded_path.exists(
+ ):
debug(f"[soulseek] Download complete: {downloaded_path}")
return (
downloaded_path,
@@ -670,7 +751,8 @@ async def download_soulseek_file(
# Clean up 0-byte placeholder.
try:
- if downloaded_path.exists() and downloaded_path.stat().st_size == 0:
+ if downloaded_path.exists() and downloaded_path.stat(
+ ).st_size == 0:
downloaded_path.unlink(missing_ok=True)
except Exception:
pass
@@ -696,7 +778,8 @@ async def download_soulseek_file(
should_retry = (bytes_done == 0) and (elapsed < 15.0)
if attempt < max_attempts and should_retry:
log(
- f"[soulseek] Retrying after fast failure (state={final_state})", file=sys.stderr
+ f"[soulseek] Retrying after fast failure (state={final_state})",
+ file=sys.stderr
)
await asyncio.sleep(2)
continue
@@ -704,7 +787,10 @@ async def download_soulseek_file(
return None
except ImportError:
- log("[soulseek] aioslsk not installed. Install with: pip install aioslsk", file=sys.stderr)
+ log(
+ "[soulseek] aioslsk not installed. Install with: pip install aioslsk",
+ file=sys.stderr
+ )
return None
except Exception as exc:
log(f"[soulseek] Download failed: {type(exc).__name__}: {exc}", file=sys.stderr)
diff --git a/Provider/telegram.py b/Provider/telegram.py
index f9e12b2..e70d359 100644
--- a/Provider/telegram.py
+++ b/Provider/telegram.py
@@ -12,7 +12,6 @@ from urllib.parse import urlparse
from ProviderCore.base import Provider, SearchResult
-
_TELEGRAM_DEFAULT_TIMESTAMP_STEM_RE = re.compile(
r"^(?Pphoto|video|document|audio|voice|animation)_(?P\d{4}-\d{2}-\d{2})_(?P