This commit is contained in: (branch/tag list missing from capture)
Commit date: 2026-01-31 19:00:04 -08:00
parent dcf16e0cc4
commit 6513a3ad04
25 changed files with 617 additions and 397 deletions

View File

@@ -95,9 +95,9 @@ def _resolve_verify_value(verify_ssl: bool) -> Union[bool, str]:
if res:
return res
except Exception:
pass
logger.exception("Failed while probing certifi helper inner block")
except Exception:
pass
logger.exception("Failed while invoking cert helper function")
return None
# Prefer helpful modules if available (use safe checks to avoid first-chance import errors)
@@ -107,7 +107,7 @@ def _resolve_verify_value(verify_ssl: bool) -> Union[bool, str]:
try:
os.environ["SSL_CERT_FILE"] = path
except Exception:
pass
logger.exception("Failed to set SSL_CERT_FILE environment variable")
logger.info(f"SSL_CERT_FILE not set; using bundle from {mod_name}: {path}")
return path
@@ -120,11 +120,11 @@ def _resolve_verify_value(verify_ssl: bool) -> Union[bool, str]:
try:
os.environ["SSL_CERT_FILE"] = path
except Exception:
pass
logger.exception("Failed to set SSL_CERT_FILE environment variable during certifi fallback")
logger.info(f"SSL_CERT_FILE not set; using certifi bundle: {path}")
return path
except Exception:
pass
logger.exception("Failed to probe certifi for trust bundle")
# Fallback to certifi
try:
@@ -135,11 +135,11 @@ def _resolve_verify_value(verify_ssl: bool) -> Union[bool, str]:
try:
os.environ["SSL_CERT_FILE"] = path
except Exception:
pass
logger.exception("Failed to set SSL_CERT_FILE environment variable during certifi fallback")
logger.info(f"SSL_CERT_FILE not set; using certifi bundle: {path}")
return path
except Exception:
pass
logger.exception("Failed to probe certifi for trust bundle")
return True
@@ -400,7 +400,7 @@ class HTTPClient:
try:
progress_callback(0, total_bytes)
except Exception:
pass
logger.exception("Error in progress_callback initial call")
with open(path, "wb") as f:
for chunk in response.iter_bytes(chunk_size):
@@ -415,7 +415,7 @@ class HTTPClient:
try:
progress_callback(bytes_downloaded, total_bytes)
except Exception:
pass
logger.exception("Error in progress_callback final call")
return path
@@ -496,7 +496,7 @@ class HTTPClient:
if 400 <= e.response.status_code < 500:
try:
response_text = e.response.text[:500]
except:
except Exception:
response_text = "<unable to read response>"
if log_http_errors:
logger.error(
@@ -506,7 +506,7 @@ class HTTPClient:
last_exception = e
try:
response_text = e.response.text[:200]
except:
except Exception:
response_text = "<unable to read response>"
logger.warning(
f"HTTP {e.response.status_code} on attempt {attempt + 1}/{self.retries}: {url} - {response_text}"
@@ -715,7 +715,7 @@ def download_direct_file(
except DownloadError:
raise
except Exception:
pass
logger.exception("Unexpected error while probing URL content")
suggested = _sanitize_filename(suggested_filename) if suggested_filename else ""
if suggested:
@@ -727,12 +727,14 @@ def download_direct_file(
try:
detected_ext = Path(str(filename)).suffix
except Exception:
logger.exception("Failed to detect file extension from filename")
detected_ext = ""
filename = suggested + detected_ext if detected_ext else suggested
try:
has_ext = bool(filename and Path(str(filename)).suffix)
except Exception:
logger.exception("Failed to determine if filename has extension")
has_ext = False
if filename and (not has_ext):
@@ -823,7 +825,7 @@ def download_direct_file(
total=total_val,
)
except Exception:
pass
logger.exception("Error updating pipeline progress transfer")
if progress_bar is not None:
progress_bar.update(
@@ -842,7 +844,7 @@ def download_direct_file(
if progress_bar is not None:
progress_bar.finish()
except Exception:
pass
logger.exception("Failed to finish progress bar")
try:
if pipeline_progress is not None and transfer_started[0] and hasattr(
@@ -850,7 +852,7 @@ def download_direct_file(
):
pipeline_progress.finish_transfer(label=str(filename or "download"))
except Exception:
pass
logger.exception("Failed to finish pipeline transfer")
if not quiet:
debug(f"✓ Downloaded in {elapsed:.1f}s")
@@ -871,7 +873,7 @@ def download_direct_file(
try:
hash_value = sha256_file(file_path)
except Exception:
pass
logger.exception("Failed to compute SHA256 of downloaded file")
tags: List[str] = []
if extract_ytdlp_tags is not None:
@@ -908,14 +910,14 @@ def download_direct_file(
if progress_bar is not None:
progress_bar.finish()
except Exception:
pass
logger.exception("Failed to finish progress bar during HTTP error handling")
try:
if pipeline_progress is not None and transfer_started[0] and hasattr(
pipeline_progress, "finish_transfer"
):
pipeline_progress.finish_transfer(label=str(filename or "download"))
except Exception:
pass
logger.exception("Failed to finish pipeline transfer during HTTP error handling")
log(f"Download error: {exc}", file=sys.stderr)
if debug_logger is not None:
@@ -930,14 +932,14 @@ def download_direct_file(
if progress_bar is not None:
progress_bar.finish()
except Exception:
pass
logger.exception("Failed to finish progress bar during error handling")
try:
if pipeline_progress is not None and transfer_started[0] and hasattr(
pipeline_progress, "finish_transfer"
):
pipeline_progress.finish_transfer(label=str(filename or "download"))
except Exception:
pass
logger.exception("Failed to finish pipeline transfer during error handling")
log(f"Error downloading file: {exc}", file=sys.stderr)
if debug_logger is not None:
@@ -1163,7 +1165,7 @@ class AsyncHTTPClient:
last_exception = e
try:
response_text = e.response.text[:200]
except:
except Exception:
response_text = "<unable to read response>"
logger.warning(
f"HTTP {e.response.status_code} on attempt {attempt + 1}/{self.retries}: {url} - {response_text}"

View File

@@ -178,7 +178,7 @@ class AllDebridClient:
logger.error(
f"[AllDebrid] Response body: {error_body[:200]}"
)
except:
except Exception:
pass
raise

View File

@@ -4,6 +4,10 @@ import subprocess
import sys
import shutil
from SYS.logger import log, debug
import logging
logger = logging.getLogger(__name__)
from pathlib import Path
from typing import Any, Dict, Iterable, List, Optional, Sequence, Set, Tuple
@@ -651,7 +655,7 @@ def write_tags(
)
sidecar = media_path.parent / f"{fallback_base}.tag"
except Exception:
pass
logger.exception("Failed to determine fallback .tag sidecar base for %s", media_path)
# Write via consolidated function
try:
@@ -1258,15 +1262,16 @@ def embed_metadata_in_file(
stderr_text = result.stderr.decode("utf-8", errors="replace")[:200]
debug(f"FFmpeg stderr: {stderr_text}", file=sys.stderr)
except Exception:
pass
logger.exception("Failed to decode FFmpeg stderr for %s", file_path)
return False
except Exception as exc:
if temp_file.exists():
try:
temp_file.unlink()
except Exception:
pass
logger.exception("Failed to remove FFmpeg temp file %s after error", temp_file)
debug(f"❌ Error embedding metadata: {exc}", file=sys.stderr)
logger.exception("Error embedding metadata into %s", file_path)
return False
@@ -2236,7 +2241,7 @@ def enrich_playlist_entries(entries: list, extractor: str) -> list:
enriched.append(full_info)
continue
except Exception:
pass
logger.exception("Failed to fetch full metadata for entry URL: %s", entry_url)
# Fallback to original entry if fetch failed
enriched.append(entry)
@@ -2306,7 +2311,7 @@ def extract_title_from_tags(tags_list: List[str]) -> Optional[str]:
if extracted:
return extracted
except Exception:
pass
logger.exception("extract_title failed while extracting title from tags")
for t in tags_list:
if isinstance(t, str) and t.lower().startswith("title:"):
@@ -2563,9 +2568,9 @@ def scrape_url_metadata(
}
)
except json_module.JSONDecodeError:
pass
logger.debug("Failed to decode flat playlist line %d as JSON: %r", idx, line[:200])
except Exception:
pass # Silently ignore if we can't get playlist entries
logger.exception("yt-dlp flat-playlist extraction failed for URL: %s", url)
# Fallback: if still no tags detected, get from first item
if not tags:
@@ -2751,6 +2756,7 @@ def apply_mutagen_metadata(path: Path, metadata: dict[str, str], fmt: str) -> No
audio[target_key] = [value]
changed = True
except Exception: # pragma: no cover - best effort only
logger.exception("mutagen: failed to set field %s for %s", target_key, path)
continue
if not changed:
return
@@ -2758,6 +2764,7 @@ def apply_mutagen_metadata(path: Path, metadata: dict[str, str], fmt: str) -> No
audio.save()
except Exception as exc: # pragma: no cover - best effort only
log(f"mutagen save failed: {exc}", file=sys.stderr)
logger.exception("mutagen save failed for %s", path)
def build_ffmpeg_command(

View File

@@ -8,7 +8,10 @@ import os
import shutil
import sys
import time
import logging
from threading import RLock
logger = logging.getLogger(__name__)
from dataclasses import dataclass, field
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Protocol, TextIO
@@ -474,7 +477,7 @@ class ProgressBar:
total=int(total)
)
except Exception:
pass
logger.exception("Failed to update pipeline UI transfer in ProgressBar._ensure_started")
return
if self._progress is not None and self._task_id is not None:
@@ -506,8 +509,8 @@ class ProgressBar:
else:
return
except Exception:
pass
logger.exception("Failed to initialize pipeline Live UI integration in ProgressBar._ensure_started")
stream = file if file is not None else sys.stderr
# Use shared stderr console when rendering to stderr (cooperates with PipelineLiveProgress).
if stream is sys.stderr:
@@ -516,6 +519,7 @@ class ProgressBar:
console = stderr_console()
except Exception:
logger.exception("Failed to acquire shared stderr Console from SYS.rich_display; using fallback Console")
console = Console(file=stream)
else:
console = Console(file=stream)
@@ -558,7 +562,7 @@ class ProgressBar:
int) and total > 0 else None,
)
except Exception:
pass
logger.exception("Failed to update pipeline UI transfer in ProgressBar.update")
return
if self._progress is None or self._task_id is None:
@@ -582,7 +586,7 @@ class ProgressBar:
try:
self._pipeline_ui.finish_transfer(label=self._pipeline_label)
except Exception:
pass
logger.exception("Failed to finish pipeline UI transfer in ProgressBar.finish")
finally:
self._pipeline_ui = None
self._pipeline_label = None
@@ -681,7 +685,7 @@ class ProgressFileReader:
# EOF
self._finish()
except Exception:
pass
logger.exception("Error while reading and updating ProgressFileReader")
return chunk
def seek(self, offset: int, whence: int = 0) -> Any:
@@ -695,7 +699,7 @@ class ProgressFileReader:
else:
self._read = pos
except Exception:
pass
logger.exception("Failed to determine file position in ProgressFileReader.seek")
return out
def tell(self) -> Any:
@@ -705,7 +709,7 @@ class ProgressFileReader:
try:
self._finish()
except Exception:
pass
logger.exception("Failed to finish ProgressFileReader progress in close")
return self._f.close()
def __getattr__(self, name: str) -> Any:
@@ -825,6 +829,7 @@ class PipelineLiveProgress:
try:
value = str(text or "").strip()
except Exception:
logger.exception("Failed to compute active subtask text in PipelineLiveProgress.set_active_subtask_text")
value = ""
self._active_subtask_text = value or None
@@ -1011,10 +1016,11 @@ class PipelineLiveProgress:
else:
stop_fn()
except Exception:
logger.exception("Failed to stop Live with clear parameter; retrying without clear")
try:
stop_fn()
except Exception:
pass
logger.exception("Failed to stop Live on retry")
self._live = None
self._console = None
@@ -1043,9 +1049,9 @@ class PipelineLiveProgress:
subtasks.stop_task(sub_id)
subtasks.update(sub_id, visible=False)
except Exception:
pass
logger.exception("Failed to stop or hide subtask %s in PipelineLiveProgress._hide_pipe_subtasks", sub_id)
except Exception:
pass
logger.exception("Failed to hide pipe subtasks for index %s", pipe_index)
def set_pipe_status_text(self, pipe_index: int, text: str) -> None:
"""Set a status line under the pipe bars for the given pipe."""
@@ -1071,20 +1077,21 @@ class PipelineLiveProgress:
try:
self._hide_pipe_subtasks(pidx)
except Exception:
pass
logger.exception("Failed to hide pipe subtasks while setting status text for pipe %s", pidx)
task_id = self._status_tasks.get(pidx)
if task_id is None:
try:
task_id = prog.add_task(msg)
except Exception:
logger.exception("Failed to add status task for pipe %s in set_pipe_status_text", pidx)
return
self._status_tasks[pidx] = task_id
try:
prog.update(task_id, description=msg, refresh=True)
except Exception:
pass
logger.exception("Failed to update status task %s in set_pipe_status_text", task_id)
def clear_pipe_status_text(self, pipe_index: int) -> None:
if not self._enabled:
@@ -1104,7 +1111,7 @@ class PipelineLiveProgress:
try:
prog.remove_task(task_id)
except Exception:
pass
logger.exception("Failed to remove pipe status task %s in clear_pipe_status_text", task_id)
def set_pipe_percent(self, pipe_index: int, percent: int) -> None:
"""Update the pipe bar as a percent (only when single-item mode is enabled)."""
@@ -1127,7 +1134,7 @@ class PipelineLiveProgress:
pipe_progress.update(pipe_task, completed=pct, total=100, refresh=True)
self._update_overall()
except Exception:
pass
logger.exception("Failed to set pipe percent for pipe %s in set_pipe_percent", pipe_index)
def _update_overall(self) -> None:
"""Update the overall pipeline progress task."""
@@ -1142,6 +1149,7 @@ class PipelineLiveProgress:
if self._pipe_done[i] >= max(1, self._pipe_totals[i])
)
except Exception:
logger.exception("Failed to compute completed pipes in _update_overall")
completed = 0
try:
@@ -1151,7 +1159,7 @@ class PipelineLiveProgress:
description=f"Pipeline: {completed}/{len(self._pipe_labels)} pipes completed",
)
except Exception:
pass
logger.exception("Failed to update overall pipeline task in _update_overall")
# Auto-stop Live rendering once all pipes are complete so the progress
# UI clears itself even if callers forget to stop it explicitly.
@@ -1161,7 +1169,7 @@ class PipelineLiveProgress:
if total_pipes > 0 and completed >= total_pipes:
self.stop()
except Exception:
pass
logger.exception("Failed to auto-stop Live UI after all pipes completed")
def begin_pipe_steps(self, pipe_index: int, *, total_steps: int) -> None:
"""Initialize step tracking for a pipe.
@@ -1187,11 +1195,11 @@ class PipelineLiveProgress:
try:
self.clear_pipe_status_text(pidx)
except Exception:
pass
logger.exception("Failed to clear pipe status text in begin_pipe_steps for %s", pidx)
try:
self.set_pipe_percent(pidx, 0)
except Exception:
pass
logger.exception("Failed to set initial pipe percent in begin_pipe_steps for %s", pidx)
def advance_pipe_step(self, pipe_index: int, text: str) -> None:
"""Advance the pipe's step counter by one.
@@ -1226,14 +1234,14 @@ class PipelineLiveProgress:
try:
self.set_pipe_status_text(pidx, line)
except Exception:
pass
logger.exception("Failed to set pipe status text in advance_pipe_step for pipe %s", pidx)
# Percent mapping only applies when the pipe is in percent mode (single-item).
try:
pct = 100 if done >= total else int(round((done / max(1, total)) * 100.0))
self.set_pipe_percent(pidx, pct)
except Exception:
pass
logger.exception("Failed to set pipe percent in advance_pipe_step for pipe %s", pidx)
def begin_transfer(self, *, label: str, total: Optional[int] = None) -> None:
if not self._enabled:
@@ -1247,14 +1255,14 @@ class PipelineLiveProgress:
if total is not None and total > 0:
self._transfers.update(self._transfer_tasks[key], total=int(total))
except Exception:
pass
logger.exception("Failed to update existing transfer task total for %s in begin_transfer", key)
return
task_total = int(total) if isinstance(total, int) and total > 0 else None
try:
task_id = self._transfers.add_task(key, total=task_total)
self._transfer_tasks[key] = task_id
except Exception:
pass
logger.exception("Failed to add transfer task %s in begin_transfer", key)
def update_transfer(
self,
@@ -1282,7 +1290,7 @@ class PipelineLiveProgress:
kwargs["total"] = int(total)
self._transfers.update(task_id, refresh=True, **kwargs)
except Exception:
pass
logger.exception("Failed to update transfer '%s'", key)
def finish_transfer(self, *, label: str) -> None:
if self._transfers is None:
@@ -1294,7 +1302,7 @@ class PipelineLiveProgress:
try:
self._transfers.remove_task(task_id)
except Exception:
pass
logger.exception("Failed to remove transfer task '%s' in finish_transfer", key)
def _ensure_pipe(self, pipe_index: int) -> bool:
if not self._enabled:
@@ -1330,12 +1338,12 @@ class PipelineLiveProgress:
try:
self.clear_pipe_status_text(pipe_index)
except Exception:
pass
logger.exception("Failed to clear pipe status text during begin_pipe for %s", pipe_index)
try:
self._pipe_step_total.pop(pipe_index, None)
self._pipe_step_done.pop(pipe_index, None)
except Exception:
pass
logger.exception("Failed to reset pipe step totals during begin_pipe for %s", pipe_index)
# If this pipe will process exactly one item, allow percent-based updates.
percent_mode = bool(int(total_items) == 1)
@@ -1351,7 +1359,7 @@ class PipelineLiveProgress:
try:
pipe_progress.start_task(pipe_task)
except Exception:
pass
logger.exception("Failed to start pipe task timer in begin_pipe for %s", pipe_index)
self._update_overall()
@@ -1386,6 +1394,7 @@ class PipelineLiveProgress:
"description",
"") or "").strip() or None
except Exception:
logger.exception("Failed to set active subtask text for first subtask %s in begin_pipe", first)
self._active_subtask_text = None
def on_emit(self, pipe_index: int, emitted: Any) -> None:
@@ -1429,7 +1438,7 @@ class PipelineLiveProgress:
f"{self._pipe_labels[pipe_index]}: {_pipeline_progress_item_label(emitted)}",
)
except Exception:
pass
logger.exception("Failed to update subtask description for current %s in on_emit", current)
subtasks.stop_task(current)
subtasks.update(current, visible=False)
@@ -1448,12 +1457,12 @@ class PipelineLiveProgress:
try:
self.clear_pipe_status_text(pipe_index)
except Exception:
pass
logger.exception("Failed to clear pipe status text after emit for %s", pipe_index)
try:
self._pipe_step_total.pop(pipe_index, None)
self._pipe_step_done.pop(pipe_index, None)
except Exception:
pass
logger.exception("Failed to pop pipe step totals after emit for %s", pipe_index)
# Start next subtask spinner.
next_index = active + 1
@@ -1468,6 +1477,7 @@ class PipelineLiveProgress:
"description",
"") or "").strip() or None
except Exception:
logger.exception("Failed to set active subtask text for next subtask %s in on_emit", nxt)
self._active_subtask_text = None
else:
self._active_subtask_text = None
@@ -1504,7 +1514,7 @@ class PipelineLiveProgress:
subtasks.stop_task(sub_id)
subtasks.update(sub_id, visible=False)
except Exception:
pass
logger.exception("Failed to stop or hide subtask %s during finish_pipe for pipe %s", sub_id, pipe_index)
# If we just finished the active pipe, clear the title context.
self._active_subtask_text = None
@@ -1513,19 +1523,19 @@ class PipelineLiveProgress:
try:
self.clear_pipe_status_text(pipe_index)
except Exception:
pass
logger.exception("Failed to clear pipe status text during finish_pipe for %s", pipe_index)
try:
self._pipe_step_total.pop(pipe_index, None)
self._pipe_step_done.pop(pipe_index, None)
except Exception:
pass
logger.exception("Failed to pop pipe step totals during finish_pipe for %s", pipe_index)
# Stop the per-pipe timer once the pipe is finished.
try:
pipe_task = self._pipe_tasks[pipe_index]
pipe_progress.stop_task(pipe_task)
except Exception:
pass
logger.exception("Failed to stop pipe task %s during finish_pipe", pipe_index)
self._update_overall()
@@ -1537,7 +1547,7 @@ class PipelineLiveProgress:
try:
self.finish_pipe(idx)
except Exception:
pass
logger.exception("Failed to finish pipe %s in complete_all_pipes", idx)
class PipelineStageContext:
@@ -1568,7 +1578,7 @@ class PipelineStageContext:
try:
cb(obj)
except Exception:
pass
logger.exception("Error in PipelineStageContext.emit callback")
def get_current_command_text(self) -> str:
"""Get the current command text (for backward compatibility)."""

View File

@@ -10,6 +10,8 @@ from contextvars import ContextVar
from typing import Any, Dict, List, Optional, Sequence, Callable
from SYS.models import PipelineStageContext
from SYS.logger import log, debug, is_debug_enabled
import logging
logger = logging.getLogger(__name__)
from SYS.worker import WorkerManagerRegistry, WorkerStages
from SYS.cli_parsing import SelectionSyntax, SelectionFilterSyntax
from SYS.rich_display import stdout_console
@@ -62,7 +64,7 @@ def suspend_live_progress():
try:
ui.resume()
except Exception:
pass
logger.exception("Failed to resume live progress UI after suspend")
def _is_selectable_table(table: Any) -> bool:
@@ -237,7 +239,7 @@ def print_if_visible(*args: Any, file=None, **kwargs: Any) -> None:
if should_print:
log(*args, **kwargs) if file is None else log(*args, file=file, **kwargs)
except Exception:
pass
logger.exception("Error in print_if_visible")
def store_value(key: str, value: Any) -> None:
@@ -253,7 +255,7 @@ def store_value(key: str, value: Any) -> None:
state = _get_pipeline_state()
state.pipeline_values[text] = value
except Exception:
pass
logger.exception("Failed to store pipeline value '%s'", key)
def load_value(key: str, default: Any = None) -> Any:
@@ -330,7 +332,7 @@ def set_pending_pipeline_tail(
state.pending_pipeline_source = clean_source if clean_source else None
except Exception:
# Keep existing pending tail on failure
pass
logger.exception("Failed to set pending pipeline tail; keeping existing pending tail")
def get_pending_pipeline_tail() -> List[List[str]]:
@@ -627,24 +629,9 @@ def set_last_result_table(
if result_table.rows and len(sorted_items) == len(result_table.rows):
state.last_result_items = sorted_items
except Exception:
pass
logger.exception("Failed to sort result_table and reorder items")
def set_last_result_table_overlay(
result_table: Optional[Any],
items: Optional[List[Any]] = None,
subject: Optional[Any] = None
) -> None:
"""
Set a result table as an overlay (display only, no history).
"""
state = _get_pipeline_state()
state.display_table = result_table
state.display_items = items or []
state.display_subject = subject
# Sort table by Title/Name column alphabetically if available
if (
result_table is not None
and hasattr(result_table, "sort_by_title")
@@ -662,23 +649,7 @@ def set_last_result_table_overlay(
if len(sorted_items) == len(result_table.rows):
state.display_items = sorted_items
except Exception:
pass
def set_last_result_table_preserve_history(
result_table: Optional[Any],
items: Optional[List[Any]] = None,
subject: Optional[Any] = None
) -> None:
"""
Update the last result table WITHOUT adding to history.
"""
state = _get_pipeline_state()
# Update current table WITHOUT pushing to history
state.last_result_table = result_table
state.last_result_items = items or []
state.last_result_subject = subject
logger.exception("Failed to sort overlay result_table and reorder items")
@@ -747,7 +718,7 @@ def restore_previous_result_table() -> bool:
try:
debug_table_state("restore_previous_result_table")
except Exception:
pass
logger.exception("Failed to debug_table_state during restore_previous_result_table")
return True
@@ -805,7 +776,7 @@ def restore_next_result_table() -> bool:
try:
debug_table_state("restore_next_result_table")
except Exception:
pass
logger.exception("Failed to debug_table_state during restore_next_result_table")
return True
@@ -926,7 +897,7 @@ def debug_table_state(label: str = "") -> None:
f"history={len(state.result_table_history or [])} forward={len(state.result_table_forward or [])} last_selection={list(state.last_selection or [])}"
)
except Exception:
pass
logger.exception("Failed to debug_table_state buffers summary")
def get_last_selectable_result_items() -> List[Any]:
@@ -1133,7 +1104,7 @@ class PipelineExecutor:
if self._config_loader is not None:
return self._config_loader.load()
except Exception:
pass
logger.exception("Failed to use config_loader.load(); falling back to SYS.config.load_config")
try:
from SYS.config import load_config
@@ -1209,7 +1180,7 @@ class PipelineExecutor:
if hasattr(ctx, "clear_pipeline_stop"):
ctx.clear_pipeline_stop()
except Exception:
pass
logger.exception("Failed to clear pipeline stop via ctx.clear_pipeline_stop")
@staticmethod
def _maybe_seed_current_stage_table(ctx: Any) -> None:
@@ -1231,7 +1202,7 @@ class PipelineExecutor:
if last_table:
ctx.set_current_stage_table(last_table)
except Exception:
pass
logger.exception("Failed to seed current_stage_table from display or last table")
@staticmethod
def _maybe_apply_pending_pipeline_tail(ctx: Any,
@@ -1290,13 +1261,13 @@ class PipelineExecutor:
if hasattr(ctx, "clear_pending_pipeline_tail"):
ctx.clear_pending_pipeline_tail()
except Exception:
pass
logger.exception("Failed to clear pending pipeline tail after appending pending tail")
else:
try:
if hasattr(ctx, "clear_pending_pipeline_tail"):
ctx.clear_pending_pipeline_tail()
except Exception:
pass
logger.exception("Failed to clear pending pipeline tail (source mismatch branch)")
return stages
def _apply_quiet_background_flag(self, config: Any) -> Any:
@@ -1410,7 +1381,7 @@ class PipelineExecutor:
if isinstance(meta, dict):
_add(meta.get("provider"))
except Exception:
pass
logger.exception("Failed to inspect current_table/table metadata in _maybe_run_class_selector")
for item in selected_items or []:
if isinstance(item, dict):
@@ -1443,7 +1414,7 @@ class PipelineExecutor:
if prefix and is_known_provider_name(prefix):
_add(prefix)
except Exception:
pass
logger.exception("Failed while computing provider prefix heuristics in _maybe_run_class_selector")
if get_provider is not None:
for key in candidates:
@@ -1453,7 +1424,7 @@ class PipelineExecutor:
continue
except Exception:
# If the predicate fails for any reason, fall back to legacy behavior.
pass
logger.exception("is_known_provider_name predicate failed for key %s; falling back", key)
try:
provider = get_provider(key, config)
except Exception:
@@ -1511,7 +1482,7 @@ class PipelineExecutor:
if handled:
return True
except Exception:
pass
logger.exception("Failed while running store-based selector logic in _maybe_run_class_selector")
return False
@@ -1544,7 +1515,7 @@ class PipelineExecutor:
try:
worker_manager.append_stdout(worker_id, text + "\n", channel="log")
except Exception:
pass
logger.exception("Failed to append pipeline event to worker stdout for %s", worker_id)
@staticmethod
def _maybe_open_url_selection(
@@ -1632,7 +1603,7 @@ class PipelineExecutor:
kwargs["output"] = output_fn
ensure_background_notifier(worker_manager, **kwargs)
except Exception:
pass
logger.exception("Failed to enable background notifier for session_worker_ids=%r", session_worker_ids)
@staticmethod
def _get_raw_stage_texts(ctx: Any) -> List[str]:
@@ -1691,7 +1662,7 @@ class PipelineExecutor:
if last_table is not None:
ctx.set_current_stage_table(last_table)
except Exception:
pass
logger.exception("Failed to sync current_stage_table from display/last table in _maybe_apply_initial_selection")
source_cmd = None
source_args_raw = None
@@ -1836,7 +1807,7 @@ class PipelineExecutor:
f"@N expansion: {source_cmd} + selected_args={selected_row_args} + source_args={source_args}",
)
except Exception:
pass
logger.exception("Failed to record pipeline log step for @N expansion (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
stage_table = None
try:
@@ -1939,7 +1910,7 @@ class PipelineExecutor:
continue
seen_track_ids.add(tid)
except Exception:
pass
logger.exception("Failed to extract/parse track metadata in album processing")
track_items.append(tr)
if track_items:
@@ -1969,7 +1940,7 @@ class PipelineExecutor:
f"Applied @N selection {' | '.join(selection_parts)}",
)
except Exception:
pass
logger.exception("Failed to record Applied @N selection log step (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
# Auto-insert downloader stages for provider tables.
try:
@@ -1979,6 +1950,7 @@ class PipelineExecutor:
if current_table is None:
current_table = ctx.get_last_result_table()
except Exception:
logger.exception("Failed to determine current_table for selection auto-insert; defaulting to None")
current_table = None
table_type = None
try:
@@ -1990,6 +1962,7 @@ class PipelineExecutor:
if current_table and hasattr(current_table, "table") else None
)
except Exception:
logger.exception("Failed to compute table_type from current_table; using fallback attribute access")
table_type = (
current_table.table
if current_table and hasattr(current_table, "table") else None
@@ -2100,7 +2073,7 @@ class PipelineExecutor:
try:
print(f"Auto-running selection via {auto_stage[0]}")
except Exception:
pass
logger.exception("Failed to print auto-run selection message for %s", auto_stage[0])
# Append the auto stage now. If the user also provided a selection
# (e.g., @1 | add-file ...), we want to attach the row selection
# args *to the auto-inserted stage* so the download command receives
@@ -2137,44 +2110,54 @@ class PipelineExecutor:
tail = [str(x) for x in inserted[1:]]
stages[-1] = [cmd] + [str(x) for x in row_args] + tail
except Exception:
pass
logger.exception("Failed to attach selection args to auto-inserted stage")
# If no auto stage inserted and there are selection-action tokens available
# for the single selected row, apply it as the pipeline stage so a bare
# `@N` runs the intended action (e.g., get-file for hash-backed rows).
if not stages and selection_indices and len(selection_indices) == 1:
try:
idx = selection_indices[0]
debug(f"@N initial selection idx={idx} last_items={len(ctx.get_last_result_items() or [])}")
row_action = None
try:
row_action = ctx.get_current_stage_table_row_selection_action(idx)
except Exception:
row_action = None
if not row_action:
# If no auto stage inserted and there are selection-action tokens available
# for the single selected row, apply it as the pipeline stage so a bare
# `@N` runs the intended action (e.g., get-file for hash-backed rows).
if not stages and selection_indices and len(selection_indices) == 1:
try:
items = ctx.get_last_result_items() or []
if 0 <= idx < len(items):
maybe = items[idx]
# Provide explicit debug output about the payload selected
try:
if isinstance(maybe, dict):
debug(f"@N payload: hash={maybe.get('hash')} store={maybe.get('store')} _selection_args={maybe.get('_selection_args')} _selection_action={maybe.get('_selection_action')}")
else:
debug(f"@N payload object type: {type(maybe).__name__}")
except Exception:
pass
if isinstance(maybe, dict):
candidate = maybe.get("_selection_action")
if isinstance(candidate, (list, tuple)):
row_action = [str(x) for x in candidate if x is not None]
debug(f"@N restored row_action from payload: {row_action}")
except Exception:
idx = selection_indices[0]
debug(f"@N initial selection idx={idx} last_items={len(ctx.get_last_result_items() or [])}")
row_action = None
try:
row_action = ctx.get_current_stage_table_row_selection_action(idx)
except Exception:
logger.exception("Failed to get current_stage_table row selection action for idx %s", idx)
row_action = None
if row_action:
debug(f"@N applying row action -> {row_action}")
if not row_action:
try:
items = ctx.get_last_result_items() or []
if 0 <= idx < len(items):
maybe = items[idx]
try:
if isinstance(maybe, dict):
debug(f"@N payload: hash={maybe.get('hash')} store={maybe.get('store')} _selection_args={maybe.get('_selection_args')} _selection_action={maybe.get('_selection_action')}")
else:
debug(f"@N payload object type: {type(maybe).__name__}")
except Exception:
logger.exception("Failed to debug selection payload for index %s", idx)
if isinstance(maybe, dict):
candidate = maybe.get("_selection_action")
if isinstance(candidate, (list, tuple)):
row_action = [str(x) for x in candidate if x is not None]
except Exception:
row_action = None
if row_action:
debug(f"@N applying row action -> {row_action}")
stages.append(row_action)
if pipeline_session and worker_manager:
try:
worker_manager.log_step(
pipeline_session.worker_id,
f"@N applied row action -> {' '.join(row_action)}",
)
except Exception:
logger.exception("Failed to record pipeline log step for applied row action (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
except Exception:
logger.exception("Failed to apply single-row selection action")
stages.append(row_action)
if pipeline_session and worker_manager:
try:
@@ -2183,9 +2166,7 @@ class PipelineExecutor:
f"@N applied row action -> {' '.join(row_action)}",
)
except Exception:
pass
except Exception:
pass
logger.exception("Failed to record pipeline log step for applied row action (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
else:
first_cmd = stages[0][0] if stages and stages[0] else None
if isinstance(table_type, str) and table_type.startswith("metadata.") and first_cmd not in (
@@ -2234,7 +2215,7 @@ class PipelineExecutor:
tail = [str(x) for x in inserted[1:]]
stages[0] = [cmd] + [str(x) for x in row_args] + tail
except Exception:
pass
logger.exception("Failed to attach selection args to inserted auto stage (alternate branch)")
# After inserting/appending an auto-stage, continue processing so later
# selection-expansion logic can still run (e.g., for example selectors).
@@ -2304,7 +2285,7 @@ class PipelineExecutor:
continue
i += 1
except Exception:
pass
logger.exception("Failed to inspect add-file stage tokens for potential directory; skipping Live progress")
if not name:
continue
# Display-only: avoid Live progress for relationship viewing.
@@ -2342,7 +2323,7 @@ class PipelineExecutor:
if hasattr(_pipeline_ctx, "set_live_progress"):
_pipeline_ctx.set_live_progress(progress_ui)
except Exception:
pass
logger.exception("Failed to register PipelineLiveProgress with pipeline context")
pipe_index_by_stage = {
stage_idx: pipe_idx
for pipe_idx, stage_idx in enumerate(pipe_stage_indices)
@@ -2366,7 +2347,7 @@ class PipelineExecutor:
if hasattr(ctx, "set_current_stage_table"):
ctx.set_current_stage_table(None)
except Exception:
pass
logger.exception("Failed to clear current_stage_table in execute_tokens")
# Preflight (URL-duplicate prompts, etc.) should be cached within a single
# pipeline run, not across independent pipelines.
@@ -2374,7 +2355,7 @@ class PipelineExecutor:
ctx.store_value("preflight",
{})
except Exception:
pass
logger.exception("Failed to set preflight cache in execute_tokens")
stages = self._split_stages(tokens)
if not stages:
@@ -2482,7 +2463,7 @@ class PipelineExecutor:
try:
ctx.set_last_items(pipe_items)
except Exception:
pass
logger.exception("Failed to set last items after @ selection")
if pipeline_session and worker_manager:
try:
worker_manager.log_step(
@@ -2490,7 +2471,7 @@ class PipelineExecutor:
"@ used last result items"
)
except Exception:
pass
logger.exception("Failed to record pipeline log step for '@ used last result items' (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
continue
subject = ctx.get_last_result_subject()
@@ -2505,7 +2486,7 @@ class PipelineExecutor:
list) else [subject]
ctx.set_last_items(subject_items)
except Exception:
pass
logger.exception("Failed to set last_items from subject during @ handling")
if pipeline_session and worker_manager:
try:
worker_manager.log_step(
@@ -2513,7 +2494,7 @@ class PipelineExecutor:
"@ used current table subject"
)
except Exception:
pass
logger.exception("Failed to record pipeline log step for '@ used current table subject' (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
continue
if cmd_name.startswith("@"): # selection stage
@@ -2550,7 +2531,7 @@ class PipelineExecutor:
ctx.set_current_stage_table(display_table)
stage_table = display_table
except Exception:
pass
logger.exception("Failed to set current_stage_table from display table during selection processing")
if not stage_table and display_table is not None:
stage_table = display_table
@@ -2561,7 +2542,7 @@ class PipelineExecutor:
if hasattr(ctx, "debug_table_state"):
ctx.debug_table_state(f"selection {selection_token}")
except Exception:
pass
logger.exception("Failed to debug_table_state during selection %s", selection_token)
if display_table is not None and stage_table is display_table:
items_list = ctx.get_last_result_items() or []
@@ -2600,9 +2581,9 @@ class PipelineExecutor:
try:
debug(f"Selection sample object: provider={getattr(sample, 'provider', None)} store={getattr(sample, 'store', None)}")
except Exception:
pass
logger.exception("Failed to debug selection sample object")
except Exception:
pass
logger.exception("Failed to produce selection debug sample for token %s", selection_token)
if not filtered:
print("No items matched selection\n")
@@ -2628,14 +2609,14 @@ class PipelineExecutor:
if base_table is not None and getattr(base_table, "table", None):
new_table.set_table(str(getattr(base_table, "table")))
except Exception:
pass
logger.exception("Failed to set table on new_table for filter overlay")
try:
# Attach a one-line header so users see the active filter.
safe = str(selection_token)[1:].strip()
new_table.set_header_line(f'filter: "{safe}"')
except Exception:
pass
logger.exception("Failed to set header line for filter overlay for token %s", selection_token)
for item in filtered:
new_table.add_result(item)
@@ -2643,15 +2624,15 @@ class PipelineExecutor:
try:
ctx.set_last_result_table_overlay(new_table, items=list(filtered), subject=ctx.get_last_result_subject())
except Exception:
pass
logger.exception("Failed to set last_result_table_overlay for filter selection")
try:
stdout_console().print()
stdout_console().print(new_table)
except Exception:
pass
logger.exception("Failed to render filter overlay to stdout_console")
except Exception:
pass
logger.exception("Failed while rendering filter overlay for selection %s", selection_token)
continue
# UX: selecting a single URL row from get-url tables should open it.
@@ -2667,7 +2648,7 @@ class PipelineExecutor:
stage_is_last=(stage_index + 1 >= len(stages)),
)
except Exception:
pass
logger.exception("Failed to open URL selection for table %s", getattr(current_table, 'table', None))
if PipelineExecutor._maybe_run_class_selector(
ctx,
@@ -2685,6 +2666,7 @@ class PipelineExecutor:
).replace("_",
"-").lower()
except Exception:
logger.exception("Failed to determine next_cmd during selection expansion for stage_index %s", stage_index)
next_cmd = None
def _is_tag_row(obj: Any) -> bool:
@@ -2696,12 +2678,12 @@ class PipelineExecutor:
"tag_name")):
return True
except Exception:
pass
logger.exception("Failed to inspect TagItem object while checking _is_tag_row")
try:
if isinstance(obj, dict) and obj.get("tag_name"):
return True
except Exception:
pass
logger.exception("Failed to inspect dict tag_name while checking _is_tag_row")
return False
if (next_cmd in {"delete-tag",
@@ -2788,7 +2770,7 @@ class PipelineExecutor:
try:
print(f"Auto-running selection via {auto_stage[0]}")
except Exception:
pass
logger.exception("Failed to print auto-run selection message for %s", auto_stage[0])
stages.append(list(auto_stage))
else:
if auto_stage:
@@ -2885,12 +2867,12 @@ class PipelineExecutor:
stdout_console().print()
stdout_console().print(overlay_table)
except Exception:
pass
logger.exception("Failed to render overlay_table to stdout_console")
if session:
try:
session.close()
except Exception:
pass
logger.exception("Failed to close pipeline stage session")
except Exception as exc:
pipeline_status = "failed"
@@ -2907,26 +2889,26 @@ class PipelineExecutor:
try:
progress_ui.complete_all_pipes()
except Exception:
pass
logger.exception("Failed to complete all pipe UI tasks in progress_ui.complete_all_pipes")
try:
progress_ui.stop()
except Exception:
pass
logger.exception("Failed to stop progress_ui")
try:
from SYS import pipeline as _pipeline_ctx
if hasattr(_pipeline_ctx, "set_live_progress"):
_pipeline_ctx.set_live_progress(None)
except Exception:
pass
logger.exception("Failed to clear live_progress on pipeline context")
# Close pipeline session and log final status
try:
if pipeline_session and worker_manager:
pipeline_session.close(status=pipeline_status, error_msg=pipeline_error)
except Exception:
pass
logger.exception("Failed to close pipeline session during finalization")
try:
if pipeline_session and worker_manager:
self._log_pipeline_event(worker_manager, pipeline_session.worker_id,
f"Pipeline {pipeline_status}: {pipeline_error or ''}")
except Exception:
pass
logger.exception("Failed to log final pipeline status (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))

View File

@@ -3,6 +3,8 @@ from __future__ import annotations
import sys
from contextlib import contextmanager
from typing import Any, Iterator, Optional, Sequence, Tuple
import logging
logger = logging.getLogger(__name__)
class PipelineProgress:
@@ -31,6 +33,7 @@ class PipelineProgress:
) if hasattr(self._ctx,
"get_live_progress") else None
except Exception:
logger.exception("Failed to get live progress UI from pipeline context")
ui = None
pipe_idx: int = 0
@@ -48,6 +51,7 @@ class PipelineProgress:
if isinstance(maybe_idx, int):
pipe_idx = int(maybe_idx)
except Exception:
logger.exception("Failed to determine pipe index from stage context")
pipe_idx = 0
return ui, pipe_idx
@@ -61,6 +65,7 @@ class PipelineProgress:
if callable(begin):
begin(int(pipe_idx), total_steps=int(total_steps))
except Exception:
logger.exception("Failed to call begin_pipe_steps on UI")
return
def step(self, text: str) -> None:
@@ -72,6 +77,7 @@ class PipelineProgress:
if callable(adv):
adv(int(pipe_idx), str(text))
except Exception:
logger.exception("Failed to advance pipe step on UI")
return
def set_percent(self, percent: int) -> None:
@@ -83,6 +89,7 @@ class PipelineProgress:
if callable(set_pct):
set_pct(int(pipe_idx), int(percent))
except Exception:
logger.exception("Failed to set pipe percent on UI")
return
def set_status(self, text: str) -> None:
@@ -94,6 +101,7 @@ class PipelineProgress:
if callable(setter):
setter(int(pipe_idx), str(text))
except Exception:
logger.exception("Failed to set pipe status text on UI")
return
def clear_status(self) -> None:
@@ -105,6 +113,7 @@ class PipelineProgress:
if callable(clr):
clr(int(pipe_idx))
except Exception:
logger.exception("Failed to clear pipe status text on UI")
return
def begin_transfer(self, *, label: str, total: Optional[int] = None) -> None:
@@ -116,6 +125,7 @@ class PipelineProgress:
if callable(fn):
fn(label=str(label or "transfer"), total=total)
except Exception:
logger.exception("Failed to begin transfer on UI")
return
def update_transfer(
@@ -133,6 +143,7 @@ class PipelineProgress:
if callable(fn):
fn(label=str(label or "transfer"), completed=completed, total=total)
except Exception:
logger.exception("Failed to update transfer on UI")
return
def finish_transfer(self, *, label: str) -> None:
@@ -144,6 +155,7 @@ class PipelineProgress:
if callable(fn):
fn(label=str(label or "transfer"))
except Exception:
logger.exception("Failed to finish transfer on UI")
return
def begin_pipe(
@@ -164,6 +176,7 @@ class PipelineProgress:
items_preview=list(items_preview or []),
)
except Exception:
logger.exception("Failed to begin pipe on UI")
return
def on_emit(self, emitted: Any) -> None:
@@ -178,6 +191,7 @@ class PipelineProgress:
try:
self._local_ui.on_emit(0, emitted)
except Exception:
logger.exception("Failed to call local UI on_emit")
return
def ensure_local_ui(
@@ -196,6 +210,7 @@ class PipelineProgress:
"get_live_progress") else None
)
except Exception:
logger.exception("Failed to check existing live progress from pipeline context")
existing = None
if existing is not None:
@@ -213,6 +228,7 @@ class PipelineProgress:
self._ctx.set_live_progress(ui)
self._local_attached = True
except Exception:
logger.exception("Failed to attach local UI to pipeline context")
self._local_attached = False
try:
@@ -223,11 +239,12 @@ class PipelineProgress:
items_preview=list(items_preview or [])
)
except Exception:
pass
logger.exception("Failed to begin_pipe on local UI")
self._local_ui = ui
return True
except Exception:
logger.exception("Failed to create local PipelineLiveProgress UI")
self._local_ui = None
self._local_attached = False
return False
@@ -239,18 +256,18 @@ class PipelineProgress:
try:
self._local_ui.finish_pipe(0, force_complete=bool(force_complete))
except Exception:
pass
logger.exception("Failed to finish local UI pipe")
try:
self._local_ui.stop()
except Exception:
pass
logger.exception("Failed to stop local UI")
finally:
self._local_ui = None
try:
if self._local_attached and hasattr(self._ctx, "set_live_progress"):
self._ctx.set_live_progress(None)
except Exception:
pass
logger.exception("Failed to detach local progress from pipeline context")
self._local_attached = False
@contextmanager

View File

@@ -25,6 +25,9 @@ from ProviderCore.base import SearchResult
from SYS.html_table import extract_records
import lxml.html as lxml_html
import logging
logger = logging.getLogger(__name__)
class TableProviderMixin:
"""Mixin to simplify providers that scrape table/list results from HTML.
@@ -56,15 +59,18 @@ class TableProviderMixin:
resp = client.get(url)
content = resp.content
except Exception:
logger.exception("Failed to fetch URL %s for provider %s", url, getattr(self, 'name', '<provider>'))
return []
# Ensure we pass an lxml document or string (httpx returns bytes)
try:
doc = lxml_html.fromstring(content)
except Exception:
logger.debug("Failed to parse content with lxml; attempting to decode as utf-8", exc_info=True)
try:
doc = content.decode("utf-8")
except Exception:
logger.debug("Failed to decode content as utf-8; falling back to str()", exc_info=True)
doc = str(content)
records, chosen = extract_records(doc, base_url=url, xpaths=xpaths or self.DEFAULT_XPATHS)

View File

@@ -43,6 +43,9 @@ else:
# Reuse the existing format_bytes helper under a clearer alias
from SYS.utils import format_bytes as format_mb
import logging
logger = logging.getLogger(__name__)
def _sanitize_cell_text(value: Any) -> str:
"""Coerce to a single-line, tab-free string suitable for terminal display."""
@@ -82,6 +85,7 @@ def _format_duration_hms(duration: Any) -> str:
else:
seconds = float(duration)
except Exception:
logger.debug("Failed to format duration '%s' to hms", duration, exc_info=True)
return ""
if seconds < 0:
@@ -118,6 +122,7 @@ class TableColumn:
try:
return self.extractor(item)
except Exception:
logger.exception("TableColumn.extract failed for key '%s'", self.key)
return None
@@ -137,6 +142,7 @@ def _as_dict(item: Any) -> Optional[Dict[str, Any]]:
if hasattr(item, "__dict__"):
return dict(getattr(item, "__dict__"))
except Exception:
logger.exception("Failed to convert %s to dict in _as_dict", type(item))
return None
return None
@@ -201,6 +207,7 @@ def extract_ext_value(item: Any) -> str:
if suf:
ext = suf.lstrip(".")
except Exception:
logger.debug("Failed to extract suffix from raw_path: %r", raw_path, exc_info=True)
ext = ""
ext_str = str(ext or "").strip().lstrip(".")
@@ -242,6 +249,7 @@ def extract_size_bytes_value(item: Any) -> Optional[int]:
# Some sources might provide floats or numeric strings
return int(float(s))
except Exception:
logger.debug("Failed to parse size value '%r' to int", size_val, exc_info=True)
return None
@@ -471,6 +479,7 @@ class Table:
"get_current_cmdlet_name") else ""
)
except Exception:
logger.debug("Failed to get current cmdlet name from pipeline context", exc_info=True)
cmdlet_name = ""
stage_text = ""
@@ -481,6 +490,7 @@ class Table:
"get_current_stage_text") else ""
)
except Exception:
logger.debug("Failed to get current stage text from pipeline context", exc_info=True)
stage_text = ""
if cmdlet_name and stage_text:
@@ -494,7 +504,8 @@ class Table:
"-").startswith(normalized_cmd):
self.title = normalized_stage
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to introspect pipeline context to set ResultTable title")
self.title_width = title_width
self.max_columns = (
max_columns if max_columns is not None else 5
@@ -558,6 +569,7 @@ class Table:
try:
return dict(self.table_metadata)
except Exception:
logger.exception("Failed to copy table metadata")
return {}
def _interactive(self, interactive: bool = True) -> "Table":
@@ -835,7 +847,8 @@ class Table:
val = col.format_fn(val)
row.add_column(col.header, val)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to extract column '%s' for row %r", getattr(col, 'header', '<col>'), r)
return instance
@@ -913,11 +926,13 @@ class Table:
md = getattr(result, "full_metadata", None)
md_dict = dict(md) if isinstance(md, dict) else {}
except Exception:
logger.debug("Failed to extract full_metadata for result of type %s", type(result), exc_info=True)
md_dict = {}
try:
selection_args = getattr(result, "selection_args", None)
except Exception:
logger.debug("Failed to get selection_args from result of type %s", type(result), exc_info=True)
selection_args = None
if selection_args is None:
selection_args = md_dict.get("_selection_args") or md_dict.get("selection_args")
@@ -927,6 +942,7 @@ class Table:
try:
selection_action = getattr(result, "selection_action", None)
except Exception:
logger.debug("Failed to get selection_action from result of type %s", type(result), exc_info=True)
selection_action = None
if selection_action is None:
selection_action = md_dict.get("_selection_action") or md_dict.get("selection_action")
@@ -1084,13 +1100,16 @@ class Table:
and "table" not in visible_data and "source" not in visible_data):
visible_data["store"] = store_extracted
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to extract store value for item: %r", data)
try:
ext_extracted = extract_ext_value(data)
# Always ensure `ext` exists so priority_groups keeps a stable column.
visible_data["ext"] = str(ext_extracted or "")
except Exception:
from SYS.logger import logger
logger.exception("Failed to extract ext value for item: %r", data)
visible_data.setdefault("ext", "")
try:
@@ -1099,7 +1118,8 @@ class Table:
and "size" not in visible_data):
visible_data["size_bytes"] = size_extracted
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to extract size bytes for item: %r", data)
# Handle extension separation for local files
store_val = str(
@@ -1168,7 +1188,8 @@ class Table:
col_value,
integer_only=False
)
except Exception:
except Exception as exc:
logger.debug("Failed to format 'size' column value: %r", col_value, exc_info=True)
col_value_str = format_value(col_value)
elif isinstance(col_name,
str) and col_name.strip().lower() == "duration":
@@ -1178,7 +1199,8 @@ class Table:
else:
dur = _format_duration_hms(col_value)
col_value_str = dur or format_value(col_value)
except Exception:
except Exception as exc:
logger.debug("Failed to format 'duration' column value: %r", col_value, exc_info=True)
col_value_str = format_value(col_value)
else:
col_value_str = format_value(col_value)
@@ -1201,7 +1223,7 @@ class Table:
) # Don't display full metadata as column
except Exception:
# Fall back to regular field handling if columns format is unexpected
pass
logger.exception("Failed to process 'columns' dynamic field list: %r", visible_data.get("columns"))
# Only add priority groups if we haven't already filled columns from 'columns' field
if column_count == 0:

View File

@@ -8,6 +8,9 @@ possible and let callers decide whether to `Console.print()` or capture output.
from __future__ import annotations
from typing import Any, Dict, Iterable, Optional
import logging
logger = logging.getLogger(__name__)
from SYS.result_table_api import ColumnSpec, ResultModel, ResultTable, Renderer
@@ -40,11 +43,13 @@ class RichRenderer(Renderer):
if col.format_fn:
try:
cell = col.format_fn(raw)
except Exception:
except Exception as exc:
logger.exception("Column format function failed for '%s': %s", col.header, exc)
cell = str(raw or "")
else:
cell = str(raw or "")
except Exception:
except Exception as exc:
logger.exception("Column extractor failed for '%s': %s", col.header, exc)
cell = ""
cells.append(cell)
table.add_row(*cells)

View File

@@ -29,7 +29,8 @@ try:
except TypeError:
_pretty_install()
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to configure rich pretty-printing")
_STDOUT_CONSOLE = Console(file=sys.stdout)
_STDERR_CONSOLE = Console(file=sys.stderr)
@@ -261,8 +262,10 @@ def render_image_to_console(image_path: str | Path, max_width: int | None = None
console.print(line)
except Exception:
# Silently fail if image cannot be rendered (e.g. missing PIL or corrupted file)
pass
# Emit logs to help diagnose rendering failures (PIL missing, corrupt file, terminal limitations)
from SYS.logger import logger
logger.exception("Failed to render image to console: %s", image_path)
return
def render_item_details_panel(item: Dict[str, Any], *, title: Optional[str] = None) -> None:
@@ -279,7 +282,8 @@ def render_item_details_panel(item: Dict[str, Any], *, title: Optional[str] = No
view.title = ""
view.header_lines = []
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to sanitize ItemDetailView title/header before printing")
# We want to print ONLY the elements from ItemDetailView, so we don't use stdout_console().print(view)
# as that would include the (empty) results panel.

View File

@@ -528,6 +528,7 @@ def get_api_key(config: dict[str, Any], service: str, key_path: str) -> str | No
return None
except Exception:
_format_logger.exception("Failed to resolve nested config key '%s'", key_path)
return None

View File

@@ -9,6 +9,7 @@ from typing import Any, Dict, Optional, Set, TextIO, Sequence
from SYS.config import get_local_storage_path
from SYS.worker_manager import WorkerManager
from SYS.logger import log
class WorkerOutputMirror(io.TextIOBase):
@@ -69,7 +70,8 @@ class WorkerOutputMirror(io.TextIOBase):
try:
self._manager.append_stdout(self._worker_id, text, channel=self._channel)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to append stdout for worker '%s' channel '%s'", self._worker_id, self._channel)
@property
def encoding(self) -> str: # type: ignore[override]
@@ -112,7 +114,8 @@ class WorkerStageSession:
self.stdout_proxy.flush()
self.stderr_proxy.flush()
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to flush worker stdout/stderr proxies for '%s'", self.worker_id)
sys.stdout = self.orig_stdout
sys.stderr = self.orig_stderr
@@ -121,7 +124,8 @@ class WorkerStageSession:
try:
self.manager.disable_logging_for_worker(self.worker_id)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to disable logging for worker '%s'", self.worker_id)
try:
if status == "completed":
@@ -131,14 +135,16 @@ class WorkerStageSession:
self.worker_id, f"{self._error_label}: {error_msg or status}"
)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to log step for worker '%s' status='%s' error='%s'", self.worker_id, status, error_msg)
try:
self.manager.finish_worker(
self.worker_id, result=status or "completed", error_msg=error_msg or ""
)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to finish worker '%s' with status '%s'", self.worker_id, status)
if self.config and self.config.get("_current_worker_id") == self.worker_id:
self.config.pop("_current_worker_id", None)
@@ -177,7 +183,8 @@ class WorkerManagerRegistry:
try:
cls._manager.close()
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to close existing WorkerManager during registry ensure")
cls._manager = WorkerManager(resolved_root, auto_refresh_interval=0.5)
cls._manager_root = resolved_root
@@ -192,7 +199,8 @@ class WorkerManagerRegistry:
reason="CLI session ended unexpectedly; marking worker as failed",
)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to expire running workers during registry ensure")
else:
cls._orphan_cleanup_done = True
@@ -202,7 +210,7 @@ class WorkerManagerRegistry:
return manager
except Exception as exc:
print(f"[worker] Could not initialize worker manager: {exc}", file=sys.stderr)
log(f"[worker] Could not initialize worker manager: {exc}", file=sys.stderr)
return None
@classmethod
@@ -212,7 +220,8 @@ class WorkerManagerRegistry:
try:
cls._manager.close()
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to close WorkerManager during registry.close()")
cls._manager = None
cls._manager_root = None
cls._orphan_cleanup_done = False
@@ -254,7 +263,7 @@ class WorkerStages:
if not tracked:
return None
except Exception as exc:
print(f"[worker] Failed to track {worker_type}: {exc}", file=sys.stderr)
log(f"[worker] Failed to track {worker_type}: {exc}", file=sys.stderr)
return None
if session_worker_ids is not None:
@@ -279,7 +288,8 @@ class WorkerStages:
try:
worker_manager.log_step(worker_id, f"Started {worker_type}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to log start step for worker '%s'", worker_id)
return WorkerStageSession(
manager=worker_manager,

View File

@@ -400,7 +400,7 @@ class WorkerManager:
try:
self.flush_worker_stdout(worker_id)
except Exception:
pass
logger.exception("Failed to flush worker stdout for '%s'", worker_id)
logger.debug(
f"[WorkerManager] Disabled logging for worker: {worker_id}"
@@ -516,7 +516,7 @@ class WorkerManager:
try:
self.flush_worker_stdout(worker_id)
except Exception:
pass
logger.exception("Failed to flush worker stdout for '%s' during finish", worker_id)
kwargs = {
"status": "finished",
"result": result
@@ -900,7 +900,7 @@ class WorkerManager:
try:
self._flush_all_stdout_buffers()
except Exception:
pass
logger.exception("Failed to flush all stdout buffers during WorkerManager.close()")
logger.info("[WorkerManager] Closed")
def _flush_all_stdout_buffers(self) -> None:

View File

@@ -269,45 +269,7 @@ class Store:
# get_backend_instance implementation moved to the bottom of this file to avoid
# instantiating all backends during startup (see function `get_backend_instance`).
def _resolve_backend_name(self, backend_name: str) -> tuple[Optional[str], Optional[str]]:
    """Resolve a user-supplied backend alias to a registered backend name.

    Returns a ``(resolved_name, error_message)`` pair. An exact key match
    wins immediately; otherwise the alias is matched case-insensitively,
    then by store type, then by store-type prefix. A unique hit resolves,
    multiple hits yield an "ambiguous" error message, and no hit at any
    tier returns ``(None, None)`` so the caller can decide how to proceed.
    """
    requested = str(backend_name or "")
    # Fast path: the alias is already a registered backend key.
    if requested in self._backends:
        return requested, None

    requested_norm = _normalize_store_type(requested)

    # Tiered candidate lists, most specific first. Each tier carries the
    # error text to report if it matches more than one backend.
    ci_hits = [
        name for name in self._backends
        if _normalize_store_type(name) == requested_norm
    ]
    type_hits = [
        name for name, store_type in self._backend_types.items()
        if store_type == requested_norm
    ]
    prefix_hits = [
        name for name, store_type in self._backend_types.items()
        if store_type.startswith(requested_norm)
    ]
    attempts = (
        (ci_hits,
         f"Ambiguous store alias '{backend_name}' matches {ci_hits}"),
        (type_hits,
         f"Ambiguous store alias '{backend_name}' matches type '{requested_norm}': {type_hits}"),
        (prefix_hits,
         f"Ambiguous store alias '{backend_name}' matches type prefix '{requested_norm}': {prefix_hits}"),
    )
    for hits, ambiguous_msg in attempts:
        if len(hits) == 1:
            return hits[0], None
        if len(hits) > 1:
            return None, ambiguous_msg
    return None, None
# Duplicate _resolve_backend_name removed — the method is defined once earlier in the class.
def get_backend_error(self, backend_name: str) -> Optional[str]:
    """Return the recorded error for *backend_name*, or None when absent.

    The lookup key is coerced to ``str`` so non-string aliases are
    tolerated; unknown backends simply yield ``None``.
    """
    key = str(backend_name)
    return self._backend_errors.get(key)

30
TUI.py
View File

@@ -29,6 +29,8 @@ from textual.widgets import (
)
from textual.widgets.option_list import Option
import logging
logger = logging.getLogger(__name__)
BASE_DIR = Path(__file__).resolve().parent
REPO_ROOT = BASE_DIR
@@ -75,7 +77,7 @@ def _extract_tag_names(emitted: Sequence[Any]) -> List[str]:
tags.append(val)
continue
except Exception:
pass
logger.exception("Error extracting tag_name in _extract_tag_names")
if isinstance(obj, dict):
# Prefer explicit tag lists
@@ -107,7 +109,7 @@ def _extract_tag_names_from_table(table: Any) -> List[str]:
if payloads:
sources.extend(payloads)
except Exception:
pass
logger.exception("Error while calling table.get_payloads")
rows = getattr(table, "rows", []) or []
for row in rows:
for col in getattr(row, "columns", []) or []:
@@ -289,7 +291,7 @@ class TagEditorPopup(ModalScreen[None]):
try:
app.call_from_thread(app._append_log_line, msg)
except Exception:
pass
logger.exception("Failed to append log line from background thread")
def _log_pipeline_command(stage: str, cmd: str) -> None:
if not cmd:
@@ -377,7 +379,7 @@ class TagEditorPopup(ModalScreen[None]):
self._seeds,
)
except Exception:
pass
logger.exception("Failed to refresh tag overlay")
try:
app.call_from_thread(_refresh_overlay)
@@ -513,7 +515,7 @@ class PipelineHubApp(App):
config = load_config()
SharedArgs._refresh_store_choices_cache(config)
except Exception:
pass
logger.exception("Failed to refresh store choices cache")
self._populate_store_options()
self._load_cmdlet_names()
@@ -535,7 +537,7 @@ class PipelineHubApp(App):
store_display = ", ".join(stores[:10]) + ("..." if len(stores) > 10 else "")
self._append_log_line(f"Startup config: providers={len(provs)} ({prov_display or '(none)'}), stores={len(stores)} ({store_display or '(none)'}), db={db.db_path.name}")
except Exception:
pass
logger.exception("Failed to produce startup config summary")
# ------------------------------------------------------------------
# Actions
@@ -699,7 +701,7 @@ class PipelineHubApp(App):
first = options[0]
return str(getattr(first, "prompt", "") or "")
except Exception:
pass
logger.exception("Error retrieving first suggestion from suggestion list")
return ""
def _populate_store_options(self) -> None:
@@ -715,6 +717,7 @@ class PipelineHubApp(App):
try:
stores = StoreRegistry(config=cfg, suppress_debug=True).list_backends()
except Exception:
logger.exception("Failed to list store backends from StoreRegistry")
stores = []
# Always offer a reasonable default even if config is missing.
@@ -730,7 +733,7 @@ class PipelineHubApp(App):
if (current is None) or (current == "") or (current is Select.BLANK):
self.store_select.value = options[0][1]
except Exception:
pass
logger.exception("Failed to set store select options")
def _get_selected_store(self) -> Optional[str]:
if not self.store_select:
@@ -969,7 +972,7 @@ class PipelineHubApp(App):
subject=payload_subject,
)
except Exception:
pass
logger.exception("Failed to emit tags as table")
def _load_cmdlet_names(self, force: bool = False) -> None:
try:
@@ -981,6 +984,7 @@ class PipelineHubApp(App):
for n in names if str(n).strip()}
)
except Exception:
logger.exception("Failed to load cmdlet names")
self._cmdlet_names = []
def _update_syntax_status(self, text: str) -> None:
@@ -1023,7 +1027,7 @@ class PipelineHubApp(App):
# Fallback for older/newer Textual APIs.
self.suggestion_list.options = [] # type: ignore[attr-defined]
except Exception:
pass
logger.exception("Failed to clear suggestion list options via fallback")
try:
self.suggestion_list.add_options(
@@ -1035,7 +1039,7 @@ class PipelineHubApp(App):
Option(m) for m in matches
] # type: ignore[attr-defined]
except Exception:
pass
logger.exception("Failed to set suggestion list options via fallback")
self.suggestion_list.display = True
@@ -1173,12 +1177,12 @@ class PipelineHubApp(App):
if store_name and not str(seeds.get("store") or "").strip():
seeds["store"] = store_name
except Exception:
pass
logger.exception("Failed to set seed store value")
try:
if file_hash and not str(seeds.get("hash") or "").strip():
seeds["hash"] = file_hash
except Exception:
pass
logger.exception("Failed to set seed hash value")
self.push_screen(
TagEditorPopup(seeds=seeds,

View File

@@ -17,6 +17,8 @@ from Store.registry import _discover_store_classes, _required_keys_for
from ProviderCore.registry import list_providers
from TUI.modalscreen.matrix_room_picker import MatrixRoomPicker
from TUI.modalscreen.selection_modal import SelectionModal
import logging
logger = logging.getLogger(__name__)
class ConfigModal(ModalScreen):
"""A modal for editing the configuration."""
@@ -187,22 +189,23 @@ class ConfigModal(ModalScreen):
try:
self.query_one("#add-tool-btn", Button).display = False
except Exception:
pass
logger.exception("Failed to hide add-tool button in ConfigModal.on_mount")
# Update DB path and last-saved on mount
try:
self.query_one("#config-db-path", Static).update(self._db_path)
except Exception:
pass
logger.exception("Failed to update config DB path display in ConfigModal.on_mount")
try:
mtime = None
try:
mtime = db.db_path.stat().st_mtime
mtime = __import__('datetime').datetime.utcfromtimestamp(mtime).isoformat() + "Z"
except Exception:
logger.exception("Failed to stat DB path for last-saved time")
mtime = None
self.query_one("#config-last-save", Static).update(f"Last saved: {mtime or '(unknown)'}")
except Exception:
pass
logger.exception("Failed to update last-saved display in ConfigModal.on_mount")
self.refresh_view()
def refresh_view(self) -> None:
@@ -236,7 +239,7 @@ class ConfigModal(ModalScreen):
self.query_one("#back-btn", Button).display = (self.editing_item_name is not None)
self.query_one("#save-btn", Button).display = (self.editing_item_name is not None or self.current_category == "globals")
except Exception:
pass
logger.exception("Failed to update visibility of config modal action buttons")
render_id = self._render_id
@@ -445,7 +448,7 @@ class ConfigModal(ModalScreen):
if k:
provider_schema_map[k.upper()] = field_def
except Exception:
pass
logger.exception("Failed to retrieve provider config_schema")
# Fetch Tool schema
if item_type == "tool":
try:
@@ -457,7 +460,7 @@ class ConfigModal(ModalScreen):
if k:
provider_schema_map[k.upper()] = field_def
except Exception:
pass
logger.exception("Failed to retrieve tool config_schema")
# Use columns for better layout of inputs with paste buttons
container.mount(Label("Edit Settings"))
@@ -629,7 +632,7 @@ class ConfigModal(ModalScreen):
row.mount(Button("Paste", id=f"paste-{inp_id}", classes="paste-btn"))
idx += 1
except Exception:
pass
logger.exception("Failed to build required config inputs for provider/tool")
if (
item_type == "provider"
@@ -700,15 +703,16 @@ class ConfigModal(ModalScreen):
seen_ids.add(rid)
deduped.append(r)
except Exception:
logger.exception("Failed to process a matrix room entry while deduplicating")
continue
if self._matrix_inline_list is not None and deduped:
try:
self._render_matrix_rooms_inline(deduped)
except Exception:
pass
logger.exception("Failed to render matrix inline rooms")
except Exception:
pass
logger.exception("Failed to fetch or process matrix rooms for inline rendering")
except Exception:
self._matrix_inline_checkbox_map = {}
self._matrix_inline_list = None
@@ -785,7 +789,7 @@ class ConfigModal(ModalScreen):
try:
self.config_data = reload_config()
except Exception:
pass
logger.exception("Failed to reload config after save conflict")
self._editor_snapshot = None
self.editing_item_name = None
self.editing_item_type = None
@@ -807,7 +811,7 @@ class ConfigModal(ModalScreen):
try:
self.config_data = reload_config()
except Exception:
pass
logger.exception("Failed to reload config after durable save")
if saved == 0:
msg = f"Configuration saved (no rows changed) to {db.db_path.name}"
@@ -816,7 +820,7 @@ class ConfigModal(ModalScreen):
try:
self.notify(msg, timeout=6)
except Exception:
pass
logger.exception("Failed to show notification message in ConfigModal")
# Return to the main list view within the current category
self.editing_item_name = None
@@ -828,7 +832,7 @@ class ConfigModal(ModalScreen):
try:
log(f"Durable save failed: {exc}")
except Exception:
pass
logger.exception("Failed to call log() for durable save error")
elif bid in self._button_id_map:
action, itype, name = self._button_id_map[bid]
if action == "edit":
@@ -870,7 +874,7 @@ class ConfigModal(ModalScreen):
if cls.config_schema():
options.append(stype)
except Exception:
pass
logger.exception("Failed to inspect store class config_schema for '%s'", stype)
self.app.push_screen(SelectionModal("Select Store Type", options), callback=self.on_store_type_selected)
elif bid == "add-provider-btn":
provider_names = list(list_providers().keys())
@@ -883,7 +887,7 @@ class ConfigModal(ModalScreen):
if pcls.config_schema():
options.append(ptype)
except Exception:
pass
logger.exception("Failed to inspect provider class config_schema for '%s'", ptype)
self.app.push_screen(SelectionModal("Select Provider Type", options), callback=self.on_provider_type_selected)
elif bid == "add-tool-btn":
# Discover tool modules that advertise a config_schema()
@@ -918,22 +922,22 @@ class ConfigModal(ModalScreen):
cb = self.query_one(f"#{checkbox_id}", Checkbox)
cb.value = True
except Exception:
pass
logger.exception("Failed to set matrix inline checkbox to True for '%s'", checkbox_id)
try:
self.query_one("#matrix-inline-save", Button).disabled = False
except Exception:
pass
logger.exception("Failed to enable matrix inline save button")
elif bid == "matrix-inline-clear":
for checkbox_id in list(self._matrix_inline_checkbox_map.keys()):
try:
cb = self.query_one(f"#{checkbox_id}", Checkbox)
cb.value = False
except Exception:
pass
logger.exception("Failed to set matrix inline checkbox to False for '%s'", checkbox_id)
try:
self.query_one("#matrix-inline-save", Button).disabled = True
except Exception:
pass
logger.exception("Failed to disable matrix inline save button")
elif bid == "matrix-inline-save":
selected: List[str] = []
for checkbox_id, room_id in self._matrix_inline_checkbox_map.items():
@@ -942,7 +946,7 @@ class ConfigModal(ModalScreen):
if cb.value and room_id:
selected.append(room_id)
except Exception:
pass
logger.exception("Failed to read matrix inline checkbox '%s'", checkbox_id)
if not selected:
if self._matrix_status:
self._matrix_status.update("No default rooms were saved.")
@@ -963,7 +967,7 @@ class ConfigModal(ModalScreen):
try:
self.query_one("#matrix-inline-save", Button).disabled = True
except Exception:
pass
logger.exception("Failed to disable matrix inline save button")
self.refresh_view()
@@ -1096,7 +1100,7 @@ class ConfigModal(ModalScreen):
if key:
new_config[key] = field_def.get("default", "")
except Exception:
pass
logger.exception("Failed to load config_schema for tool '%s'", tname)
self.config_data["tool"][tname] = new_config
self.editing_item_type = "tool"
@@ -1247,7 +1251,7 @@ class ConfigModal(ModalScreen):
self._matrix_status.update("Matrix test skipped: please set both 'homeserver' and 'access_token' before testing.")
return
except Exception:
pass
logger.exception("Failed to check matrix configuration before testing")
if self._matrix_status:
self._matrix_status.update("Saving configuration before testing…")
@@ -1280,7 +1284,7 @@ class ConfigModal(ModalScreen):
try:
debug(f"[matrix] Test connection failed: {exc}\n{tb}")
except Exception:
pass
logger.exception("Failed to debug matrix test failure")
msg = str(exc) or "Matrix test failed"
m_lower = msg.lower()
@@ -1328,7 +1332,7 @@ class ConfigModal(ModalScreen):
if isinstance(parsed, (list, tuple, dict)):
return self._normalize_cached_raw(parsed if isinstance(parsed, (list, tuple)) else [parsed])
except Exception:
pass
logger.exception("Failed to parse cached_rooms JSON for provider matrix")
# Try Python literal eval (accepts single quotes, repr-style lists)
try:
@@ -1338,7 +1342,7 @@ class ConfigModal(ModalScreen):
if isinstance(parsed, (list, tuple, dict)):
return self._normalize_cached_raw(parsed if isinstance(parsed, (list, tuple)) else [parsed])
except Exception:
pass
logger.exception("Failed to parse cached_rooms as Python literal for provider matrix")
# Try to extract dict-like pairs for room_id/name when the string looks like
# a Python repr or partial dict fragment (e.g., "'room_id': '!r1', 'name': 'Room'"
@@ -1362,11 +1366,11 @@ class ConfigModal(ModalScreen):
if ids:
return [{"room_id": rid, "name": ""} for rid in ids]
except Exception:
pass
logger.exception("Failed to extract cached_rooms pairs or ids for provider matrix")
return []
except Exception:
pass
logger.exception("Failed to parse cached_rooms for provider matrix")
return []
def _normalize_cached_raw(self, parsed: List[Any]) -> List[Dict[str, Any]]:
@@ -1383,6 +1387,7 @@ class ConfigModal(ModalScreen):
if s:
out.append({"room_id": s, "name": ""})
except Exception:
logger.exception("Failed to normalize cached_rooms entry: %r", it)
continue
return out
@@ -1406,7 +1411,7 @@ class ConfigModal(ModalScreen):
self._matrix_status.update("Load skipped: please set both 'homeserver' and 'access_token' before loading rooms.")
return
except Exception:
pass
logger.exception("Failed to check matrix configuration before load")
if self._matrix_status:
self._matrix_status.update("Saving configuration before loading rooms…")
@@ -1437,7 +1442,7 @@ class ConfigModal(ModalScreen):
try:
debug(f"[matrix] Load rooms failed: {exc}\n{tb}")
except Exception:
pass
logger.exception("Failed to debug matrix load failure")
msg = str(exc) or "Matrix load failed"
if "auth" in msg.lower():
msg = msg + ". Please verify your access token and try again."
@@ -1453,14 +1458,14 @@ class ConfigModal(ModalScreen):
try:
self.notify(full_msg, severity="error", timeout=8)
except Exception:
pass
logger.exception("Failed to show Matrix load failure notification")
return
# Populate inline list
try:
self._render_matrix_rooms_inline(rooms)
except Exception:
pass
logger.exception("Failed to render inline matrix rooms")
# Persist cached rooms so they are available on next editor open
try:
@@ -1474,15 +1479,15 @@ class ConfigModal(ModalScreen):
try:
save_config(self.config_data)
except Exception:
pass
logger.exception("Failed to persist cached matrix rooms via save_config() fallback")
if self._matrix_status:
self._matrix_status.update(f"Loaded and cached {len(rooms)} room(s).")
try:
self.notify(f"Loaded {len(rooms)} rooms and cached the results", timeout=5)
except Exception:
pass
logger.exception("Failed to notify loaded-and-cached message for Matrix rooms")
except Exception:
pass
logger.exception("Failed to cache Matrix rooms after load")
def _open_matrix_room_picker(
self,
@@ -1522,15 +1527,15 @@ class ConfigModal(ModalScreen):
try:
save_config(self.config_data)
except Exception:
pass
logger.exception("Failed to persist cached matrix rooms via save_config() fallback")
if self._matrix_status:
self._matrix_status.update(f"Loaded {len(rooms)} rooms (cached)")
try:
self.notify(f"Loaded {len(rooms)} rooms and cached the results", timeout=5)
except Exception:
pass
logger.exception("Failed to notify loaded-and-cached message for Matrix rooms")
except Exception:
pass
logger.exception("Failed to cache Matrix rooms when inline view unavailable")
return
# Clear current entries
@@ -1549,7 +1554,7 @@ class ConfigModal(ModalScreen):
save_btn = self.query_one("#matrix-inline-save", Button)
save_btn.disabled = True
except Exception:
pass
logger.exception("Failed to disable matrix inline save button when no rooms returned")
return
any_selected = False
@@ -1597,7 +1602,7 @@ class ConfigModal(ModalScreen):
save_btn = self.query_one("#matrix-inline-save", Button)
save_btn.disabled = not any_selected
except Exception:
pass
logger.exception("Failed to set matrix inline save button disabled state")
def _resolve_matrix_rooms_by_ids(self, ids: Iterable[str]) -> List[Dict[str, Any]]:
"""
@@ -1628,7 +1633,7 @@ class ConfigModal(ModalScreen):
try:
debug(f"[config] failed to resolve matrix room names: {exc}")
except Exception:
pass
logger.exception("Failed to debug matrix name resolution")
return []
def on_matrix_rooms_selected(self, result: Any = None) -> None:
@@ -1683,7 +1688,7 @@ class ConfigModal(ModalScreen):
try:
self.query_one("#matrix-inline-save", Button).disabled = not any_selected
except Exception:
pass
logger.exception("Failed to update matrix inline save button")
def on_input_changed(self, event: Input.Changed) -> None:
if event.input.id:
@@ -1722,11 +1727,11 @@ class ConfigModal(ModalScreen):
try:
self.query_one("#config-db-path", Static).update(self._db_path)
except Exception:
pass
logger.exception("Failed to update config db path label")
try:
self.query_one("#config-last-save", Static).update("Last saved: (saving...)")
except Exception:
pass
logger.exception("Failed to update config last-save label")
log(f"ConfigModal scheduled save (changed={changed})")
return changed
@@ -1775,7 +1780,7 @@ class ConfigModal(ModalScreen):
try:
self.config_data = reload_config()
except Exception:
pass
logger.exception("Failed to reload config after save completion")
# Update last-saved label with file timestamp for visibility
db_mtime = None
@@ -1794,19 +1799,19 @@ class ConfigModal(ModalScreen):
try:
self.query_one("#config-last-save", Static).update(label_text)
except Exception:
pass
logger.exception("Failed to update last-save label with timestamp")
except Exception:
pass
logger.exception("Failed to compute last-save label text")
try:
self.refresh_view()
except Exception:
pass
logger.exception("Failed to refresh config editor view after save completion")
try:
self.notify(f"Configuration saved ({changed} change(s)) to {db.db_path.name}", timeout=5)
except Exception:
pass
logger.exception("Failed to show configuration saved notification")
else:
# No TUI available; log instead of updating UI
log(f"Configuration saved ({changed} change(s)) to {db.db_path.name}")
@@ -1818,17 +1823,17 @@ class ConfigModal(ModalScreen):
try:
self.notify(f"Save failed: {error}", severity="error", timeout=10)
except Exception:
pass
logger.exception("Failed to show save failed notification")
try:
self.config_data = reload_config()
except Exception:
pass
logger.exception("Failed to reload config after save failure")
try:
self.refresh_view()
except Exception:
pass
logger.exception("Failed to refresh view after save failure")
else:
log(f"Save failed: {error}")
@@ -1867,7 +1872,7 @@ class ConfigModal(ModalScreen):
if rk not in required_keys:
required_keys.append(rk)
except Exception:
pass
logger.exception("Failed to inspect provider class '%s' for required keys", item_name)
section = self.config_data.get("provider", {}).get(item_name, {})
elif item_type == "tool":
try:
@@ -1880,7 +1885,7 @@ class ConfigModal(ModalScreen):
if k and k not in required_keys:
required_keys.append(k)
except Exception:
pass
logger.exception("Failed to inspect tool module 'tool.%s' for required keys", item_name)
section = self.config_data.get("tool", {}).get(item_name, {})
# Check required keys

View File

@@ -630,7 +630,7 @@ class DownloadModal(ModalScreen):
f"Download failed: {error_reason}",
)
except Exception:
pass
logger.exception("Failed to finish worker during download failure handling")
# Also append detailed error info to worker stdout for visibility
if worker:
@@ -799,7 +799,7 @@ class DownloadModal(ModalScreen):
f"Download error: {str(e)}",
)
except Exception:
pass
logger.exception("Failed to finish worker during download error handling")
self.app.call_from_thread(self._hide_progress)
self.app.call_from_thread(self.dismiss)
return
@@ -1091,7 +1091,7 @@ class DownloadModal(ModalScreen):
try:
worker.finish("error", f"Download failed: {str(e)}")
except Exception:
pass
logger.exception("Failed to finish worker on download submit error")
self.app.call_from_thread(self._hide_progress)
self.app.call_from_thread(
self.app.notify,

View File

@@ -8,6 +8,8 @@ from textual.screen import ModalScreen
from textual.widgets import Static, Button, Checkbox, ListView, ListItem
from textual import work
from rich.text import Text
import logging
logger = logging.getLogger(__name__)
class MatrixRoomPicker(ModalScreen[List[str]]):
@@ -121,7 +123,7 @@ class MatrixRoomPicker(ModalScreen[List[str]]):
# Stop propagation so parent handlers (ConfigModal) don't react.
event.stop()
except Exception:
pass
logger.exception("Failed to stop ListView.Selected event propagation")
def _set_status(self, text: str) -> None:
if self._status_widget:
@@ -137,6 +139,7 @@ class MatrixRoomPicker(ModalScreen[List[str]]):
any_selected = True
break
except Exception:
logger.exception("Error querying checkbox in MatrixRoomPicker; skipping")
continue
if self._save_button:
self._save_button.disabled = not any_selected
@@ -214,7 +217,7 @@ class MatrixRoomPicker(ModalScreen[List[str]]):
cb = self.query_one(f"#{checkbox_id}", Checkbox)
cb.value = True
except Exception:
pass
logger.exception("Failed to set checkbox value in MatrixRoomPicker")
if self._save_button:
self._save_button.disabled = False
elif event.button.id == "matrix-room-clear":
@@ -223,7 +226,7 @@ class MatrixRoomPicker(ModalScreen[List[str]]):
cb = self.query_one(f"#{checkbox_id}", Checkbox)
cb.value = False
except Exception:
pass
logger.exception("Failed to set checkbox value to False in MatrixRoomPicker")
if self._save_button:
self._save_button.disabled = True
elif event.button.id == "matrix-room-save":
@@ -234,5 +237,5 @@ class MatrixRoomPicker(ModalScreen[List[str]]):
if cb.value and room_id:
selected.append(room_id)
except Exception:
pass
logger.exception("Failed to read checkbox state for '%s' while saving MatrixRoomPicker selection", checkbox_id)
self.dismiss(selected)

View File

@@ -501,7 +501,7 @@ class WorkersModal(ModalScreen):
try:
self.stdout_display.cursor_location = (len(combined_text) - 1, 0)
except Exception:
pass
logger.exception("Failed to set stdout_display cursor location")
logger.info("[workers-modal] Updated stdout display successfully")
except Exception as e:
logger.error(

View File

@@ -10,6 +10,7 @@ import contextlib
import io
import shlex
import sys
import traceback
from pathlib import Path
from dataclasses import dataclass, field
from typing import Any, Callable, Dict, List, Optional, Sequence
@@ -25,8 +26,9 @@ from SYS import pipeline as ctx
from CLI import ConfigLoader
from SYS.pipeline import PipelineExecutor
from SYS.worker import WorkerManagerRegistry
from SYS.logger import set_debug
from SYS.logger import set_debug, debug
from SYS.rich_display import capture_rich_output
import traceback
from SYS.result_table import Table
@@ -120,7 +122,7 @@ class PipelineRunner:
result.stderr = syntax_error.message
return result
except Exception:
pass
debug(traceback.format_exc())
try:
tokens = shlex.split(normalized)
@@ -137,11 +139,12 @@ class PipelineRunner:
try:
set_debug(bool(config.get("debug", False)))
except Exception:
pass
debug(traceback.format_exc())
try:
self._worker_manager = WorkerManagerRegistry.ensure(config)
except Exception:
debug(traceback.format_exc())
self._worker_manager = None
ctx.reset()
@@ -153,7 +156,7 @@ class PipelineRunner:
seeds = [seeds]
ctx.set_last_result_items_only(list(seeds))
except Exception:
pass
debug(traceback.format_exc())
stdout_buffer = io.StringIO()
stderr_buffer = io.StringIO()
@@ -173,7 +176,7 @@ class PipelineRunner:
try:
ctx.clear_current_command_text()
except Exception:
pass
debug(traceback.format_exc())
result.stdout = stdout_buffer.getvalue()
result.stderr = stderr_buffer.getvalue()
@@ -268,7 +271,7 @@ class PipelineRunner:
items_copy = items.copy() if isinstance(items, list) else list(items) if items else []
out.append((t, items_copy, subj))
except Exception:
pass
debug(traceback.format_exc())
return out
snap["result_table_history"] = _copy_history(state.result_table_history)
@@ -306,7 +309,7 @@ class PipelineRunner:
out.append((t, items_copy, subj))
setattr(state, key, out)
except Exception:
pass
debug(traceback.format_exc())
try:
if "live_progress" in snapshot:

View File

@@ -203,7 +203,7 @@ def _run_cli(clean_args: List[str]) -> int:
error_msg += f" - detected repo root: {repo}\n"
cli_path = repo / "CLI.py"
error_msg += f" - CLI.py exists at {cli_path}: {cli_path.exists()}\n"
except:
except Exception:
pass
error_msg += (
"\nRemedy: Run 'pip install -e scripts' from the project root or re-run the bootstrap script.\n"

View File

@@ -0,0 +1,109 @@
"""Migration utility: convert Python literal config values in the DB into canonical JSON.
Usage:
python scripts/migrate_config_literals.py [--apply] [--backup=path] [--quiet]
By default the script runs in dry-run mode and prints candidate rows it would change.
Use --apply to persist changes. --backup writes a JSON file listing changed rows before applying.
"""
from __future__ import annotations
import argparse
import sqlite3
import json
import ast
from pathlib import Path
from typing import Any, Dict, List, Tuple
DB = Path("medios.db")
def _is_json_like(s: str) -> bool:
if not isinstance(s, str):
return False
s = s.strip()
if not s:
return False
return s[0] in '{["' or s.lower() in ("true", "false", "null") or s[0].isdigit() or s[0] == "'"
def find_candidates(conn: sqlite3.Connection) -> List[Tuple[int, str, str, str, str, str]]:
    """Scan the config table for values stored as Python literals, not JSON.

    A row qualifies when its value looks literal-ish (per _is_json_like), does
    NOT already parse as JSON, but does parse via ast.literal_eval into a
    JSON-serializable object.

    Returns (rowid, category, subtype, item_name, key, value) tuples with the
    original (unconverted) value text.
    """
    candidates: List[Tuple[int, str, str, str, str, str]] = []
    cursor = conn.cursor()
    cursor.execute("SELECT rowid, category, subtype, item_name, key, value FROM config")
    for rowid, category, subtype, item_name, key, value in cursor.fetchall():
        if value is None:
            continue
        text = str(value)
        if not _is_json_like(text):
            continue
        try:
            json.loads(text)
        except Exception:
            pass  # not valid JSON — a migration candidate, keep checking
        else:
            continue  # already canonical JSON; nothing to do
        try:
            parsed = ast.literal_eval(text)
            json.dumps(parsed)  # keep only JSON-serializable literals
        except Exception:
            continue
        candidates.append((rowid, category, subtype, item_name, key, text))
    return candidates
def apply_migration(conn: sqlite3.Connection, candidates: List[Tuple[int, str, str, str, str, str]]) -> List[Tuple[int, str, str, str, str, str]]:
    """Rewrite each candidate row's value as canonical JSON and commit once.

    Rows whose value no longer parses via ast.literal_eval are skipped
    silently. Returns the rows actually updated, with the value field replaced
    by the new JSON text.
    """
    cursor = conn.cursor()
    updated: List[Tuple[int, str, str, str, str, str]] = []
    for rowid, category, subtype, item_name, key, value in candidates:
        try:
            canonical = json.dumps(ast.literal_eval(value), ensure_ascii=False)
        except Exception:
            continue
        cursor.execute("UPDATE config SET value = ? WHERE rowid = ?", (canonical, rowid))
        updated.append((rowid, category, subtype, item_name, key, canonical))
    conn.commit()
    return updated
def main() -> int:
    """CLI entry point: report (and optionally apply) config literal migration.

    Dry-run by default; --apply persists changes, --backup writes the changed
    rows to a JSON file first, --quiet suppresses progress output.

    Returns a process exit code: 0 on success, 1 when the DB file is missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--apply", action="store_true", help="Persist changes to DB")
    parser.add_argument("--backup", type=str, default=None, help="Path to write backup JSON of changed rows")
    parser.add_argument("--quiet", action="store_true", help="Minimize output")
    args = parser.parse_args()

    if not DB.exists():
        # sqlite3.connect() would silently create an empty DB and then fail on
        # the missing `config` table; fail fast with a clear message instead.
        print(f"Config DB not found: {DB}")
        return 1

    conn = sqlite3.connect(str(DB))
    try:
        candidates = find_candidates(conn)
        if not args.quiet:
            print(f"Found {len(candidates)} candidate rows for migration")
            # Cap the preview at 50 rows to keep output readable on large DBs.
            for rowid, cat, sub, name, key, val in candidates[:50]:
                print(f"row {rowid}: {cat}.{sub}.{name} {key} -> {val[:200]!r}")
        if not candidates:
            return 0
        if args.backup:
            out_path = Path(args.backup)
            data = [
                dict(rowid=r[0], category=r[1], subtype=r[2], item_name=r[3], key=r[4], value=r[5])
                for r in candidates
            ]
            out_path.write_text(json.dumps(data, ensure_ascii=False, indent=2), encoding="utf-8")
            if not args.quiet:
                print(f"Wrote backup to {out_path}")
        if args.apply:
            changed = apply_migration(conn, candidates)
            if not args.quiet:
                print(f"Applied migration to {len(changed)} rows")
            return 0
        if not args.quiet:
            print("Dry-run; re-run with --apply to persist changes")
        return 0
    finally:
        # Original leaked the connection; always release it.
        conn.close()
# Script entry point: propagate main()'s return value as the process exit code.
if __name__ == "__main__":
    raise SystemExit(main())

View File

@@ -428,7 +428,8 @@ class FlorenceVisionTool:
if self._model is not None and not hasattr(self._model, "_supports_sdpa"):
setattr(self._model, "_supports_sdpa", False)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set model compatibility flag _supports_sdpa")
try:
self._model.to(device) # type: ignore[union-attr]
@@ -439,7 +440,8 @@ class FlorenceVisionTool:
try:
self._model.eval() # type: ignore[union-attr]
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set Florence model to eval mode")
try:
md = getattr(self._model, "device", None)
@@ -450,7 +452,8 @@ class FlorenceVisionTool:
dt = None
debug(f"[florencevision] Model loaded: device={md} param_dtype={dt}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to inspect Florence model device/dtype")
def tags_for_image(self, media_path: Path) -> List[str]:
"""Return Florence-derived tags for an image.
@@ -472,7 +475,8 @@ class FlorenceVisionTool:
try:
debug(f"[florencevision] Task prompt: {prompt}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to emit debug Task prompt for FlorenceVision")
max_tags = max(0, int(self.defaults.max_tags or 0))
@@ -487,7 +491,8 @@ class FlorenceVisionTool:
try:
debug(f"[florencevision] Image loaded: mode={image.mode} size={image.width}x{image.height}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to emit debug for image load")
processor = self._processor
model = self._model
@@ -544,19 +549,22 @@ class FlorenceVisionTool:
)
continue
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to debug tensor shape for processor key '%s'", k)
if isinstance(v, (list, tuple)):
has_none = any(x is None for x in v)
debug(f"[florencevision] {k}: {type(v).__name__} len={len(v)} has_none={has_none}")
continue
debug(f"[florencevision] {k}: type={type(v).__name__}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed while inspecting processor output keys")
try:
inputs = inputs.to(model.device) # type: ignore[attr-defined]
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to move processor inputs to device %s", getattr(model, 'device', None))
# Align floating-point input tensors with the model's parameter dtype.
try:
@@ -575,7 +583,8 @@ class FlorenceVisionTool:
except Exception:
continue
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to inspect/align model dtype for Florence inputs")
try:
gen_inputs_all = {k: v for k, v in dict(inputs).items() if v is not None}
@@ -602,7 +611,8 @@ class FlorenceVisionTool:
):
gen_inputs["attention_mask"] = attention_mask
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to reconcile attention mask shape with input_ids for Florence processor")
try:
debug(
@@ -612,18 +622,21 @@ class FlorenceVisionTool:
f"pixel_attention_mask={'pixel_attention_mask' in forward_params}"
)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to debug model forward supports")
try:
gen_inputs.setdefault("use_cache", False)
gen_inputs.setdefault("num_beams", 1)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set default gen_inputs values")
try:
debug(f"[florencevision] generate kwargs: {sorted(list(gen_inputs.keys()))}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to debug generate kwargs")
pv = gen_inputs.get("pixel_values")
if pv is None:
@@ -654,7 +667,8 @@ class FlorenceVisionTool:
if not hasattr(model, "_supports_sdpa"):
setattr(model, "_supports_sdpa", False)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to patch model _supports_sdpa flag in retry handler")
generated_ids = _do_generate(gen_inputs)
elif "NoneType" in msg and "shape" in msg:
retry_inputs = dict(gen_inputs)
@@ -676,7 +690,8 @@ class FlorenceVisionTool:
):
retry_inputs["attention_mask"] = am
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed while filling retry_inputs attention_mask in AttributeError handler")
try:
import torch
@@ -692,14 +707,16 @@ class FlorenceVisionTool:
elif "pixel_attention_mask" in forward_params and "pixel_attention_mask" not in retry_inputs:
retry_inputs["pixel_attention_mask"] = mask
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to build mask or adjust retry_inputs in AttributeError handler")
try:
debug(
f"[florencevision] generate retry kwargs: {sorted(list(retry_inputs.keys()))}"
)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to debug generate retry kwargs")
generated_ids = _do_generate(retry_inputs)
else:
@@ -708,7 +725,8 @@ class FlorenceVisionTool:
try:
debug(f"[florencevision] generated_ids type={type(generated_ids).__name__}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to debug generated_ids type")
seq = getattr(generated_ids, "sequences", generated_ids)
generated_text = processor.batch_decode(seq, skip_special_tokens=False)[0]
@@ -719,7 +737,8 @@ class FlorenceVisionTool:
debug(f"[florencevision] prompt run failed: {type(exc).__name__}: {exc}")
debug("[florencevision] traceback:\n" + traceback.format_exc())
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to emit debug for prompt run failure: %s", exc)
raise
parsed = None
@@ -766,12 +785,14 @@ class FlorenceVisionTool:
debug(f"[florencevision] post_process[{k!r}] type={type(parsed.get(k)).__name__}")
debug("[florencevision] post_process[key] repr:\n" + _debug_repr(parsed.get(k)))
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed while debugging parsed post_process output for prompt %s", task_prompt)
else:
debug(f"[florencevision] post_process_generation: type={type(parsed).__name__}")
debug("[florencevision] post_process repr:\n" + _debug_repr(parsed))
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to post-process generated output for prompt %s", task_prompt)
return generated_text, parsed, seq
@@ -800,7 +821,8 @@ class FlorenceVisionTool:
try:
debug(f"[florencevision] candidate label strings ({len(labels)}): {labels!r}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to emit candidate label strings debug")
out: List[str] = []
seen: set[str] = set()
@@ -848,7 +870,8 @@ class FlorenceVisionTool:
for raw_lab, cleaned, reason in dropped:
debug(f"[florencevision] drop reason={reason} raw={raw_lab!r} cleaned={cleaned!r}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to emit cleaned/dropped tags debug info")
return labels, caption_candidates, out, dropped
@@ -871,7 +894,12 @@ class FlorenceVisionTool:
try:
return max(cleaned, key=lambda s: len(str(s)), default=None)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to choose best caption from cleaned candidates")
try:
return max(raw, key=lambda s: len(str(s)), default=None)
except Exception:
return None
try:
return max(raw, key=lambda s: len(str(s)), default=None)
except Exception:
@@ -936,7 +964,8 @@ class FlorenceVisionTool:
try:
debug(f"[florencevision] grounding prompt: {grounding_prompt}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to emit grounding prompt debug")
grd_text, grd_parsed, _grd_seq = _run_prompt(grounding_prompt)
_grd_labels, grd_captions, grd_cleaned, _grd_dropped = _extract_labels_and_captions(grounding_prompt, grd_text, grd_parsed)
@@ -962,6 +991,8 @@ class FlorenceVisionTool:
is_combo = "<|detailed_caption|>" in prompt and "<|grounding|>" in prompt
only_task_tokens = not final_tags or all(t in {"object_detection", "grounding", "tag"} for t in final_tags)
except Exception:
from SYS.logger import logger
logger.exception("Failed to compute is_combo/only_task_tokens for prompt '%s'", prompt)
is_combo = False
only_task_tokens = False
@@ -973,13 +1004,15 @@ class FlorenceVisionTool:
try:
self.defaults.task = "<|detailed_caption|>"
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set self.defaults.task to '<|detailed_caption|>' during od retry")
final_tags = self.tags_for_image(media_path)
finally:
try:
self.defaults.task = original_task
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to restore self.defaults.task after od retry")
self._od_tag_retrying = False
self._last_caption = caption_text if caption_text else None

View File

@@ -48,7 +48,8 @@ def _resolve_out_dir(arg_outdir: Optional[Union[str, Path]]) -> Path:
try:
p.mkdir(parents=True, exist_ok=True)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to create resolved output dir %s", p)
return p
except Exception:
return Path(tempfile.mkdtemp(prefix="pwdl_"))
@@ -425,17 +426,20 @@ def config_schema() -> List[Dict[str, Any]]:
if context is not None:
context.close()
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to close Playwright context")
try:
if browser is not None:
browser.close()
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to close Playwright browser")
try:
if pw is not None:
pw.stop()
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to stop Playwright engine")
def goto(self, page: Any, url: str) -> None:
"""Navigate with configured timeout."""
@@ -564,7 +568,8 @@ def config_schema() -> List[Dict[str, Any]]:
page.mouse.move(box['x'] + box['width'] / 2, box['y'] + box['height'] / 2)
page.mouse.click(box['x'] + box['width'] / 2, box['y'] + box['height'] / 2)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to perform mouse click for selector '%s'", selector)
resp = page.wait_for_response(
lambda r: r.status == 200 and any(k.lower() == 'content-disposition' for k in r.headers.keys()),
@@ -583,7 +588,8 @@ def config_schema() -> List[Dict[str, Any]]:
try:
debug(f"[playwright] attempt failed (headless={mode}): {traceback.format_exc()}")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to emit debug info for Playwright attempt failure")
continue
return PlaywrightDownloadResult(ok=False, error=last_error or "no download captured")
@@ -596,7 +602,8 @@ def config_schema() -> List[Dict[str, Any]]:
f"nav_timeout_ms={self.defaults.navigation_timeout_ms}"
)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to debug_dump Playwright defaults")
def _wait_for_block_clear(self, page: Any, timeout_ms: int = 8000) -> bool:
try:

View File

@@ -131,7 +131,8 @@ def _build_supported_domains() -> set[str]:
domains = extract_domains(regex)
_SUPPORTED_DOMAINS.update(domains)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to build supported domains from yt-dlp extractors")
return _SUPPORTED_DOMAINS
@@ -299,7 +300,8 @@ def _add_browser_cookies_if_available(options: Dict[str, Any], preferred_browser
log(f"Requested browser cookie DB '{preferred_browser}' not found; falling back to autodetect.")
_BROWSER_COOKIE_WARNING_EMITTED = True
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to check browser cookie path for preferred browser '%s'", preferred_browser)
# Auto-detect in common order (chrome/chromium/brave)
for candidate in ("chrome", "chromium", "brave"):
@@ -308,6 +310,8 @@ def _add_browser_cookies_if_available(options: Dict[str, Any], preferred_browser
options["cookiesfrombrowser"] = [candidate]
return
except Exception:
from SYS.logger import logger
logger.exception("Error while checking cookie path for candidate browser '%s'", candidate)
continue
if not _BROWSER_COOKIE_WARNING_EMITTED:
@@ -650,7 +654,8 @@ def format_for_table_selection(
if vcodec != "none" and acodec == "none" and format_id:
selection_format_id = f"{format_id}+ba"
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to compute selection_format_id for format: %s", fmt)
# Format file size
size_str = ""
@@ -661,7 +666,8 @@ def format_for_table_selection(
size_mb = float(size_bytes) / (1024 * 1024)
size_str = f"{size_prefix}{size_mb:.1f}MB"
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to compute size string for format: %s", fmt)
# Build description
desc_parts: List[str] = []
@@ -755,7 +761,8 @@ class YtDlpTool:
if resolved is not None and resolved.is_file():
return resolved
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to initialize cookiefile using resolve_cookies_path")
return None
def resolve_height_selector(self, format_str: Optional[str]) -> Optional[str]:
@@ -908,13 +915,15 @@ class YtDlpTool:
if bundled_ffmpeg_dir.exists():
base_options.setdefault("ffmpeg_location", str(bundled_ffmpeg_dir))
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to inspect bundled ffmpeg directory")
try:
if os.name == "nt":
base_options.setdefault("file_access_retries", 40)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set Windows-specific yt-dlp options")
if opts.cookies_path and opts.cookies_path.is_file():
base_options["cookiefile"] = str(opts.cookies_path)
@@ -948,7 +957,8 @@ class YtDlpTool:
opts = _dc.replace(opts, mode="audio", ytdl_format=None)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set opts mode to audio via dataclasses.replace")
elif opts.ytdl_format == "video":
try:
opts = opts._replace(mode="video", ytdl_format=None)
@@ -958,7 +968,8 @@ class YtDlpTool:
opts = _dc.replace(opts, mode="video", ytdl_format=None)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set opts mode to video via dataclasses.replace")
if opts.no_playlist:
base_options["noplaylist"] = True
@@ -978,7 +989,8 @@ class YtDlpTool:
opts = _dc.replace(opts, mode="audio")
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set opts mode to audio via dataclasses.replace (configured default)")
ytdl_format = None
else:
# Leave ytdl_format None so that default_format(opts.mode)
@@ -1130,7 +1142,8 @@ class YtDlpTool:
try:
debug("yt-dlp argv: " + " ".join(str(a) for a in argv))
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to debug-print yt-dlp CLI arguments")
def config_schema() -> List[Dict[str, Any]]:
@@ -1150,6 +1163,8 @@ def config_schema() -> List[Dict[str, Any]]:
if _browser_cookie_path_for(b) is not None:
browser_choices.append(b)
except Exception:
from SYS.logger import logger
logger.exception("Error while checking cookie path for browser '%s'", b)
continue
return [
@@ -1410,7 +1425,8 @@ def _download_with_sections_via_cli(
try:
_set_pipe_percent(current)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set pipeline percent to %d", current)
def start(self) -> None:
if self._thread is not None or self._start_pct >= self._max_pct:
@@ -1426,7 +1442,8 @@ def _download_with_sections_via_cli(
try:
_set_pipe_percent(self._max_pct)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set pipeline percent to max %d", self._max_pct)
session_id = hashlib.md5((url + str(time.time()) + "".join(random.choices(string.ascii_letters, k=10))).encode()).hexdigest()[:12]
first_section_info = None
@@ -1440,7 +1457,8 @@ def _download_with_sections_via_cli(
try:
_set_pipe_percent(display_pct)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set pipeline percent to display_pct %d for section %d", display_pct, section_idx)
pipeline.set_status(f"Downloading & clipping clip section {section_idx}/{total_sections}")
@@ -1484,7 +1502,8 @@ def _download_with_sections_via_cli(
try:
cmd.extend(["--ffmpeg-location", str(ytdl_options["ffmpeg_location"])])
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to append ffmpeg_location CLI option")
if ytdl_options.get("format"):
cmd.extend(["-f", ytdl_options["format"]])
if ytdl_options.get("merge_output_format"):
@@ -1547,7 +1566,8 @@ def _download_with_sections_via_cli(
try:
_set_pipe_percent(99)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to set pipeline percent to 99 at end of multi-section job")
return session_id, first_section_info or {}
@@ -1654,7 +1674,8 @@ def _progress_callback(status: Dict[str, Any]) -> None:
if isinstance(value, (int, float)) and value > 0:
return int(value)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to interpret total bytes value: %r", value)
return None
if event == "downloading":
@@ -1669,7 +1690,8 @@ def _progress_callback(status: Dict[str, Any]) -> None:
total=_total_bytes(total),
)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to update pipeline transfer for label '%s'", label)
else:
_YTDLP_PROGRESS_BAR.update(
downloaded=int(downloaded) if downloaded is not None else None,
@@ -1683,7 +1705,8 @@ def _progress_callback(status: Dict[str, Any]) -> None:
if _YTDLP_TRANSFER_STATE.get(label, {}).get("started"):
pipeline.finish_transfer(label=label)
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to finish pipeline transfer for label '%s'", label)
_YTDLP_TRANSFER_STATE.pop(label, None)
else:
_YTDLP_PROGRESS_BAR.finish()
@@ -1848,7 +1871,8 @@ def download_media(opts: DownloadOptions, *, config: Optional[Dict[str, Any]] =
if cand.suffix.lower() in {".json", ".info.json"}:
continue
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to inspect candidate suffix for %s", cand)
media_file = cand
break
if media_file is None and media_candidates:
@@ -1870,10 +1894,13 @@ def download_media(opts: DownloadOptions, *, config: Optional[Dict[str, Any]] =
if name.startswith(prefix):
return name[len(prefix):]
except Exception:
pass
from SYS.logger import logger
logger.exception("Failed to check name prefix for '%s'", name)
try:
return Path(name).suffix
except Exception:
from SYS.logger import logger
logger.exception("Failed to obtain suffix for name '%s'", name)
return ""
try:
@@ -1884,7 +1911,8 @@ def download_media(opts: DownloadOptions, *, config: Optional[Dict[str, Any]] =
try:
media_file.unlink()
except OSError:
pass
from SYS.logger import logger
logger.exception("Failed to unlink duplicate media file %s", media_file)
else:
media_file.rename(new_media_path)
debug(f"Renamed section file: {media_file.name} -> {new_media_name}")