This commit is contained in:
2026-01-31 19:00:04 -08:00
parent dcf16e0cc4
commit 6513a3ad04
25 changed files with 617 additions and 397 deletions

View File

@@ -10,6 +10,8 @@ from contextvars import ContextVar
from typing import Any, Dict, List, Optional, Sequence, Callable
from SYS.models import PipelineStageContext
from SYS.logger import log, debug, is_debug_enabled
import logging
logger = logging.getLogger(__name__)
from SYS.worker import WorkerManagerRegistry, WorkerStages
from SYS.cli_parsing import SelectionSyntax, SelectionFilterSyntax
from SYS.rich_display import stdout_console
@@ -62,7 +64,7 @@ def suspend_live_progress():
try:
ui.resume()
except Exception:
pass
logger.exception("Failed to resume live progress UI after suspend")
def _is_selectable_table(table: Any) -> bool:
@@ -237,7 +239,7 @@ def print_if_visible(*args: Any, file=None, **kwargs: Any) -> None:
if should_print:
log(*args, **kwargs) if file is None else log(*args, file=file, **kwargs)
except Exception:
pass
logger.exception("Error in print_if_visible")
def store_value(key: str, value: Any) -> None:
@@ -253,7 +255,7 @@ def store_value(key: str, value: Any) -> None:
state = _get_pipeline_state()
state.pipeline_values[text] = value
except Exception:
pass
logger.exception("Failed to store pipeline value '%s'", key)
def load_value(key: str, default: Any = None) -> Any:
@@ -330,7 +332,7 @@ def set_pending_pipeline_tail(
state.pending_pipeline_source = clean_source if clean_source else None
except Exception:
# Keep existing pending tail on failure
pass
logger.exception("Failed to set pending pipeline tail; keeping existing pending tail")
def get_pending_pipeline_tail() -> List[List[str]]:
@@ -627,24 +629,9 @@ def set_last_result_table(
if result_table.rows and len(sorted_items) == len(result_table.rows):
state.last_result_items = sorted_items
except Exception:
pass
logger.exception("Failed to sort result_table and reorder items")
def set_last_result_table_overlay(
result_table: Optional[Any],
items: Optional[List[Any]] = None,
subject: Optional[Any] = None
) -> None:
"""
Set a result table as an overlay (display only, no history).
"""
state = _get_pipeline_state()
state.display_table = result_table
state.display_items = items or []
state.display_subject = subject
# Sort table by Title/Name column alphabetically if available
if (
result_table is not None
and hasattr(result_table, "sort_by_title")
@@ -662,23 +649,7 @@ def set_last_result_table_overlay(
if len(sorted_items) == len(result_table.rows):
state.display_items = sorted_items
except Exception:
pass
def set_last_result_table_preserve_history(
    result_table: Optional[Any],
    items: Optional[List[Any]] = None,
    subject: Optional[Any] = None
) -> None:
    """
    Replace the current last-result table without touching history.

    Overwrites the pipeline state's current table, item list, and subject
    in place; unlike the history-aware setter, nothing is pushed onto the
    result-table history stack.
    """
    state = _get_pipeline_state()
    # Deliberately skip the history push: this is a display-state refresh.
    state.last_result_subject = subject
    state.last_result_items = items or []
    state.last_result_table = result_table
logger.exception("Failed to sort overlay result_table and reorder items")
@@ -747,7 +718,7 @@ def restore_previous_result_table() -> bool:
try:
debug_table_state("restore_previous_result_table")
except Exception:
pass
logger.exception("Failed to debug_table_state during restore_previous_result_table")
return True
@@ -805,7 +776,7 @@ def restore_next_result_table() -> bool:
try:
debug_table_state("restore_next_result_table")
except Exception:
pass
logger.exception("Failed to debug_table_state during restore_next_result_table")
return True
@@ -926,7 +897,7 @@ def debug_table_state(label: str = "") -> None:
f"history={len(state.result_table_history or [])} forward={len(state.result_table_forward or [])} last_selection={list(state.last_selection or [])}"
)
except Exception:
pass
logger.exception("Failed to debug_table_state buffers summary")
def get_last_selectable_result_items() -> List[Any]:
@@ -1133,7 +1104,7 @@ class PipelineExecutor:
if self._config_loader is not None:
return self._config_loader.load()
except Exception:
pass
logger.exception("Failed to use config_loader.load(); falling back to SYS.config.load_config")
try:
from SYS.config import load_config
@@ -1209,7 +1180,7 @@ class PipelineExecutor:
if hasattr(ctx, "clear_pipeline_stop"):
ctx.clear_pipeline_stop()
except Exception:
pass
logger.exception("Failed to clear pipeline stop via ctx.clear_pipeline_stop")
@staticmethod
def _maybe_seed_current_stage_table(ctx: Any) -> None:
@@ -1231,7 +1202,7 @@ class PipelineExecutor:
if last_table:
ctx.set_current_stage_table(last_table)
except Exception:
pass
logger.exception("Failed to seed current_stage_table from display or last table")
@staticmethod
def _maybe_apply_pending_pipeline_tail(ctx: Any,
@@ -1290,13 +1261,13 @@ class PipelineExecutor:
if hasattr(ctx, "clear_pending_pipeline_tail"):
ctx.clear_pending_pipeline_tail()
except Exception:
pass
logger.exception("Failed to clear pending pipeline tail after appending pending tail")
else:
try:
if hasattr(ctx, "clear_pending_pipeline_tail"):
ctx.clear_pending_pipeline_tail()
except Exception:
pass
logger.exception("Failed to clear pending pipeline tail (source mismatch branch)")
return stages
def _apply_quiet_background_flag(self, config: Any) -> Any:
@@ -1410,7 +1381,7 @@ class PipelineExecutor:
if isinstance(meta, dict):
_add(meta.get("provider"))
except Exception:
pass
logger.exception("Failed to inspect current_table/table metadata in _maybe_run_class_selector")
for item in selected_items or []:
if isinstance(item, dict):
@@ -1443,7 +1414,7 @@ class PipelineExecutor:
if prefix and is_known_provider_name(prefix):
_add(prefix)
except Exception:
pass
logger.exception("Failed while computing provider prefix heuristics in _maybe_run_class_selector")
if get_provider is not None:
for key in candidates:
@@ -1453,7 +1424,7 @@ class PipelineExecutor:
continue
except Exception:
# If the predicate fails for any reason, fall back to legacy behavior.
pass
logger.exception("is_known_provider_name predicate failed for key %s; falling back", key)
try:
provider = get_provider(key, config)
except Exception:
@@ -1511,7 +1482,7 @@ class PipelineExecutor:
if handled:
return True
except Exception:
pass
logger.exception("Failed while running store-based selector logic in _maybe_run_class_selector")
return False
@@ -1544,7 +1515,7 @@ class PipelineExecutor:
try:
worker_manager.append_stdout(worker_id, text + "\n", channel="log")
except Exception:
pass
logger.exception("Failed to append pipeline event to worker stdout for %s", worker_id)
@staticmethod
def _maybe_open_url_selection(
@@ -1632,7 +1603,7 @@ class PipelineExecutor:
kwargs["output"] = output_fn
ensure_background_notifier(worker_manager, **kwargs)
except Exception:
pass
logger.exception("Failed to enable background notifier for session_worker_ids=%r", session_worker_ids)
@staticmethod
def _get_raw_stage_texts(ctx: Any) -> List[str]:
@@ -1691,7 +1662,7 @@ class PipelineExecutor:
if last_table is not None:
ctx.set_current_stage_table(last_table)
except Exception:
pass
logger.exception("Failed to sync current_stage_table from display/last table in _maybe_apply_initial_selection")
source_cmd = None
source_args_raw = None
@@ -1836,7 +1807,7 @@ class PipelineExecutor:
f"@N expansion: {source_cmd} + selected_args={selected_row_args} + source_args={source_args}",
)
except Exception:
pass
logger.exception("Failed to record pipeline log step for @N expansion (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
stage_table = None
try:
@@ -1939,7 +1910,7 @@ class PipelineExecutor:
continue
seen_track_ids.add(tid)
except Exception:
pass
logger.exception("Failed to extract/parse track metadata in album processing")
track_items.append(tr)
if track_items:
@@ -1969,7 +1940,7 @@ class PipelineExecutor:
f"Applied @N selection {' | '.join(selection_parts)}",
)
except Exception:
pass
logger.exception("Failed to record Applied @N selection log step (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
# Auto-insert downloader stages for provider tables.
try:
@@ -1979,6 +1950,7 @@ class PipelineExecutor:
if current_table is None:
current_table = ctx.get_last_result_table()
except Exception:
logger.exception("Failed to determine current_table for selection auto-insert; defaulting to None")
current_table = None
table_type = None
try:
@@ -1990,6 +1962,7 @@ class PipelineExecutor:
if current_table and hasattr(current_table, "table") else None
)
except Exception:
logger.exception("Failed to compute table_type from current_table; using fallback attribute access")
table_type = (
current_table.table
if current_table and hasattr(current_table, "table") else None
@@ -2100,7 +2073,7 @@ class PipelineExecutor:
try:
print(f"Auto-running selection via {auto_stage[0]}")
except Exception:
pass
logger.exception("Failed to print auto-run selection message for %s", auto_stage[0])
# Append the auto stage now. If the user also provided a selection
# (e.g., @1 | add-file ...), we want to attach the row selection
# args *to the auto-inserted stage* so the download command receives
@@ -2137,44 +2110,54 @@ class PipelineExecutor:
tail = [str(x) for x in inserted[1:]]
stages[-1] = [cmd] + [str(x) for x in row_args] + tail
except Exception:
pass
logger.exception("Failed to attach selection args to auto-inserted stage")
# If no auto stage inserted and there are selection-action tokens available
# for the single selected row, apply it as the pipeline stage so a bare
# `@N` runs the intended action (e.g., get-file for hash-backed rows).
if not stages and selection_indices and len(selection_indices) == 1:
try:
idx = selection_indices[0]
debug(f"@N initial selection idx={idx} last_items={len(ctx.get_last_result_items() or [])}")
row_action = None
try:
row_action = ctx.get_current_stage_table_row_selection_action(idx)
except Exception:
row_action = None
if not row_action:
# If no auto stage inserted and there are selection-action tokens available
# for the single selected row, apply it as the pipeline stage so a bare
# `@N` runs the intended action (e.g., get-file for hash-backed rows).
if not stages and selection_indices and len(selection_indices) == 1:
try:
items = ctx.get_last_result_items() or []
if 0 <= idx < len(items):
maybe = items[idx]
# Provide explicit debug output about the payload selected
try:
if isinstance(maybe, dict):
debug(f"@N payload: hash={maybe.get('hash')} store={maybe.get('store')} _selection_args={maybe.get('_selection_args')} _selection_action={maybe.get('_selection_action')}")
else:
debug(f"@N payload object type: {type(maybe).__name__}")
except Exception:
pass
if isinstance(maybe, dict):
candidate = maybe.get("_selection_action")
if isinstance(candidate, (list, tuple)):
row_action = [str(x) for x in candidate if x is not None]
debug(f"@N restored row_action from payload: {row_action}")
except Exception:
idx = selection_indices[0]
debug(f"@N initial selection idx={idx} last_items={len(ctx.get_last_result_items() or [])}")
row_action = None
try:
row_action = ctx.get_current_stage_table_row_selection_action(idx)
except Exception:
logger.exception("Failed to get current_stage_table row selection action for idx %s", idx)
row_action = None
if row_action:
debug(f"@N applying row action -> {row_action}")
if not row_action:
try:
items = ctx.get_last_result_items() or []
if 0 <= idx < len(items):
maybe = items[idx]
try:
if isinstance(maybe, dict):
debug(f"@N payload: hash={maybe.get('hash')} store={maybe.get('store')} _selection_args={maybe.get('_selection_args')} _selection_action={maybe.get('_selection_action')}")
else:
debug(f"@N payload object type: {type(maybe).__name__}")
except Exception:
logger.exception("Failed to debug selection payload for index %s", idx)
if isinstance(maybe, dict):
candidate = maybe.get("_selection_action")
if isinstance(candidate, (list, tuple)):
row_action = [str(x) for x in candidate if x is not None]
except Exception:
row_action = None
if row_action:
debug(f"@N applying row action -> {row_action}")
stages.append(row_action)
if pipeline_session and worker_manager:
try:
worker_manager.log_step(
pipeline_session.worker_id,
f"@N applied row action -> {' '.join(row_action)}",
)
except Exception:
logger.exception("Failed to record pipeline log step for applied row action (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
except Exception:
logger.exception("Failed to apply single-row selection action")
stages.append(row_action)
if pipeline_session and worker_manager:
try:
@@ -2183,9 +2166,7 @@ class PipelineExecutor:
f"@N applied row action -> {' '.join(row_action)}",
)
except Exception:
pass
except Exception:
pass
logger.exception("Failed to record pipeline log step for applied row action (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
else:
first_cmd = stages[0][0] if stages and stages[0] else None
if isinstance(table_type, str) and table_type.startswith("metadata.") and first_cmd not in (
@@ -2234,7 +2215,7 @@ class PipelineExecutor:
tail = [str(x) for x in inserted[1:]]
stages[0] = [cmd] + [str(x) for x in row_args] + tail
except Exception:
pass
logger.exception("Failed to attach selection args to inserted auto stage (alternate branch)")
# After inserting/appending an auto-stage, continue processing so later
# selection-expansion logic can still run (e.g., for example selectors).
@@ -2304,7 +2285,7 @@ class PipelineExecutor:
continue
i += 1
except Exception:
pass
logger.exception("Failed to inspect add-file stage tokens for potential directory; skipping Live progress")
if not name:
continue
# Display-only: avoid Live progress for relationship viewing.
@@ -2342,7 +2323,7 @@ class PipelineExecutor:
if hasattr(_pipeline_ctx, "set_live_progress"):
_pipeline_ctx.set_live_progress(progress_ui)
except Exception:
pass
logger.exception("Failed to register PipelineLiveProgress with pipeline context")
pipe_index_by_stage = {
stage_idx: pipe_idx
for pipe_idx, stage_idx in enumerate(pipe_stage_indices)
@@ -2366,7 +2347,7 @@ class PipelineExecutor:
if hasattr(ctx, "set_current_stage_table"):
ctx.set_current_stage_table(None)
except Exception:
pass
logger.exception("Failed to clear current_stage_table in execute_tokens")
# Preflight (URL-duplicate prompts, etc.) should be cached within a single
# pipeline run, not across independent pipelines.
@@ -2374,7 +2355,7 @@ class PipelineExecutor:
ctx.store_value("preflight",
{})
except Exception:
pass
logger.exception("Failed to set preflight cache in execute_tokens")
stages = self._split_stages(tokens)
if not stages:
@@ -2482,7 +2463,7 @@ class PipelineExecutor:
try:
ctx.set_last_items(pipe_items)
except Exception:
pass
logger.exception("Failed to set last items after @ selection")
if pipeline_session and worker_manager:
try:
worker_manager.log_step(
@@ -2490,7 +2471,7 @@ class PipelineExecutor:
"@ used last result items"
)
except Exception:
pass
logger.exception("Failed to record pipeline log step for '@ used last result items' (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
continue
subject = ctx.get_last_result_subject()
@@ -2505,7 +2486,7 @@ class PipelineExecutor:
list) else [subject]
ctx.set_last_items(subject_items)
except Exception:
pass
logger.exception("Failed to set last_items from subject during @ handling")
if pipeline_session and worker_manager:
try:
worker_manager.log_step(
@@ -2513,7 +2494,7 @@ class PipelineExecutor:
"@ used current table subject"
)
except Exception:
pass
logger.exception("Failed to record pipeline log step for '@ used current table subject' (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))
continue
if cmd_name.startswith("@"): # selection stage
@@ -2550,7 +2531,7 @@ class PipelineExecutor:
ctx.set_current_stage_table(display_table)
stage_table = display_table
except Exception:
pass
logger.exception("Failed to set current_stage_table from display table during selection processing")
if not stage_table and display_table is not None:
stage_table = display_table
@@ -2561,7 +2542,7 @@ class PipelineExecutor:
if hasattr(ctx, "debug_table_state"):
ctx.debug_table_state(f"selection {selection_token}")
except Exception:
pass
logger.exception("Failed to debug_table_state during selection %s", selection_token)
if display_table is not None and stage_table is display_table:
items_list = ctx.get_last_result_items() or []
@@ -2600,9 +2581,9 @@ class PipelineExecutor:
try:
debug(f"Selection sample object: provider={getattr(sample, 'provider', None)} store={getattr(sample, 'store', None)}")
except Exception:
pass
logger.exception("Failed to debug selection sample object")
except Exception:
pass
logger.exception("Failed to produce selection debug sample for token %s", selection_token)
if not filtered:
print("No items matched selection\n")
@@ -2628,14 +2609,14 @@ class PipelineExecutor:
if base_table is not None and getattr(base_table, "table", None):
new_table.set_table(str(getattr(base_table, "table")))
except Exception:
pass
logger.exception("Failed to set table on new_table for filter overlay")
try:
# Attach a one-line header so users see the active filter.
safe = str(selection_token)[1:].strip()
new_table.set_header_line(f'filter: "{safe}"')
except Exception:
pass
logger.exception("Failed to set header line for filter overlay for token %s", selection_token)
for item in filtered:
new_table.add_result(item)
@@ -2643,15 +2624,15 @@ class PipelineExecutor:
try:
ctx.set_last_result_table_overlay(new_table, items=list(filtered), subject=ctx.get_last_result_subject())
except Exception:
pass
logger.exception("Failed to set last_result_table_overlay for filter selection")
try:
stdout_console().print()
stdout_console().print(new_table)
except Exception:
pass
logger.exception("Failed to render filter overlay to stdout_console")
except Exception:
pass
logger.exception("Failed while rendering filter overlay for selection %s", selection_token)
continue
# UX: selecting a single URL row from get-url tables should open it.
@@ -2667,7 +2648,7 @@ class PipelineExecutor:
stage_is_last=(stage_index + 1 >= len(stages)),
)
except Exception:
pass
logger.exception("Failed to open URL selection for table %s", getattr(current_table, 'table', None))
if PipelineExecutor._maybe_run_class_selector(
ctx,
@@ -2685,6 +2666,7 @@ class PipelineExecutor:
).replace("_",
"-").lower()
except Exception:
logger.exception("Failed to determine next_cmd during selection expansion for stage_index %s", stage_index)
next_cmd = None
def _is_tag_row(obj: Any) -> bool:
@@ -2696,12 +2678,12 @@ class PipelineExecutor:
"tag_name")):
return True
except Exception:
pass
logger.exception("Failed to inspect TagItem object while checking _is_tag_row")
try:
if isinstance(obj, dict) and obj.get("tag_name"):
return True
except Exception:
pass
logger.exception("Failed to inspect dict tag_name while checking _is_tag_row")
return False
if (next_cmd in {"delete-tag",
@@ -2788,7 +2770,7 @@ class PipelineExecutor:
try:
print(f"Auto-running selection via {auto_stage[0]}")
except Exception:
pass
logger.exception("Failed to print auto-run selection message for %s", auto_stage[0])
stages.append(list(auto_stage))
else:
if auto_stage:
@@ -2885,12 +2867,12 @@ class PipelineExecutor:
stdout_console().print()
stdout_console().print(overlay_table)
except Exception:
pass
logger.exception("Failed to render overlay_table to stdout_console")
if session:
try:
session.close()
except Exception:
pass
logger.exception("Failed to close pipeline stage session")
except Exception as exc:
pipeline_status = "failed"
@@ -2907,26 +2889,26 @@ class PipelineExecutor:
try:
progress_ui.complete_all_pipes()
except Exception:
pass
logger.exception("Failed to complete all pipe UI tasks in progress_ui.complete_all_pipes")
try:
progress_ui.stop()
except Exception:
pass
logger.exception("Failed to stop progress_ui")
try:
from SYS import pipeline as _pipeline_ctx
if hasattr(_pipeline_ctx, "set_live_progress"):
_pipeline_ctx.set_live_progress(None)
except Exception:
pass
logger.exception("Failed to clear live_progress on pipeline context")
# Close pipeline session and log final status
try:
if pipeline_session and worker_manager:
pipeline_session.close(status=pipeline_status, error_msg=pipeline_error)
except Exception:
pass
logger.exception("Failed to close pipeline session during finalization")
try:
if pipeline_session and worker_manager:
self._log_pipeline_event(worker_manager, pipeline_session.worker_id,
f"Pipeline {pipeline_status}: {pipeline_error or ''}")
except Exception:
pass
logger.exception("Failed to log final pipeline status (pipeline_session=%r)", getattr(pipeline_session, 'worker_id', None))