jkj
@@ -38,7 +38,18 @@ from models import DownloadError, DownloadOptions, DownloadMediaResult, DebugLog
import pipeline as pipeline_context
from result_table import ResultTable

from ._shared import Cmdlet, CmdletArg, SharedArgs, create_pipe_object_result, parse_cmdlet_args, register_url_with_local_library, coerce_to_pipe_object
from tool.ytdlp import YtDlpTool

from . import _shared as sh

Cmdlet = sh.Cmdlet
CmdletArg = sh.CmdletArg
SharedArgs = sh.SharedArgs
create_pipe_object_result = sh.create_pipe_object_result
parse_cmdlet_args = sh.parse_cmdlet_args
register_url_with_local_library = sh.register_url_with_local_library
coerce_to_pipe_object = sh.coerce_to_pipe_object
get_field = sh.get_field


# Minimal inlined helpers from helper/download.py (is_url_supported_by_ytdlp, list_formats)
@@ -62,6 +73,136 @@ _EXTRACTOR_CACHE: List[Any] | None = None
_YTDLP_PROGRESS_BAR = ProgressBar()


_SUBTITLE_EXTS = (".vtt", ".srt", ".ass", ".ssa", ".lrc")


def _format_chapters_note(info: Dict[str, Any]) -> Optional[str]:
    """Format yt-dlp chapter metadata into a stable, note-friendly text.

    Output is one chapter per line, e.g.:
        00:00 Intro
        01:23-02:10 Topic name
    """
    try:
        chapters = info.get("chapters")
    except Exception:
        chapters = None

    if not isinstance(chapters, list) or not chapters:
        return None

    rows: List[tuple[int, Optional[int], str]] = []
    max_t = 0
    for ch in chapters:
        if not isinstance(ch, dict):
            continue
        start_raw = ch.get("start_time")
        end_raw = ch.get("end_time")
        title_raw = ch.get("title") or ch.get("name") or ch.get("chapter")

        try:
            start_s = int(float(start_raw))
        except Exception:
            continue

        end_s: Optional[int] = None
        try:
            if end_raw is not None:
                end_s = int(float(end_raw))
        except Exception:
            end_s = None

        title = str(title_raw).strip() if title_raw is not None else ""
        rows.append((start_s, end_s, title))
        try:
            max_t = max(max_t, start_s, end_s or 0)
        except Exception:
            max_t = max(max_t, start_s)

    if not rows:
        return None

    force_hours = bool(max_t >= 3600)

    def _tc(seconds: int) -> str:
        total = max(0, int(seconds))
        minutes, secs = divmod(total, 60)
        hours, minutes = divmod(minutes, 60)
        if force_hours:
            return f"{hours:02d}:{minutes:02d}:{secs:02d}"
        return f"{minutes:02d}:{secs:02d}"

    lines: List[str] = []
    for start_s, end_s, title in sorted(rows, key=lambda r: (r[0], r[1] if r[1] is not None else 10**9, r[2])):
        if end_s is not None and end_s > start_s:
            prefix = f"{_tc(start_s)}-{_tc(end_s)}"
        else:
            prefix = _tc(start_s)
        line = f"{prefix} {title}".strip()
        if line:
            lines.append(line)

    text = "\n".join(lines).strip()
    return text or None
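
A minimal usage sketch of the helper above; the sample info dict is hypothetical but shaped like yt-dlp's chapters payload, and _format_chapters_note is assumed to be in scope (e.g. imported from this cmdlet module).

    info = {
        "chapters": [
            {"start_time": 0, "end_time": 83, "title": "Intro"},
            {"start_time": 83, "end_time": 130, "title": "Topic name"},
            {"start_time": 130, "title": "Outro"},
        ]
    }
    print(_format_chapters_note(info))
    # 00:00-01:23 Intro
    # 01:23-02:10 Topic name
    # 02:10 Outro
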


def _best_subtitle_sidecar(media_path: Path) -> Optional[Path]:
    """Find the most likely subtitle sidecar file for a downloaded media file."""
    try:
        base_dir = media_path.parent
        stem = media_path.stem
        if not stem:
            return None

        candidates: List[Path] = []
        for p in base_dir.glob(stem + ".*"):
            try:
                if not p.is_file():
                    continue
            except Exception:
                continue
            if p.suffix.lower() in _SUBTITLE_EXTS:
                candidates.append(p)

        if not candidates:
            return None

        def _rank(path: Path) -> tuple[int, int, float, str]:
            name = path.name.lower()
            lang_rank = 0 if ".en." in name or name.endswith(".en" + path.suffix.lower()) else 1
            ext = path.suffix.lower()
            ext_rank_map = {".vtt": 0, ".srt": 1, ".ass": 2, ".ssa": 3, ".lrc": 4}
            ext_rank = ext_rank_map.get(ext, 9)
            try:
                mtime = float(path.stat().st_mtime)
            except Exception:
                mtime = 0.0
            return (lang_rank, ext_rank, -mtime, name)

        candidates.sort(key=_rank)
        return candidates[0]
    except Exception:
        return None
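
A small self-check sketch for the sidecar lookup above, using a temporary directory and hypothetical file names; it assumes _best_subtitle_sidecar is in scope.

    import tempfile
    from pathlib import Path

    with tempfile.TemporaryDirectory() as tmp:
        root = Path(tmp)
        (root / "clip.mkv").write_bytes(b"")
        (root / "clip.srt").write_text("1\n00:00:00,000 --> 00:00:01,000\nhi\n")
        (root / "clip.en.vtt").write_text("WEBVTT\n")
        # The English .vtt wins: lang_rank 0 beats 1, ext_rank 0 beats 1.
        print(_best_subtitle_sidecar(root / "clip.mkv"))  # .../clip.en.vtt
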


def _read_text_file(path: Path, *, max_bytes: int = 1_500_000) -> Optional[str]:
    try:
        data = path.read_bytes()
    except Exception:
        return None
    if not data:
        return None
    if len(data) > max_bytes:
        data = data[:max_bytes]
    try:
        return data.decode("utf-8", errors="replace")
    except Exception:
        try:
            return data.decode(errors="replace")
        except Exception:
            return None


def _ensure_yt_dlp_ready() -> None:
    if yt_dlp is not None:
        return
@@ -100,16 +241,26 @@ def list_formats(url: str, no_playlist: bool = False, playlist_items: Optional[s
            ydl_opts["noplaylist"] = True
        if playlist_items:
            ydl_opts["playlist_items"] = playlist_items

        debug(f"Fetching format list for: {url}")
        with yt_dlp.YoutubeDL(ydl_opts) as ydl:  # type: ignore[arg-type]
            debug(f"Fetching format list for: {url}")
            info = ydl.extract_info(url, download=False)
            formats = info.get("formats", [])
            if not formats:
                log("No formats available", file=sys.stderr)
                return None
            result_formats = []
            for fmt in formats:
                result_formats.append({

            if not isinstance(info, dict):
                log("No formats available", file=sys.stderr)
                return None

            formats = info.get("formats") or []
            if not isinstance(formats, list) or not formats:
                log("No formats available", file=sys.stderr)
                return None

            result_formats: List[Dict[str, Any]] = []
            for fmt in formats:
                if not isinstance(fmt, dict):
                    continue
                result_formats.append(
                    {
                        "format_id": fmt.get("format_id", ""),
                        "format": fmt.get("format", ""),
                        "ext": fmt.get("ext", ""),
@@ -122,9 +273,11 @@ def list_formats(url: str, no_playlist: bool = False, playlist_items: Optional[s
                        "filesize": fmt.get("filesize"),
                        "abr": fmt.get("abr"),
                        "tbr": fmt.get("tbr"),
                })
            debug(f"Found {len(result_formats)} available formats")
            return result_formats
                    }
                )

            debug(f"Found {len(result_formats)} available formats")
            return result_formats or None
    except Exception as e:
        log(f"✗ Error fetching formats: {e}", file=sys.stderr)
        return None
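
A consumption sketch for list_formats (the URL is hypothetical): the function returns None or a list of plain dicts with the keys built above, so callers can filter and rank without touching yt-dlp objects.

    formats = list_formats("https://www.youtube.com/watch?v=EXAMPLE") or []
    mp4_video = [f for f in formats if f.get("ext") == "mp4" and f.get("vcodec") != "none"]
    best = max(mp4_video, key=lambda f: f.get("tbr") or 0, default=None)
    if best:
        print(best["format_id"], best.get("resolution"), best.get("filesize"))
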
@@ -215,6 +368,31 @@ def _download_with_sections_via_cli(url: str, ytdl_options: Dict[str, Any], sect
    cmd = ["yt-dlp"]
    if ytdl_options.get("format"):
        cmd.extend(["-f", ytdl_options["format"]])
    if ytdl_options.get("merge_output_format"):
        cmd.extend(["--merge-output-format", str(ytdl_options["merge_output_format"])])

    # For CLI downloads, infer chapter/metadata embedding from either legacy flags
    # or explicit FFmpegMetadata postprocessor entries.
    postprocessors = ytdl_options.get("postprocessors")
    want_add_metadata = bool(ytdl_options.get("addmetadata"))
    want_embed_chapters = bool(ytdl_options.get("embedchapters"))
    if isinstance(postprocessors, list):
        for pp in postprocessors:
            if not isinstance(pp, dict):
                continue
            if str(pp.get("key") or "") == "FFmpegMetadata":
                want_add_metadata = True
                if bool(pp.get("add_chapters", True)):
                    want_embed_chapters = True

    if want_add_metadata:
        cmd.append("--add-metadata")
    if want_embed_chapters:
        cmd.append("--embed-chapters")
    if ytdl_options.get("writesubtitles"):
        cmd.append("--write-sub")
        cmd.append("--write-auto-sub")
        cmd.extend(["--sub-format", "vtt"])
    if ytdl_options.get("force_keyframes_at_cuts"):
        cmd.append("--force-keyframes-at-cuts")
    cmd.extend(["-o", section_outtmpl])
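
For the flag inference above, a hypothetical ytdl_options dict and the yt-dlp invocation it maps to (a sketch, not captured output):

    ytdl_options = {
        "format": "bestvideo+bestaudio/best",
        "merge_output_format": "mkv",
        "postprocessors": [{"key": "FFmpegMetadata", "add_chapters": True}],
        "writesubtitles": True,
        "force_keyframes_at_cuts": True,
    }
    # -> yt-dlp -f "bestvideo+bestaudio/best" --merge-output-format mkv \
    #        --add-metadata --embed-chapters --write-sub --write-auto-sub \
    #        --sub-format vtt --force-keyframes-at-cuts -o <section_outtmpl> ...
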
@@ -258,11 +436,6 @@ def _build_ytdlp_options(opts: DownloadOptions) -> Dict[str, Any]:

    if opts.cookies_path and opts.cookies_path.is_file():
        base_options["cookiefile"] = str(opts.cookies_path)
    else:
        from hydrus_health_check import get_cookies_file_path  # local import
        global_cookies = get_cookies_file_path()
        if global_cookies:
            base_options["cookiefile"] = global_cookies

    if opts.no_playlist:
        base_options["noplaylist"] = True
@@ -274,6 +447,37 @@ def _build_ytdlp_options(opts: DownloadOptions) -> Dict[str, Any]:
    base_options["format"] = opts.ytdl_format or "bestvideo+bestaudio/best"
    base_options["format_sort"] = ["res:4320", "res:2880", "res:2160", "res:1440", "res:1080", "res:720", "res"]

    # Optional yt-dlp features
    if getattr(opts, "embed_chapters", False):
        # Prefer explicit FFmpegMetadata PP so chapter embedding runs even when
        # we already specified other postprocessors (e.g. FFmpegExtractAudio).
        pps = base_options.get("postprocessors")
        if not isinstance(pps, list):
            pps = []
        already_has_metadata = any(
            isinstance(pp, dict) and str(pp.get("key") or "") == "FFmpegMetadata" for pp in pps
        )
        if not already_has_metadata:
            pps.append(
                {
                    "key": "FFmpegMetadata",
                    "add_metadata": True,
                    "add_chapters": True,
                    "add_infojson": "if_exists",
                }
            )
        base_options["postprocessors"] = pps

        # Chapter embedding is most reliable in mkv/mp4 containers.
        # When merging separate video+audio streams, prefer mkv so mpv sees chapters.
        if opts.mode != "audio":
            base_options.setdefault("merge_output_format", "mkv")

    if getattr(opts, "write_sub", False):
        base_options["writesubtitles"] = True
        base_options["writeautomaticsub"] = True
        base_options["subtitlesformat"] = "vtt"

    if opts.clip_sections:
        sections: List[str] = []

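
Rough shape of the option fragment the block above contributes for a video download with embed_chapters and write_sub enabled; a sketch reusing the literals from this hunk, with all other base_options keys omitted.

    options_fragment = {
        "postprocessors": [
            {
                "key": "FFmpegMetadata",
                "add_metadata": True,
                "add_chapters": True,
                "add_infojson": "if_exists",
            }
        ],
        "merge_output_format": "mkv",  # only set as a default when opts.mode != "audio"
        "writesubtitles": True,
        "writeautomaticsub": True,
        "subtitlesformat": "vtt",
    }
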
@@ -410,13 +614,27 @@ def _get_libgen_download_url(libgen_url: str) -> Optional[str]:
        response = session.get(libgen_url, timeout=10, allow_redirects=True)
        final_url = response.url
        try:
            from bs4 import BeautifulSoup
            soup = BeautifulSoup(response.content, 'html.parser')
            for link in soup.find_all('a'):
                href = link.get('href')
                if href and 'get.php' in href:
                    return urljoin(libgen_url, href)
        except ImportError:
            try:
                from lxml import html as lxml_html
            except ImportError:
                lxml_html = None

            if lxml_html is not None:
                doc = lxml_html.fromstring(response.content)
                for a in doc.xpath("//a[@href]"):
                    href = str(a.get("href") or "").strip()
                    if href and "get.php" in href.lower():
                        return urljoin(final_url, href)
            else:
                for m in re.finditer(
                    r"href=[\"\']([^\"\']+)[\"\']",
                    response.text or "",
                    flags=re.IGNORECASE,
                ):
                    href = str(m.group(1) or "").strip()
                    if href and "get.php" in href.lower():
                        return urljoin(final_url, href)
        except Exception:
            pass
        if final_url != libgen_url:
            debug(f"LibGen resolved to mirror: {final_url}")
@@ -648,7 +866,7 @@ def _download_direct_file(
        raise DownloadError(f"Error downloading file: {exc}") from exc


def probe_url(url: str, no_playlist: bool = False, timeout_seconds: int = 15) -> Optional[Dict[str, Any]]:
def probe_url(url: str, no_playlist: bool = False, timeout_seconds: int = 15, *, cookiefile: Optional[str] = None) -> Optional[Dict[str, Any]]:
    """Probe URL to extract metadata WITHOUT downloading.

    Args:
@@ -686,12 +904,8 @@ def probe_url(url: str, no_playlist: bool = False, timeout_seconds: int = 15) ->
        "noprogress": True,  # No progress bars
    }

    # Add cookies if available (lazy import to avoid circular dependency)
    from hydrus_health_check import get_cookies_file_path  # local import

    global_cookies = get_cookies_file_path()
    if global_cookies:
        ydl_opts["cookiefile"] = global_cookies
    if cookiefile:
        ydl_opts["cookiefile"] = str(cookiefile)

    # Add no_playlist option if specified
    if no_playlist:
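
A call sketch for the new keyword-only cookiefile parameter (URL and path are hypothetical); when cookiefile is None the option is simply not set.

    info = probe_url(
        "https://www.youtube.com/watch?v=EXAMPLE",
        no_playlist=True,
        timeout_seconds=15,
        cookiefile="/home/user/cookies.txt",
    )
    if info:
        print(info.get("title"), info.get("duration"))
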
@@ -807,7 +1021,14 @@ def download_media(
        debug(f"Skipping probe for playlist (item selection: {opts.playlist_items}), proceeding with download")
        probe_result = {"url": opts.url}  # Minimal probe result
    else:
        probe_result = probe_url(opts.url, no_playlist=opts.no_playlist, timeout_seconds=15)
        probe_cookiefile = None
        try:
            if opts.cookies_path and opts.cookies_path.is_file():
                probe_cookiefile = str(opts.cookies_path)
        except Exception:
            probe_cookiefile = None

        probe_result = probe_url(opts.url, no_playlist=opts.no_playlist, timeout_seconds=15, cookiefile=probe_cookiefile)

    if probe_result is None:
        if not opts.quiet:
@@ -1182,6 +1403,8 @@ class Download_Media(Cmdlet):
        try:
            debug("Starting download-media")

            ytdlp_tool = YtDlpTool(config)

            # Parse arguments
            parsed = parse_cmdlet_args(args, self)

@@ -1192,7 +1415,6 @@ class Download_Media(Cmdlet):

            # If no url provided via args, try to extract from piped result
            if not raw_url and result:
                from ._shared import get_field
                # Handle single result or list of results
                results_to_check = result if isinstance(result, list) else [result]
                for item in results_to_check:
@@ -1226,6 +1448,10 @@ class Download_Media(Cmdlet):
            # Get other options
            clip_spec = parsed.get("clip")

            # Always enable chapters + subtitles so downstream pipes (e.g. mpv) can consume them.
            embed_chapters = True
            write_sub = True

            mode = "audio" if parsed.get("audio") else "video"

            # Parse clip range(s) if specified
@@ -1379,7 +1605,14 @@ class Download_Media(Cmdlet):
                if playlist_items:
                    return str(requested_url)
                try:
                    pr = probe_url(requested_url, no_playlist=False, timeout_seconds=15)
                    cf = None
                    try:
                        cookie_path = ytdlp_tool.resolve_cookiefile()
                        if cookie_path is not None and cookie_path.is_file():
                            cf = str(cookie_path)
                    except Exception:
                        cf = None
                    pr = probe_url(requested_url, no_playlist=False, timeout_seconds=15, cookiefile=cf)
                    if isinstance(pr, dict):
                        for key in ("webpage_url", "original_url", "url", "requested_url"):
                            value = pr.get(key)
@@ -1458,7 +1691,14 @@ class Download_Media(Cmdlet):
                - selected_urls: Optional[List[str]] (expanded per-entry urls when available)
                """
                try:
                    pr = probe_url(url, no_playlist=False, timeout_seconds=15)
                    cf = None
                    try:
                        cookie_path = ytdlp_tool.resolve_cookiefile()
                        if cookie_path is not None and cookie_path.is_file():
                            cf = str(cookie_path)
                    except Exception:
                        cf = None
                    pr = probe_url(url, no_playlist=False, timeout_seconds=15, cookiefile=cf)
                except Exception:
                    pr = None
                if not isinstance(pr, dict):
@@ -1685,6 +1925,15 @@ class Download_Media(Cmdlet):
                acodec = fmt.get("acodec", "none")
                filesize = fmt.get("filesize")
                format_id = fmt.get("format_id", "")

                # If the chosen format is video-only (no audio stream), automatically
                # request best audio too so the resulting file has sound.
                selection_format_id = format_id
                try:
                    if vcodec != "none" and acodec == "none" and format_id:
                        selection_format_id = f"{format_id}+ba"
                except Exception:
                    selection_format_id = format_id

                # Format size
                size_str = ""
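
A standalone restatement of the selector rewrite above; the helper name is hypothetical and shown for illustration only.

    def _augmented_selector(fmt: dict) -> str:
        fid = str(fmt.get("format_id", ""))
        if fmt.get("vcodec", "none") != "none" and fmt.get("acodec", "none") == "none" and fid:
            return f"{fid}+ba"  # video-only stream: ask yt-dlp to merge best audio
        return fid

    print(_augmented_selector({"format_id": "137", "vcodec": "avc1", "acodec": "none"}))       # 137+ba
    print(_augmented_selector({"format_id": "22", "vcodec": "avc1", "acodec": "mp4a.40.2"}))   # 22
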
@@ -1729,9 +1978,9 @@ class Download_Media(Cmdlet):
                    "full_metadata": {
                        "format_id": format_id,
                        "url": url,
                        "item_selector": format_id,
                        "item_selector": selection_format_id,
                    },
                    "_selection_args": ["-format", format_id]
                    "_selection_args": ["-format", selection_format_id]
                }

                # Add to results list and table (don't emit - formats should wait for @N selection)
@@ -1778,23 +2027,57 @@ class Download_Media(Cmdlet):
                actual_format = playlist_items
                actual_playlist_items = None

            # Auto-pick best audio format when -audio is used and no explicit format is given.
            # For -audio, default to yt-dlp's built-in bestaudio selector.
            # This should *not* require interactive format picking.
            if mode == "audio" and not actual_format:
                chosen = None
                formats = list_formats(url, no_playlist=False, playlist_items=actual_playlist_items)
                if formats:
                    chosen = _pick_best_audio_format_id(formats)
                actual_format = chosen or "bestaudio/best"
                actual_format = "bestaudio"

            # If no explicit format is provided for video mode, allow a config override.
            if mode == "video" and not actual_format:
                configured = (ytdlp_tool.default_format("video") or "").strip()
                if configured and configured != "bestvideo+bestaudio/best":
                    actual_format = configured

            # If a single format id was chosen and it is video-only, auto-merge best audio.
            if (
                actual_format
                and isinstance(actual_format, str)
                and mode != "audio"
                and "+" not in actual_format
                and "/" not in actual_format
                and "[" not in actual_format
                and actual_format not in {"best", "bv", "ba", "b"}
            ):
                try:
                    formats = list_formats(url, no_playlist=False, playlist_items=actual_playlist_items)
                    if formats:
                        fmt_match = next(
                            (f for f in formats if str(f.get("format_id", "")) == actual_format),
                            None,
                        )
                        if fmt_match:
                            vcodec = str(fmt_match.get("vcodec", "none"))
                            acodec = str(fmt_match.get("acodec", "none"))
                            if vcodec != "none" and acodec == "none":
                                debug(
                                    f"Selected video-only format {actual_format}; using {actual_format}+ba for audio"
                                )
                                actual_format = f"{actual_format}+ba"
                except Exception:
                    pass

            opts = DownloadOptions(
                url=url,
                mode=mode,
                output_dir=final_output_dir,
                ytdl_format=actual_format,
                cookies_path=ytdlp_tool.resolve_cookiefile(),
                clip_sections=clip_sections_spec,
                playlist_items=actual_playlist_items,
                quiet=quiet_mode,
                no_playlist=False,
                embed_chapters=embed_chapters,
                write_sub=write_sub,
            )

            # Use timeout wrapper to prevent hanging
@@ -1838,7 +2121,40 @@ class Download_Media(Cmdlet):
            # Build PipeObjects first so we can attach cross-clip relationships.
            pipe_objects: List[Dict[str, Any]] = []
            for downloaded in results_to_emit:
                pipe_objects.append(self._build_pipe_object(downloaded, url, opts))
                po = self._build_pipe_object(downloaded, url, opts)

                # Attach chapter timestamps for downstream consumers (e.g., mpv scripts)
                # even if container embedding fails.
                try:
                    info = downloaded.info if isinstance(getattr(downloaded, "info", None), dict) else {}
                except Exception:
                    info = {}
                chapters_text = _format_chapters_note(info) if embed_chapters else None
                if chapters_text:
                    notes = po.get("notes")
                    if not isinstance(notes, dict):
                        notes = {}
                    notes.setdefault("chapters", chapters_text)
                    po["notes"] = notes

                if write_sub:
                    try:
                        media_path = Path(str(po.get("path") or ""))
                    except Exception:
                        media_path = None

                    if media_path is not None and media_path.exists() and media_path.is_file():
                        sub_path = _best_subtitle_sidecar(media_path)
                        if sub_path is not None:
                            sub_text = _read_text_file(sub_path)
                            if sub_text:
                                notes = po.get("notes")
                                if not isinstance(notes, dict):
                                    notes = {}
                                notes["sub"] = sub_text
                                po["notes"] = notes

                pipe_objects.append(po)

            # If this is a clip download, decorate titles/tags so the title: tag is clip-based.
            # Relationship tags are only added when multiple clips exist.
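
Rough shape of a pipe object after the notes are attached above; the path and note text are hypothetical, and the remaining keys come from _build_pipe_object.

    po_example = {
        "path": "/downloads/Some Video.mkv",
        "notes": {
            "chapters": "00:00 Intro\n01:23-02:10 Topic name",
            "sub": "WEBVTT\n\n00:00:00.000 --> 00:00:02.000\n...",
        },
    }
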
@@ -1868,6 +2184,95 @@ class Download_Media(Cmdlet):
            debug("✓ Downloaded and emitted")

        except DownloadError as e:
            # Special-case yt-dlp format errors: show a selectable format list table so
            # the user can pick a working format_id and continue the pipeline via @N.
            cause = getattr(e, "__cause__", None)
            detail = ""
            try:
                detail = str(cause or "")
            except Exception:
                detail = ""

            if "requested format is not available" in (detail or "").lower() and mode != "audio":
                formats = list_formats(url, no_playlist=False, playlist_items=actual_playlist_items)
                if formats:
                    formats_to_show = formats

                    table = ResultTable()
                    table.title = f"Available formats for {url}"
                    table.set_source_command("download-media", [str(a) for a in (args or [])])

                    results_list: List[Dict[str, Any]] = []
                    for idx, fmt in enumerate(formats_to_show, 1):
                        resolution = fmt.get("resolution", "")
                        ext = fmt.get("ext", "")
                        vcodec = fmt.get("vcodec", "none")
                        acodec = fmt.get("acodec", "none")
                        filesize = fmt.get("filesize")
                        format_id = fmt.get("format_id", "")

                        selection_format_id = format_id
                        try:
                            if vcodec != "none" and acodec == "none" and format_id:
                                selection_format_id = f"{format_id}+ba"
                        except Exception:
                            selection_format_id = format_id

                        size_str = ""
                        if filesize:
                            try:
                                size_mb = float(filesize) / (1024 * 1024)
                                size_str = f"{size_mb:.1f}MB"
                            except Exception:
                                size_str = ""

                        desc_parts: List[str] = []
                        if resolution and resolution != "audio only":
                            desc_parts.append(str(resolution))
                        if ext:
                            desc_parts.append(str(ext).upper())
                        if vcodec != "none":
                            desc_parts.append(f"v:{vcodec}")
                        if acodec != "none":
                            desc_parts.append(f"a:{acodec}")
                        if size_str:
                            desc_parts.append(size_str)
                        format_desc = " | ".join(desc_parts)

                        format_dict: Dict[str, Any] = {
                            "table": "download-media",
                            "title": f"Format {format_id}",
                            "url": url,
                            "target": url,
                            "detail": format_desc,
                            "media_kind": "format",
                            "columns": [
                                ("#", str(idx)),
                                ("ID", format_id),
                                ("Resolution", resolution or "N/A"),
                                ("Ext", ext),
                                ("Video", vcodec),
                                ("Audio", acodec),
                                ("Size", size_str or "N/A"),
                            ],
                            "full_metadata": {
                                "format_id": format_id,
                                "url": url,
                                "item_selector": selection_format_id,
                            },
                            "_selection_args": ["-format", selection_format_id],
                        }

                        results_list.append(format_dict)
                        table.add_result(format_dict)

                    pipeline_context.set_current_stage_table(table)
                    pipeline_context.set_last_result_table(table, results_list)

                    # Returning 0 with no emits lets the CLI pause the pipeline for @N selection.
                    log("Requested format is not available; select a working format with @N", file=sys.stderr)
                    return 0

            log(f"Download failed for {url}: {e}", file=sys.stderr)
        except Exception as e:
            log(f"Error processing {url}: {e}", file=sys.stderr)