This commit is contained in:
2025-12-27 06:05:07 -08:00
parent 71b542ae91
commit 8d8a2637d5
9 changed files with 943 additions and 23 deletions

View File

@@ -159,7 +159,9 @@ class HydrusNetwork:
from models import ProgressBar from models import ProgressBar
bar = ProgressBar() bar = ProgressBar()
label = f"{self._log_prefix().strip('[]')} upload" # Keep the PipelineLiveProgress transfer line clean: show the file name.
# (The hydrus instance/service is already visible in the logs above.)
label = str(getattr(file_path, "name", None) or "upload")
start_t = time.time() start_t = time.time()
last_render_t = [start_t] last_render_t = [start_t]
sent = [0] sent = [0]

128
CLI.py
View File

@@ -1052,6 +1052,28 @@ class CmdletExecutor:
if cmd_name_norm in {"get-relationship", "get-rel", ".pipe", ".matrix", ".telegram", "telegram", "delete-file", "del-file"}: if cmd_name_norm in {"get-relationship", "get-rel", ".pipe", ".matrix", ".telegram", "telegram", "delete-file", "del-file"}:
return return
# add-file directory selector mode: show only the selection table, no Live progress.
if cmd_name_norm in {"add-file", "add_file"}:
try:
from pathlib import Path as _Path
toks = list(filtered_args or [])
i = 0
while i < len(toks):
t = str(toks[i])
low = t.lower().strip()
if low in {"-path", "--path", "-p"} and i + 1 < len(toks):
nxt = str(toks[i + 1])
if nxt and ("," not in nxt):
p = _Path(nxt)
if p.exists() and p.is_dir():
return
i += 2
continue
i += 1
except Exception:
pass
try: try:
quiet_mode = bool(config.get("_quiet_background_output")) if isinstance(config, dict) else False quiet_mode = bool(config.get("_quiet_background_output")) if isinstance(config, dict) else False
except Exception: except Exception:
@@ -1097,6 +1119,20 @@ class CmdletExecutor:
while i < len(toks): while i < len(toks):
t = str(toks[i]) t = str(toks[i])
low = t.lower().strip() low = t.lower().strip()
if cmd_name_norm in {"add-file", "add_file"} and low in {"-path", "--path", "-p"} and i + 1 < len(toks):
nxt = str(toks[i + 1])
if nxt:
if "," in nxt:
parts = [p.strip().strip('"\'') for p in nxt.split(",")]
parts = [p for p in parts if p]
if parts:
preview.extend(parts)
i += 2
continue
else:
preview.append(nxt)
i += 2
continue
if low in {"-url", "--url"} and i + 1 < len(toks): if low in {"-url", "--url"} and i + 1 < len(toks):
nxt = str(toks[i + 1]) nxt = str(toks[i + 1])
if nxt and not nxt.startswith("-"): if nxt and not nxt.startswith("-"):
@@ -1845,15 +1881,49 @@ class PipelineExecutor:
else: else:
selected_row_args: List[str] = [] selected_row_args: List[str] = []
skip_pipe_expansion = source_cmd == ".pipe" and len(stages) > 0 skip_pipe_expansion = source_cmd == ".pipe" and len(stages) > 0
# Only perform @N command expansion for *single-item* selections. # Command expansion via @N:
# For multi-item selections (e.g. @*, @1-5), expanding to a single # - Default behavior: expand ONLY for single-row selections.
# row would silently drop items. In those cases we pipe the selected # - Special case: allow multi-row expansion for add-file directory tables by
# items downstream instead. # combining selected rows into a single `-path file1,file2,...` argument.
if source_cmd and not skip_pipe_expansion and len(selection_indices) == 1: if source_cmd and not skip_pipe_expansion:
idx = selection_indices[0] src = str(source_cmd).replace("_", "-").strip().lower()
row_args = ctx.get_current_stage_table_row_selection_args(idx)
if row_args: if src == "add-file" and selection_indices:
selected_row_args.extend(row_args) row_args_list: List[List[str]] = []
for idx in selection_indices:
try:
row_args = ctx.get_current_stage_table_row_selection_args(idx)
except Exception:
row_args = None
if isinstance(row_args, list) and row_args:
row_args_list.append([str(x) for x in row_args if x is not None])
# Combine `['-path', <file>]` from each row into one `-path` arg.
paths: List[str] = []
can_merge = bool(row_args_list) and (len(row_args_list) == len(selection_indices))
if can_merge:
for ra in row_args_list:
if len(ra) == 2 and str(ra[0]).strip().lower() in {"-path", "--path", "-p"}:
p = str(ra[1]).strip()
if p:
paths.append(p)
else:
can_merge = False
break
if can_merge and paths:
selected_row_args.extend(["-path", ",".join(paths)])
elif len(selection_indices) == 1 and row_args_list:
selected_row_args.extend(row_args_list[0])
else:
# Only perform @N command expansion for *single-item* selections.
# For multi-item selections (e.g. @*, @1-5), expanding to one row
# would silently drop items. In those cases we pipe items downstream.
if len(selection_indices) == 1:
idx = selection_indices[0]
row_args = ctx.get_current_stage_table_row_selection_args(idx)
if row_args:
selected_row_args.extend(row_args)
if selected_row_args: if selected_row_args:
if isinstance(source_cmd, list): if isinstance(source_cmd, list):
@@ -2026,6 +2096,32 @@ class PipelineExecutor:
name = str(stage_tokens[0]).replace("_", "-").lower() name = str(stage_tokens[0]).replace("_", "-").lower()
if name == "@" or name.startswith("@"): if name == "@" or name.startswith("@"):
continue continue
# add-file directory selector stage: avoid Live progress so the
# selection table renders cleanly.
if name in {"add-file", "add_file"}:
try:
from pathlib import Path as _Path
toks = list(stage_tokens[1:])
i = 0
while i < len(toks):
t = str(toks[i])
low = t.lower().strip()
if low in {"-path", "--path", "-p"} and i + 1 < len(toks):
nxt = str(toks[i + 1])
if nxt and ("," not in nxt):
p = _Path(nxt)
if p.exists() and p.is_dir():
name = "" # mark as skipped
break
i += 2
continue
i += 1
except Exception:
pass
if not name:
continue
# Display-only: avoid Live progress for relationship viewing. # Display-only: avoid Live progress for relationship viewing.
# This keeps `@1 | get-relationship` clean and prevents progress UI # This keeps `@1 | get-relationship` clean and prevents progress UI
# from interfering with Rich tables/panels. # from interfering with Rich tables/panels.
@@ -2352,6 +2448,20 @@ class PipelineExecutor:
while i < len(toks): while i < len(toks):
t = str(toks[i]) t = str(toks[i])
low = t.lower().strip() low = t.lower().strip()
if cmd_name == "add-file" and low in {"-path", "--path", "-p"} and i + 1 < len(toks):
nxt = str(toks[i + 1])
if nxt:
if "," in nxt:
parts = [p.strip().strip('"\'') for p in nxt.split(",")]
parts = [p for p in parts if p]
if parts:
preview.extend(parts)
i += 2
continue
else:
preview.append(nxt)
i += 2
continue
if low in {"-url", "--url"} and i + 1 < len(toks): if low in {"-url", "--url"} and i + 1 < len(toks):
nxt = str(toks[i + 1]) nxt = str(toks[i + 1])
if nxt and not nxt.startswith("-"): if nxt and not nxt.startswith("-"):

View File

@@ -688,9 +688,114 @@ local function _reset_pan_zoom()
_show_image_status('Zoom reset') _show_image_status('Zoom reset')
end end
-- Make a string safe to use as a single filename component on Windows.
-- Control characters are removed, reserved characters become underscores,
-- and trailing dots/whitespace are stripped. Falls back to 'screenshot'
-- whenever the cleaned result would be empty.
local function _sanitize_filename_component(s)
    local name = trim(tostring(s or ''))
    if name == '' then
        return 'screenshot'
    end
    name = name:gsub('[%c]', '')             -- drop control characters
    name = name:gsub('[<>:"/\\|%?%*]', '_')  -- replace Windows-reserved <>:"/\|?*
    name = trim(name)
    name = name:gsub('[%.%s]+$', '')         -- no trailing dots or spaces
    if name == '' then
        return 'screenshot'
    end
    return name
end
-- Remove a trailing ".<ext>" from `title` when it matches the file
-- extension of `path` (case-insensitive). Returns the trimmed title
-- unchanged when there is no extension or no match.
local function _strip_title_extension(title, path)
    title = trim(tostring(title or ''))
    if title == '' then
        return title
    end
    local ext = tostring(path or ''):match('%.([%w%d]+)$')
    if not ext or ext == '' then
        return title
    end
    local suffix = '.' .. ext:lower()
    if title:lower():sub(-#suffix) ~= suffix then
        return title
    end
    return trim(title:sub(1, #title - #suffix))
end
local function _capture_screenshot() local function _capture_screenshot()
mp.commandv('screenshot') local function _format_time_label(seconds)
mp.osd_message('Screenshot captured', 0.7) local total = math.max(0, math.floor(tonumber(seconds or 0) or 0))
local hours = math.floor(total / 3600)
local minutes = math.floor(total / 60) % 60
local secs = total % 60
local parts = {}
if hours > 0 then
table.insert(parts, ('%dh'):format(hours))
end
if minutes > 0 or hours > 0 then
table.insert(parts, ('%dm'):format(minutes))
end
table.insert(parts, ('%ds'):format(secs))
return table.concat(parts)
end
local time = mp.get_property_number('time-pos') or mp.get_property_number('time') or 0
local label = _format_time_label(time)
local raw_title = trim(tostring(mp.get_property('media-title') or ''))
local raw_path = tostring(mp.get_property('path') or '')
if raw_title == '' then
raw_title = 'screenshot'
end
raw_title = _strip_title_extension(raw_title, raw_path)
local safe_title = _sanitize_filename_component(raw_title)
local filename = safe_title .. '_' .. label .. '.png'
local temp_dir = mp.get_property('user-data/medeia-config-temp') or os.getenv('TEMP') or os.getenv('TMP') or '/tmp'
local out_path = utils.join_path(temp_dir, filename)
local ok = pcall(function()
mp.commandv('screenshot-to-file', out_path, 'video')
end)
if not ok then
mp.osd_message('Screenshot failed', 2)
return
end
_ensure_selected_store_loaded()
local selected_store = _get_selected_store()
selected_store = trim(tostring(selected_store or ''))
selected_store = selected_store:gsub('^\"', ''):gsub('\"$', '')
if selected_store == '' then
mp.osd_message('Select a store first (Store button)', 2)
return
end
local python_exe = _resolve_python_exe(true)
if not python_exe or python_exe == '' then
mp.osd_message('Screenshot saved; Python not found', 3)
return
end
local start_dir = mp.get_script_directory() or ''
local cli_py = find_file_upwards(start_dir, 'CLI.py', 8)
if not cli_py or cli_py == '' or not utils.file_info(cli_py) then
mp.osd_message('Screenshot saved; CLI.py not found', 3)
return
end
local res = utils.subprocess({
args = { python_exe, cli_py, 'add-file', '-store', selected_store, '-path', out_path },
cancellable = false,
})
if res and res.status == 0 then
mp.osd_message('Screenshot saved to store: ' .. selected_store, 3)
else
local stderr = (res and res.stderr) or 'unknown error'
mp.osd_message('Screenshot upload failed: ' .. tostring(stderr), 5)
end
end end
mp.register_script_message('medeia-image-screenshot', function() mp.register_script_message('medeia-image-screenshot', function()
@@ -2528,6 +2633,25 @@ mp.add_key_binding("L", "medeia-lyric-toggle-shift", lyric_toggle)
mp.add_timeout(0, function() mp.add_timeout(0, function()
pcall(ensure_mpv_ipc_server) pcall(ensure_mpv_ipc_server)
pcall(_lua_log, 'medeia-lua loaded version=' .. MEDEIA_LUA_VERSION) pcall(_lua_log, 'medeia-lua loaded version=' .. MEDEIA_LUA_VERSION)
-- Load optional modules (kept in separate files).
pcall(function()
local script_dir = mp.get_script_directory() or ''
local candidates = {}
if script_dir ~= '' then
table.insert(candidates, script_dir .. '/sleep_timer.lua')
table.insert(candidates, script_dir .. '/LUA/sleep_timer.lua')
table.insert(candidates, script_dir .. '/../sleep_timer.lua')
end
table.insert(candidates, 'C:/medios/Medios-Macina/MPV/LUA/sleep_timer.lua')
for _, p in ipairs(candidates) do
local ok, chunk = pcall(loadfile, p)
if ok and chunk then
pcall(chunk)
break
end
end
end)
end) end)
return M return M

116
MPV/LUA/sleep_timer.lua Normal file
View File

@@ -0,0 +1,116 @@
local mp = require 'mp'
local utils = require 'mp.utils'
local SLEEP_MENU_TYPE = 'medeia_sleep_timer_prompt'
local _timer = nil
-- Strip leading and trailing whitespace from any value (coerced to string).
local function _trim(s)
    return (tostring(s or ''):gsub('^%s+', ''):gsub('%s+$', ''))
end
-- Stop and forget the active sleep timer, if any. kill() is wrapped in
-- pcall so a stale or invalid timer object can never raise.
local function _cancel_timer()
    local t = _timer
    if t == nil then
        return
    end
    _timer = nil
    pcall(function()
        t:kill()
    end)
end
-- Parse a user-entered duration into minutes.
-- Accepts plain numbers ("15"), a minute suffix ("15m") and an hour
-- suffix ("1.5h"). Returns a positive number of minutes, or nil when
-- the input is empty, non-numeric, or not strictly positive.
local function _parse_minutes(text)
    local s = _trim(text)
    if s == '' then
        return nil
    end
    s = s:lower()
    local function positive(num)
        local v = tonumber(num)
        if v and v > 0 then
            return v
        end
        return nil
    end
    local hours = s:match('^([%d%.]+)%s*h$')
    if hours then
        local v = positive(hours)
        if v then
            return v * 60
        end
        return nil
    end
    local mins = s:match('^([%d%.]+)%s*m$')
    if mins then
        return positive(mins)
    end
    return positive(s)
end
-- Show the uosc palette prompt asking for a sleep duration. The user's
-- input is delivered back via the 'medeia-sleep-timer-event' message.
local function _open_prompt()
    local payload = utils.format_json({
        type = SLEEP_MENU_TYPE,
        title = 'Sleep Timer',
        search_style = 'palette',
        search_debounce = 'submit',
        on_search = 'callback',
        footnote = 'Enter minutes (e.g. 30) then press Enter.',
        callback = { mp.get_script_name(), 'medeia-sleep-timer-event' },
        items = {},
    })
    local sent = pcall(function()
        mp.commandv('script-message-to', 'uosc', 'open-menu', payload)
    end)
    if not sent then
        mp.osd_message('Sleep timer: uosc not available', 2.0)
    end
end
-- Handle a uosc menu event (JSON-encoded). Only 'search' submissions are
-- acted on: a valid duration (re)arms the quit timer, while anything
-- unparsable cancels any existing timer.
local function _handle_event(json)
    local ok, ev = pcall(utils.parse_json, json)
    if not ok or type(ev) ~= 'table' or ev.type ~= 'search' then
        return
    end
    local minutes = _parse_minutes(ev.query or '')
    if not minutes then
        mp.osd_message('Sleep timer cancelled', 1.0)
        _cancel_timer()
        return
    end
    _cancel_timer()
    local delay = math.floor(minutes * 60)
    _timer = mp.add_timeout(delay, function()
        mp.osd_message('Sleep timer: closing mpv', 1.5)
        mp.commandv('quit')
    end)
    mp.osd_message(('Sleep timer set: %d min'):format(math.floor(minutes + 0.5)), 1.5)
    pcall(function()
        mp.commandv('script-message-to', 'uosc', 'close-menu', SLEEP_MENU_TYPE)
    end)
end
-- Script-message entry points: 'medeia-sleep-timer' opens the prompt
-- (wired to the uosc controls bar); 'medeia-sleep-timer-event' receives
-- the uosc menu callback payload.
mp.register_script_message('medeia-sleep-timer', _open_prompt)
mp.register_script_message('medeia-sleep-timer-event', _handle_event)
-- Exported so other scripts that load this module can open the prompt.
return {
open_prompt = _open_prompt,
}

View File

@@ -10,10 +10,8 @@ local trim = {}
-- Configuration for trim presets -- Configuration for trim presets
trim.config = { trim.config = {
output_dir = os.getenv('TEMP') or os.getenv('TMP') or '/tmp', -- use temp dir by default output_dir = os.getenv('TEMP') or os.getenv('TMP') or '/tmp', -- use temp dir by default
video_codec = "copy", -- lossless by default
audio_codec = "copy",
container = "auto", container = "auto",
audio_bitrate = "", scale = "640:-2", -- Scale to 640 width, -2 ensures even height for codec
osd_duration = 2000, osd_duration = 2000,
} }
@@ -26,7 +24,7 @@ trim.presets = {
tiny = { video_codec="libx264", crf="28", preset="ultrafast", audio_codec="aac", audio_bitrate="64k" }, tiny = { video_codec="libx264", crf="28", preset="ultrafast", audio_codec="aac", audio_bitrate="64k" },
} }
trim.current_quality = "copy" trim.current_quality = "medium"
-- Get active preset with current quality -- Get active preset with current quality
local function _get_active_preset() local function _get_active_preset()

View File

@@ -84,7 +84,7 @@ progress_line_width=20
# fullscreen = cycle:crop_free:fullscreen:no/yes=fullscreen_exit!?Fullscreen # fullscreen = cycle:crop_free:fullscreen:no/yes=fullscreen_exit!?Fullscreen
# loop-playlist = cycle:repeat:loop-playlist:no/inf!?Loop playlist # loop-playlist = cycle:repeat:loop-playlist:no/inf!?Loop playlist
# toggle:{icon}:{prop} = cycle:{icon}:{prop}:no/yes! # toggle:{icon}:{prop} = cycle:{icon}:{prop}:no/yes!
controls=menu,gap,<video,audio>subtitles,<has_many_audio>audio,<has_many_video>video,<has_many_edition>editions,gap,shuffle,gap,prev,items,next,space,command:photo_camera:script-message medeia-image-screenshot?Screenshot,command:content_cut:script-message medeia-image-clip?Clip Marker,command:headset:script-message medeia-audio-only?Audio,command:store:script-message medeia-store-picker?Store controls=menu,gap,<video,audio>subtitles,<has_many_audio>audio,<has_many_video>video,<has_many_edition>editions,gap,shuffle,gap,prev,items,next,space,command:photo_camera:script-message medeia-image-screenshot?Screenshot,command:content_cut:script-message medeia-image-clip?Clip Marker,command:headset:script-message medeia-audio-only?Audio,command:store:script-message medeia-store-picker?Store,command:schedule:script-message medeia-sleep-timer?Sleep
controls_size=32 controls_size=32
controls_margin=8 controls_margin=8
controls_spacing=2 controls_spacing=2

132
USAGE_ADD_FILE_BATCH.md Normal file
View File

@@ -0,0 +1,132 @@
# Add-File Batch Directory Mode
## Overview
The `add-file` cmdlet now supports scanning directories for batch file operations. When you provide a directory path with the `-path` argument and specify a `-store` location, add-file will:
1. **Scan** the directory for all supported media files
2. **Hash** each file (SHA256)
3. **Display** a result table with filename, hash, size, and extension
4. **Wait** for your selection using `@N` syntax
5. **Add** only the selected files to your store
## Usage
### Basic Syntax
```bash
add-file -path <directory> -store <store-name>
```
### Step-by-Step Example
#### Step 1: Scan directory and show table
```bash
add-file -path "C:\Users\Admin\Downloads\test_add_file" -store mystore
```
Output:
```
✓ Found 3 files in directory. Use @N syntax to select (e.g., @1 or @1-3)
Files in Directory
─────────────────────────────────────────────────────────
# │ name │ hash │ size │ ext
────────────────────────────────────────────────────────────
1 │ image1.jpg │ a3f9b2c1... │ 2.3 MB │ .jpg
2 │ video1.mp4 │ d4e8f7a3... │ 45.2 MB │ .mp4
3 │ audio1.mp3 │ b1c9d2e3... │ 3.8 MB │ .mp3
```
The command **stops here** and waits for your selection - no files are processed yet.
#### Step 2: Select files using @N syntax
After the table is displayed, use one of these selection syntaxes:
```bash
# Add file 1 only
@1
# Add files 1 through 3
@1-3
# Add files 1, 2, and 3
@1,@2,@3
# Add files 2 and 3
@2-3
```
The selected file(s) are then piped back to `add-file` for processing and added to your store.
## Workflow Diagram
```
User Command:
┌─────────────────────────────────────────────────────┐
│ add-file -path "D:\media" -store mystore │
└─────────────────────────────────────────────────────┘
┌───────────────────────┐
│ STEP 1: Scan & Display│
│ - Scan directory │
│ - Compute hashes │
│ - Show result table │
│ - WAIT for @N input │
└───────────────────────┘
User Response:
┌─────────────────────────────────────────────────────┐
│ @1,@3 (select files 1 and 3) │
└─────────────────────────────────────────────────────┘
┌───────────────────────┐
│ STEP 2: Process Files │
│ - Get selected items │
│ - Copy to store │
│ - Show results │
└───────────────────────┘
Files added successfully!
```
## Supported File Types
The directory scanner supports all media files defined in `SUPPORTED_MEDIA_EXTENSIONS`:
- **Images**: .jpg, .jpeg, .png, .gif, .webp, .bmp, .tiff
- **Videos**: .mp4, .mkv, .webm, .mov, .avi, .flv, .mpg, .mpeg, .ts, .m4v, .wmv
- **Audio**: .mp3, .flac, .wav, .m4a, .aac, .ogg, .opus, .wma, .mka
- **Documents**: .pdf, .epub, .txt, .mobi, .azw3, .cbz, .cbr, .doc, .docx
## Key Behavior Notes
1. **No immediate processing**: Directory scan shows table and returns without copying/adding any files
2. **User control**: Nothing happens until the user makes an `@N` selection
3. **Batch selection**: Multiple files can be selected with comma or range syntax
4. **Hash display**: Each file's SHA256 is displayed (first 12 chars in table)
5. **Error handling**: Unsupported file types are automatically filtered out
## Implementation Details
### New Methods Added
- **`_scan_directory_for_files(directory: Path)`**: Static method that scans a directory and returns a list of dicts with:
- `path`: Path object to the file
- `name`: Filename
- `hash`: SHA256 hash
- `size`: File size in bytes
- `ext`: File extension
### Modified Methods
- **`run()`**:
- Detects when `-path` is a directory AND `-store` is provided
- Calls `_scan_directory_for_files()` to build the file list
- Displays result table
- **Returns early (return 0) without processing** - this is key!
- User selection via `@N` pipes selected items back to add-file for processing
- **`_resolve_source()`**:
- Added priority for directory scan results (path + hash keys)
- Handles items coming from @N selection seamlessly
## Error Handling
- If directory doesn't exist or isn't readable, returns error
- If a file fails to hash, it's skipped with debug output logged
- Unsupported file types are automatically filtered out during scan

View File

@@ -117,6 +117,72 @@ class Add_File(Cmdlet):
stage_ctx = ctx.get_stage_context() stage_ctx = ctx.get_stage_context()
is_last_stage = (stage_ctx is None) or bool(getattr(stage_ctx, "is_last_stage", False)) is_last_stage = (stage_ctx is None) or bool(getattr(stage_ctx, "is_last_stage", False))
# Directory-mode selector:
# - First pass: `add-file -store X -path <DIR>` should ONLY show a selectable table.
# - Second pass (triggered by @ selection expansion): re-run add-file with `-path file1,file2,...`
# and actually ingest/copy.
dir_scan_mode = False
dir_scan_results: Optional[List[Dict[str, Any]]] = None
explicit_path_list_results: Optional[List[Dict[str, Any]]] = None
if path_arg and location and not provider_name:
# Support comma-separated path lists: -path "file1,file2,file3"
# This is the mechanism used by @N expansion for directory tables.
try:
path_text = str(path_arg)
except Exception:
path_text = ""
if "," in path_text:
parts = [p.strip().strip('"') for p in path_text.split(",")]
parts = [p for p in parts if p]
batch: List[Dict[str, Any]] = []
for p in parts:
try:
file_path = Path(p)
except Exception:
continue
if not file_path.exists() or not file_path.is_file():
continue
ext = file_path.suffix.lower()
if ext not in SUPPORTED_MEDIA_EXTENSIONS:
continue
try:
hv = sha256_file(file_path)
except Exception:
continue
try:
size = file_path.stat().st_size
except Exception:
size = 0
batch.append({
"path": file_path,
"name": file_path.name,
"hash": hv,
"size": size,
"ext": ext,
})
if batch:
explicit_path_list_results = batch
# Clear path_arg so add-file doesn't treat it as a single path.
path_arg = None
else:
# Directory scan (selector table, no ingest yet)
try:
candidate_dir = Path(str(path_arg))
if candidate_dir.exists() and candidate_dir.is_dir():
dir_scan_mode = True
debug(f"[add-file] Scanning directory for batch add: {candidate_dir}")
dir_scan_results = Add_File._scan_directory_for_files(candidate_dir)
if dir_scan_results:
debug(f"[add-file] Found {len(dir_scan_results)} supported files in directory")
# Clear path_arg so it doesn't trigger single-item mode.
path_arg = None
except Exception as exc:
debug(f"[add-file] Directory scan failed: {exc}")
# Determine if -store targets a registered backend (vs a filesystem export path). # Determine if -store targets a registered backend (vs a filesystem export path).
is_storage_backend_location = False is_storage_backend_location = False
if location: if location:
@@ -127,9 +193,16 @@ class Add_File(Cmdlet):
is_storage_backend_location = False is_storage_backend_location = False
# Decide which items to process. # Decide which items to process.
# - If directory scan was performed, use those results
# - If user provided -path (and it was not reinterpreted as destination), treat this invocation as single-item. # - If user provided -path (and it was not reinterpreted as destination), treat this invocation as single-item.
# - Otherwise, if piped input is a list, ingest each item. # - Otherwise, if piped input is a list, ingest each item.
if path_arg: if explicit_path_list_results:
items_to_process = explicit_path_list_results
debug(f"[add-file] Using {len(items_to_process)} files from -path list")
elif dir_scan_results:
items_to_process = dir_scan_results
debug(f"[add-file] Using {len(items_to_process)} files from directory scan")
elif path_arg:
items_to_process: List[Any] = [result] items_to_process: List[Any] = [result]
elif isinstance(result, list) and result: elif isinstance(result, list) and result:
items_to_process = list(result) items_to_process = list(result)
@@ -152,6 +225,65 @@ class Add_File(Cmdlet):
debug(f"[add-file] INPUT result is list with {len(result)} items") debug(f"[add-file] INPUT result is list with {len(result)} items")
debug(f"[add-file] PARSED args: location={location}, provider={provider_name}, delete={delete_after}") debug(f"[add-file] PARSED args: location={location}, provider={provider_name}, delete={delete_after}")
# If this invocation was directory selector mode, show a selectable table and stop.
# The user then runs @N (optionally piped), which replays add-file with selected paths.
if dir_scan_mode:
try:
from result_table import ResultTable
from pathlib import Path as _Path
# Build base args to replay: keep everything except the directory -path.
base_args: List[str] = []
skip_next = False
for tok in list(args or []):
if skip_next:
skip_next = False
continue
t = str(tok)
if t in {"-path", "--path", "-p"}:
skip_next = True
continue
base_args.append(t)
table = ResultTable(title="Files in Directory", preserve_order=True)
table.set_table("add-file.directory")
table.set_source_command("add-file", base_args)
rows: List[Dict[str, Any]] = []
for file_info in (dir_scan_results or []):
p = file_info.get("path")
hp = str(file_info.get("hash") or "")
name = str(file_info.get("name") or "unknown")
try:
clean_title = _Path(name).stem
except Exception:
clean_title = name
ext = str(file_info.get("ext") or "").lstrip(".")
size = file_info.get("size", 0)
row_item = {
"path": str(p) if p is not None else "",
"hash": hp,
"title": clean_title,
"columns": [
("Title", clean_title),
("Hash", hp),
("Size", size),
("Ext", ext),
],
# Used by @N replay (CLI will combine selected rows into -path file1,file2,...)
"_selection_args": ["-path", str(p) if p is not None else ""],
}
rows.append(row_item)
table.add_result(row_item)
ctx.set_current_stage_table(table)
ctx.set_last_result_table(table, rows, subject={"table": "add-file.directory"})
log(f"✓ Found {len(rows)} files. Select with @N (e.g., @1 or @1-3).")
return 0
except Exception as exc:
debug(f"[add-file] Failed to display directory scan result table: {exc}")
collected_payloads: List[Dict[str, Any]] = [] collected_payloads: List[Dict[str, Any]] = []
pending_relationship_pairs: Dict[str, set[tuple[str, str]]] = {} pending_relationship_pairs: Dict[str, set[tuple[str, str]]] = {}
pending_url_associations: Dict[str, List[tuple[str, List[str]]]] = {} pending_url_associations: Dict[str, List[tuple[str, List[str]]]] = {}
@@ -976,7 +1108,23 @@ class Add_File(Cmdlet):
Returns (media_path_or_url, file_hash) Returns (media_path_or_url, file_hash)
where media_path_or_url can be a Path object or a URL string. where media_path_or_url can be a Path object or a URL string.
""" """
# PRIORITY 1: Try hash+store from result dict (most reliable for @N selections) # PRIORITY 1a: Try hash+path from directory scan result (has 'path' and 'hash' keys)
if isinstance(result, dict):
result_path = result.get("path")
result_hash = result.get("hash")
# Check if this looks like a directory scan result (has path and hash but no 'store' key)
result_store = result.get("store")
if result_path and result_hash and not result_store:
try:
media_path = Path(result_path) if not isinstance(result_path, Path) else result_path
if media_path.exists() and media_path.is_file():
debug(f"[add-file] Using path+hash from directory scan: {media_path}")
pipe_obj.path = str(media_path)
return media_path, str(result_hash)
except Exception as exc:
debug(f"[add-file] Failed to use directory scan result: {exc}")
# PRIORITY 1b: Try hash+store from result dict (most reliable for @N selections)
if isinstance(result, dict): if isinstance(result, dict):
result_hash = result.get("hash") result_hash = result.get("hash")
result_store = result.get("store") result_store = result.get("store")
@@ -1104,6 +1252,56 @@ class Add_File(Cmdlet):
log("File path could not be resolved") log("File path could not be resolved")
return None, None return None, None
@staticmethod
def _scan_directory_for_files(directory: Path) -> List[Dict[str, Any]]:
    """Collect info dicts for every supported media file in *directory*.

    Each dict contains:
        - path: Path object
        - name: filename
        - hash: sha256 hash
        - size: file size in bytes (0 when stat fails)
        - ext: lowercased file extension

    Files with unsupported extensions or that fail hashing are skipped;
    a missing or non-directory path yields an empty list.
    """
    if not (directory.exists() and directory.is_dir()):
        return []
    collected: List[Dict[str, Any]] = []
    try:
        for entry in directory.iterdir():
            if not entry.is_file():
                continue
            suffix = entry.suffix.lower()
            if suffix not in SUPPORTED_MEDIA_EXTENSIONS:
                continue
            # Hash failures are logged and the file is skipped.
            try:
                digest = sha256_file(entry)
            except Exception as exc:
                debug(f"Failed to hash {entry}: {exc}")
                continue
            # Size is best-effort; 0 when stat is unavailable.
            try:
                byte_count = entry.stat().st_size
            except Exception:
                byte_count = 0
            collected.append({
                "path": entry,
                "name": entry.name,
                "hash": digest,
                "size": byte_count,
                "ext": suffix,
            })
    except Exception as exc:
        debug(f"Error scanning directory {directory}: {exc}")
    return collected
@staticmethod @staticmethod
def _fetch_hydrus_path( def _fetch_hydrus_path(
file_hash: str, file_hash: str,

View File

@@ -3,6 +3,7 @@ from __future__ import annotations
from typing import Any, Dict, List, Sequence, Optional from typing import Any, Dict, List, Sequence, Optional
from pathlib import Path from pathlib import Path
import sys import sys
import re
from SYS.logger import log from SYS.logger import log
@@ -26,6 +27,184 @@ from Store import Store
from SYS.utils import sha256_file from SYS.utils import sha256_file
_FIELD_NAME_RE = re.compile(r"^[A-Za-z0-9_]+$")
def _normalize_title_for_extract(text: str) -> str:
"""Normalize common separators in titles for matching.
Helps when sources use unicode dashes or odd whitespace.
"""
s = str(text or "").strip()
if not s:
return s
# Common unicode dash variants -> '-'
s = s.replace("\u2013", "-") # en dash
s = s.replace("\u2014", "-") # em dash
s = s.replace("\u2212", "-") # minus sign
s = s.replace("\u2010", "-") # hyphen
s = s.replace("\u2011", "-") # non-breaking hyphen
s = s.replace("\u2012", "-") # figure dash
s = s.replace("\u2015", "-") # horizontal bar
return s
def _strip_title_prefix(text: str) -> str:
s = str(text or "").strip()
if s.lower().startswith("title:"):
s = s.split(":", 1)[1].strip()
return s
def _literal_to_title_pattern_regex(literal: str) -> str:
"""Convert a literal chunk of a template into a regex fragment.
Keeps punctuation literal, but treats any whitespace run as \\s*.
"""
out: List[str] = []
i = 0
while i < len(literal):
ch = literal[i]
if ch.isspace():
while i < len(literal) and literal[i].isspace():
i += 1
out.append(r"\\s*")
continue
out.append(re.escape(ch))
i += 1
return "".join(out)
def _compile_extract_template(template: str) -> tuple[re.Pattern[str], List[str]]:
    """Compile a simple (field) template into a regex.

    Example template:
        (artist) - (album) - (disk)-(track) (title)

    This is *not* user-facing regex: we only support named fields in
    parentheses. Returns the compiled case-insensitive pattern plus the
    field names in order of appearance.

    Raises:
        ValueError: for an empty template, a template with no (field),
            or a field name outside [A-Za-z0-9_].
    """
    tpl = str(template or "").strip()
    if not tpl:
        raise ValueError("empty extract template")
    matches = list(re.finditer(r"\(([^)]+)\)", tpl))
    if not matches:
        raise ValueError("extract template must contain at least one (field)")
    field_names: List[str] = []
    # BUG FIX: anchors were r"^\\s*" / r"\\s*$" (literal backslash + 's*'),
    # which demanded a literal '\' in the title and made matching impossible.
    parts: List[str] = [r"^\s*"]
    last_end = 0
    for idx, m in enumerate(matches):
        literal = tpl[last_end : m.start()]
        if literal:
            parts.append(_literal_to_title_pattern_regex(literal))
        raw_name = (m.group(1) or "").strip()
        if not raw_name or not _FIELD_NAME_RE.fullmatch(raw_name):
            raise ValueError(f"invalid field name '{raw_name}' (use A-Z, 0-9, underscore)")
        field_names.append(raw_name)
        is_last = idx == (len(matches) - 1)
        # The final field is greedy so it captures the rest of the title;
        # earlier fields are lazy so literals between them still anchor.
        if is_last:
            parts.append(fr"(?P<{raw_name}>.+)")
        else:
            parts.append(fr"(?P<{raw_name}>.+?)")
        last_end = m.end()
    tail = tpl[last_end:]
    if tail:
        parts.append(_literal_to_title_pattern_regex(tail))
    parts.append(r"\s*$")
    rx = "".join(parts)
    return re.compile(rx, flags=re.IGNORECASE), field_names
def _extract_tags_from_title(title_text: str, template: str) -> List[str]:
    """Extract (field)->value from title_text and return ['field:value', ...]."""
    cleaned = _normalize_title_for_extract(_strip_title_prefix(title_text))
    if not cleaned:
        return []
    pattern, field_names = _compile_extract_template(template)
    matched = pattern.match(cleaned)
    if matched is None:
        return []
    # Empty captures are dropped; order follows the template's fields.
    return [
        f"{name}:{value}"
        for name in field_names
        if (value := (matched.group(name) or "").strip())
    ]
def _get_title_candidates_for_extraction(res: Any, existing_tags: Optional[List[str]] = None) -> List[str]:
    """Return a list of possible title strings in priority order."""
    candidates: List[str] = []

    def remember(value: Any) -> None:
        # Normalize, drop empties, and de-duplicate while keeping order.
        if value is None:
            return
        cleaned = _normalize_title_for_extract(_strip_title_prefix(str(value)))
        if cleaned and cleaned not in candidates:
            candidates.append(cleaned)

    # 1) Item's title field (may be a display title, not the title: tag)
    try:
        remember(get_field(res, "title"))
    except Exception:
        pass
    if isinstance(res, dict):
        remember(res.get("title"))
    # 2) title: tag from either store tags or piped tags
    tags = existing_tags if isinstance(existing_tags, list) else _extract_item_tags(res)
    remember(_extract_title_tag(tags) or "")
    # 3) Filename stem
    try:
        path_val = get_field(res, "path")
        if path_val:
            remember((Path(str(path_val)).stem or "").strip())
    except Exception:
        pass
    return candidates
def _extract_tags_from_title_candidates(candidates: List[str], template: str) -> tuple[List[str], Optional[str]]:
    """Try candidates in order; return (tags, matched_candidate)."""
    # First candidate that yields any tags wins; otherwise report no match.
    for candidate in candidates:
        tags = _extract_tags_from_title(candidate, template)
        if tags:
            return tags, candidate
    return [], None
def _try_compile_extract_template(template: Optional[str]) -> tuple[Optional[re.Pattern[str]], Optional[str]]:
    """Compile template for debug; return (pattern, error_message)."""
    if template is None:
        return None, None
    try:
        compiled, _fields = _compile_extract_template(str(template))
    except Exception as exc:
        # Surface the compile failure as text instead of raising.
        return None, str(exc)
    return compiled, None
def _extract_title_tag(tags: List[str]) -> Optional[str]: def _extract_title_tag(tags: List[str]) -> Optional[str]:
"""Return the value of the first title: tag if present.""" """Return the value of the first title: tag if present."""
for t in tags: for t in tags:
@@ -242,6 +421,8 @@ class Add_Tag(Cmdlet):
CmdletArg("tag", type="string", required=False, description="One or more tag to add. Comma- or space-separated. Can also use {list_name} syntax. If omitted, uses tag from pipeline payload.", variadic=True), CmdletArg("tag", type="string", required=False, description="One or more tag to add. Comma- or space-separated. Can also use {list_name} syntax. If omitted, uses tag from pipeline payload.", variadic=True),
SharedArgs.QUERY, SharedArgs.QUERY,
SharedArgs.STORE, SharedArgs.STORE,
CmdletArg("-extract", type="string", description="Extract tags from the item's title using a simple template with (field) placeholders. Example: -extract \"(artist) - (album) - (disk)-(track) (title)\" will add artist:, album:, disk:, track:, title: tags."),
CmdletArg("--extract-debug", type="flag", description="Print debug info for -extract matching (matched title source and extracted tags)."),
CmdletArg("-duplicate", type="string", description="Copy existing tag values to new namespaces. Formats: title:album,artist (explicit) or title,album,artist (inferred)"), CmdletArg("-duplicate", type="string", description="Copy existing tag values to new namespaces. Formats: title:album,artist (explicit) or title,album,artist (inferred)"),
CmdletArg("-list", type="string", description="Load predefined tag lists from adjective.json. Comma-separated list names (e.g., -list philosophy,occult)."), CmdletArg("-list", type="string", description="Load predefined tag lists from adjective.json. Comma-separated list names (e.g., -list philosophy,occult)."),
CmdletArg("--all", type="flag", description="Include temporary files in tagging (by default, only tag non-temporary files)."), CmdletArg("--all", type="flag", description="Include temporary files in tagging (by default, only tag non-temporary files)."),
@@ -258,6 +439,7 @@ class Add_Tag(Cmdlet):
" Inferred format: -duplicate title,album,artist (first is source, rest are targets)", " Inferred format: -duplicate title,album,artist (first is source, rest are targets)",
"- The source namespace must already exist in the file being tagged.", "- The source namespace must already exist in the file being tagged.",
"- Target namespaces that already have a value are skipped (not overwritten).", "- Target namespaces that already have a value are skipped (not overwritten).",
"- Use -extract to derive namespaced tags from the current title (title field or title: tag) using a simple template.",
], ],
exec=self.run, exec=self.run,
) )
@@ -272,6 +454,13 @@ class Add_Tag(Cmdlet):
# Parse arguments # Parse arguments
parsed = parse_cmdlet_args(args, self) parsed = parse_cmdlet_args(args, self)
extract_template = parsed.get("extract")
if extract_template is not None:
extract_template = str(extract_template)
extract_debug = bool(parsed.get("extract-debug", False))
extract_debug_rx, extract_debug_err = _try_compile_extract_template(extract_template)
query_hash = sh.parse_single_hash_query(parsed.get("query")) query_hash = sh.parse_single_hash_query(parsed.get("query"))
if parsed.get("query") and not query_hash: if parsed.get("query") and not query_hash:
log("[add_tag] Error: -query must be of the form hash:<sha256>", file=sys.stderr) log("[add_tag] Error: -query must be of the form hash:<sha256>", file=sys.stderr)
@@ -304,8 +493,10 @@ class Add_Tag(Cmdlet):
if isinstance(raw_tag, str): if isinstance(raw_tag, str):
raw_tag = [raw_tag] raw_tag = [raw_tag]
# Fallback: if no tag provided explicitly, try to pull from first result payload # Fallback: if no tag provided explicitly, try to pull from first result payload.
if not raw_tag and results: # IMPORTANT: when -extract is used, users typically want *only* extracted tags,
# not "re-add whatever tags are already in the payload".
if not raw_tag and results and not extract_template:
first = results[0] first = results[0]
payload_tag = None payload_tag = None
@@ -341,8 +532,12 @@ class Add_Tag(Cmdlet):
tag_to_add = parse_tag_arguments(raw_tag) tag_to_add = parse_tag_arguments(raw_tag)
tag_to_add = expand_tag_groups(tag_to_add) tag_to_add = expand_tag_groups(tag_to_add)
if not tag_to_add: if not tag_to_add and not extract_template:
log("No tag provided to add", file=sys.stderr) log("No tag provided to add (and no -extract template provided)", file=sys.stderr)
return 1
if extract_template and extract_debug and extract_debug_err:
log(f"[add_tag] extract template error: {extract_debug_err}", file=sys.stderr)
return 1 return 1
# Get other flags # Get other flags
@@ -355,6 +550,9 @@ class Add_Tag(Cmdlet):
store_registry = Store(config) store_registry = Store(config)
extract_matched_items = 0
extract_no_match_items = 0
for res in results: for res in results:
store_name: Optional[str] store_name: Optional[str]
raw_hash: Optional[str] raw_hash: Optional[str]
@@ -389,6 +587,24 @@ class Add_Tag(Cmdlet):
existing_lower = {t.lower() for t in existing_tag_list if isinstance(t, str)} existing_lower = {t.lower() for t in existing_tag_list if isinstance(t, str)}
item_tag_to_add = list(tag_to_add) item_tag_to_add = list(tag_to_add)
if extract_template:
candidates = _get_title_candidates_for_extraction(res, existing_tag_list)
extracted, matched = _extract_tags_from_title_candidates(candidates, extract_template)
if extracted:
extract_matched_items += 1
if extract_debug:
log(f"[add_tag] extract matched: {matched!r} -> {extracted}", file=sys.stderr)
for new_tag in extracted:
if new_tag.lower() not in existing_lower:
item_tag_to_add.append(new_tag)
else:
extract_no_match_items += 1
if extract_debug:
rx_preview = extract_debug_rx.pattern if extract_debug_rx else "<uncompiled>"
cand_preview = "; ".join([repr(c) for c in candidates[:3]])
log(f"[add_tag] extract no match for template {extract_template!r}. regex: {rx_preview!r}. candidates: {cand_preview}", file=sys.stderr)
item_tag_to_add = collapse_namespace_tag(item_tag_to_add, "title", prefer="last") item_tag_to_add = collapse_namespace_tag(item_tag_to_add, "title", prefer="last")
if duplicate_arg: if duplicate_arg:
@@ -492,6 +708,24 @@ class Add_Tag(Cmdlet):
# Per-item tag list (do not mutate shared list) # Per-item tag list (do not mutate shared list)
item_tag_to_add = list(tag_to_add) item_tag_to_add = list(tag_to_add)
if extract_template:
candidates2 = _get_title_candidates_for_extraction(res, existing_tag_list)
extracted2, matched2 = _extract_tags_from_title_candidates(candidates2, extract_template)
if extracted2:
extract_matched_items += 1
if extract_debug:
log(f"[add_tag] extract matched: {matched2!r} -> {extracted2}", file=sys.stderr)
for new_tag in extracted2:
if new_tag.lower() not in existing_lower:
item_tag_to_add.append(new_tag)
else:
extract_no_match_items += 1
if extract_debug:
rx_preview2 = extract_debug_rx.pattern if extract_debug_rx else "<uncompiled>"
cand_preview2 = "; ".join([repr(c) for c in candidates2[:3]])
log(f"[add_tag] extract no match for template {extract_template!r}. regex: {rx_preview2!r}. candidates: {cand_preview2}", file=sys.stderr)
item_tag_to_add = collapse_namespace_tag(item_tag_to_add, "title", prefer="last") item_tag_to_add = collapse_namespace_tag(item_tag_to_add, "title", prefer="last")
# Handle -duplicate logic (copy existing tag to new namespaces) # Handle -duplicate logic (copy existing tag to new namespaces)
@@ -563,6 +797,12 @@ class Add_Tag(Cmdlet):
f"[add_tag] Added {total_added} new tag(s) across {len(results)} item(s); modified {total_modified} item(s)", f"[add_tag] Added {total_added} new tag(s) across {len(results)} item(s); modified {total_modified} item(s)",
file=sys.stderr, file=sys.stderr,
) )
if extract_template and extract_matched_items == 0:
log(f"[add_tag] extract: no matches for template '{extract_template}' across {len(results)} item(s)", file=sys.stderr)
elif extract_template and extract_no_match_items > 0 and extract_debug:
log(f"[add_tag] extract: matched {extract_matched_items}, no-match {extract_no_match_items}", file=sys.stderr)
return 0 return 0