f
This commit is contained in:
@@ -7,7 +7,7 @@ these pure helpers are easier to test.
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import Any, Dict, List, Optional, Set, Tuple
|
||||
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
|
||||
|
||||
# Prompt-toolkit lexer types are optional at import time; fall back to lightweight
|
||||
# stubs if prompt_toolkit is not available so imports remain safe for testing.
|
||||
@@ -363,3 +363,111 @@ class SelectionFilterSyntax:
|
||||
return True
|
||||
|
||||
|
||||
class MedeiaLexer(Lexer):
    """Prompt-toolkit lexer that colorizes Medeia command lines.

    Emits (style, text) pairs using the classes ``class:pipe``,
    ``class:string``, ``class:cmdlet``, ``class:argument``, ``class:value``
    and the ``class:selection_*`` family. Relies on the module-level
    precompiled patterns TOKEN_PATTERN, DRIVE_RE, KEY_PREFIX_RE and
    SELECTION_RANGE_RE.
    """

    def lex_document(self, document: "Document") -> Callable[[int], List[Tuple[str, str]]]:  # type: ignore[override]
        """Return a callable mapping a line number to that line's tokens."""

        def get_line(lineno: int) -> List[Tuple[str, str]]:
            """Return token list for a single input line (used by prompt-toolkit)."""
            text = document.lines[lineno]
            result: List[Tuple[str, str]] = []

            # The first bare word on the line — and after every pipe — is a cmdlet.
            expect_cmdlet = True

            def _try_emit_keyed(token: str) -> bool:
                """Emit comma-separated `key:` prefixed specs as argument/value tokens.

                Handles values like ``clip:3m4s-3m14s,1h22m-1h33m,item:2-3``
                while deliberately skipping URLs (``://``) and Windows drive
                paths (``C:\\...``). Returns True if the token was handled.
                """
                if not token or ":" not in token:
                    return False
                # URLs and common scheme patterns keep default highlighting.
                if "://" in token:
                    return False
                # So do Windows drive paths (e.g., C:\foo or D:/bar).
                if DRIVE_RE.match(token):
                    return False

                emitted = False
                for idx, piece in enumerate(token.split(",")):
                    if idx:
                        result.append(("class:value", ","))
                    if not piece:
                        continue
                    keyed = KEY_PREFIX_RE.match(piece)
                    if keyed is None:
                        result.append(("class:value", piece))
                    else:
                        result.append(("class:argument", keyed.group(1)))
                        if keyed.group(2):
                            result.append(("class:value", keyed.group(2)))
                    emitted = True
                return emitted

            # TOKEN_PATTERN is precompiled at module scope; each match yields
            # exactly one of: whitespace, pipe, quoted string, or bare word.
            for found in TOKEN_PATTERN.finditer(text):
                ws, pipe, quote, word = found.groups()

                if ws:
                    result.append(("", ws))
                    continue

                if pipe:
                    result.append(("class:pipe", pipe))
                    expect_cmdlet = True
                    continue

                if quote:
                    # If the quoted token contains a keyed spec (clip:/item:/hash:),
                    # highlight the `key:` portion in argument-blue even inside quotes.
                    if len(quote) >= 2 and quote[0] == quote[-1] and quote[0] in ('"', "'"):
                        mark = quote[0]
                        anchor = len(result)
                        if _try_emit_keyed(quote[1:-1]):
                            result.insert(anchor, ("class:string", mark))
                            result.append(("class:string", mark))
                            expect_cmdlet = False
                            continue
                    result.append(("class:string", quote))
                    expect_cmdlet = False
                    continue

                if not word:
                    continue

                if word.startswith("@"):  # selection tokens
                    rest = word[1:]
                    if rest and SELECTION_RANGE_RE.fullmatch(rest):
                        result.append(("class:selection_at", "@"))
                        result.append(("class:selection_range", rest))
                        expect_cmdlet = False
                        continue
                    if rest and ":" in rest:
                        result.append(("class:selection_at", "@"))
                        result.append(("class:selection_filter", rest))
                        expect_cmdlet = False
                        continue
                    if not rest:
                        result.append(("class:selection_at", "@"))
                        expect_cmdlet = False
                        continue
                    # An '@word' that matches no selection form falls through
                    # and is classified like any other bare word.

                if expect_cmdlet:
                    result.append(("class:cmdlet", word))
                    expect_cmdlet = False
                elif word.startswith("-"):
                    result.append(("class:argument", word))
                elif not _try_emit_keyed(word):
                    result.append(("class:value", word))

            return result

        return get_line
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user