"""Configuration persistence for Medios-Macina (SYS/config.py).

Loads application configuration from the SQLite-backed config store,
caches it per process, and saves it back with cross-process locking and
conflict detection against external edits.
"""
from __future__ import annotations
import json
import sqlite3
import time
import os
import traceback
import datetime
import sys
import getpass
import hashlib
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
from SYS.logger import log
import logging
logger = logging.getLogger(__name__)
from SYS.utils import expand_path
from SYS.database import db, get_config_all, save_config_value, rows_to_config
# Directory containing this module; used as the default base for relative paths.
SCRIPT_DIR = Path(__file__).resolve().parent
# Save lock settings (cross-process)
_SAVE_LOCK_DIRNAME = ".medios_save_lock"  # lock directory created next to the DB file
_SAVE_LOCK_TIMEOUT = 30.0 # seconds to wait for save lock
_SAVE_LOCK_STALE_SECONDS = 3600 # consider lock stale after 1 hour
# Process-wide cache of the loaded config, plus a deep-copied snapshot of what
# was last persisted (used by save_config to detect external edits on disk).
_CONFIG_CACHE: Dict[str, Any] = {}
_LAST_SAVED_CONFIG: Dict[str, Any] = {}
# Retry policy for SQLite "database is locked" errors during save.
_CONFIG_SAVE_MAX_RETRIES = 5
_CONFIG_SAVE_RETRY_DELAY = 0.15
class ConfigSaveConflict(Exception):
    """Raised when a save would overwrite external changes present on disk."""
def global_config() -> List[Dict[str, Any]]:
    """Return the schema describing the top-level (global) settings."""
    boolean = ["true", "false"]
    return [
        {
            "key": "debug",
            "label": "Debug Output",
            "default": "false",
            "choices": list(boolean),
        },
        {
            "key": "auto_update",
            "label": "Auto-Update",
            "default": "true",
            "choices": list(boolean),
        },
    ]
def clear_config_cache() -> None:
    """Drop both the in-memory config cache and the last-saved baseline snapshot."""
    global _CONFIG_CACHE, _LAST_SAVED_CONFIG
    # Rebind (rather than mutate in place) so previously handed-out dicts are untouched.
    _CONFIG_CACHE, _LAST_SAVED_CONFIG = {}, {}
def get_hydrus_instance(
    config: Dict[str, Any], instance_name: str = "home"
) -> Optional[Dict[str, Any]]:
    """Return the Hydrus instance config dict for *instance_name*, or None.

    Lookup order: exact key, case-insensitive key, then any instance whose
    name is not a "new_" placeholder, then the first instance by sorted name.
    """
    store = config.get("store", {})
    if not isinstance(store, dict):
        return None
    instances = store.get("hydrusnetwork", {})
    if not isinstance(instances, dict) or not instances:
        return None
    # Exact-name hit wins outright.
    exact = instances.get(instance_name)
    if isinstance(exact, dict):
        return exact
    # Case-insensitive match.
    wanted = str(instance_name or "").lower()
    for label, entry in instances.items():
        if isinstance(entry, dict) and str(label).lower() == wanted:
            return entry
    # Fallback: first non-placeholder instance in sorted order.
    ordered = sorted(instances.keys())
    for label in ordered:
        if str(label or "").startswith("new_"):
            continue
        entry = instances.get(label)
        if isinstance(entry, dict):
            return entry
    # Last resort: the first instance, even if it is a "new_" placeholder.
    entry = instances.get(ordered[0])
    return entry if isinstance(entry, dict) else None
def get_hydrus_access_key(config: Dict[str, Any], instance_name: str = "home") -> Optional[str]:
    """Return the Hydrus Client API access key for *instance_name*, or None.

    Reads config["store"]["hydrusnetwork"][<name>]["API"]; the value is
    stringified and stripped before being returned.
    """
    instance = get_hydrus_instance(config, instance_name)
    if not instance:
        return None
    raw = instance.get("API")
    return str(raw).strip() if raw else None
def get_hydrus_url(config: Dict[str, Any], instance_name: str = "home") -> Optional[str]:
    """Return the Hydrus Client API base URL for *instance_name*, or None.

    Reads config["store"]["hydrusnetwork"][<name>]["URL"]; the value is
    stringified and stripped before being returned.
    """
    instance = get_hydrus_instance(config, instance_name)
    if not instance:
        return None
    raw = instance.get("URL")
    return str(raw).strip() if raw else None
def get_provider_block(config: Dict[str, Any], name: str) -> Dict[str, Any]:
    """Return the settings dict for provider *name*, or {} when absent.

    Matching is whitespace/case-insensitive via _normalize_provider_name.
    """
    section = config.get("provider")
    if not isinstance(section, dict):
        return {}
    wanted = _normalize_provider_name(name)
    if wanted:
        direct = section.get(wanted)
        if isinstance(direct, dict):
            return direct
        # Fall back to scanning keys that normalize to the same name.
        for raw_key, entry in section.items():
            if isinstance(entry, dict) and _normalize_provider_name(raw_key) == wanted:
                return entry
    return {}
def get_soulseek_username(config: Dict[str, Any]) -> Optional[str]:
    """Return the configured Soulseek username (either key casing), or None."""
    block = get_provider_block(config, "soulseek")
    raw = block.get("username") or block.get("USERNAME")
    return str(raw).strip() if raw else None
def get_soulseek_password(config: Dict[str, Any]) -> Optional[str]:
    """Return the configured Soulseek password (either key casing), or None."""
    block = get_provider_block(config, "soulseek")
    raw = block.get("password") or block.get("PASSWORD")
    return str(raw).strip() if raw else None
def resolve_output_dir(config: Dict[str, Any]) -> Path:
    """Resolve the output directory with a single source of truth.

    Priority:
        1. config["temp"]    - explicit temp/output directory
        2. config["outfile"] - fallback setting
        3. system temp dir   - final fallback

    Returns:
        Path to the chosen output directory.
    """
    configured_temp = config.get("temp")
    if configured_temp:
        try:
            candidate = expand_path(configured_temp)
            # Only accept it when the directory (or at least its parent) exists.
            if candidate.exists() or candidate.parent.exists():
                return candidate
        except Exception as exc:
            logger.debug("resolve_output_dir: failed to expand temp value %r: %s", configured_temp, exc, exc_info=True)
    configured_outfile = config.get("outfile")
    if configured_outfile:
        try:
            return expand_path(configured_outfile)
        except Exception as exc:
            logger.debug("resolve_output_dir: failed to expand outfile value %r: %s", configured_outfile, exc, exc_info=True)
    return Path(tempfile.gettempdir())
def get_local_storage_path(config: Dict[str, Any]) -> Optional[Path]:
    """Return the configured local-storage directory, or None.

    Checks config["storage"]["local"]["path"] first, then the legacy
    config["Local"]["path"] layout.
    """
    storage_section = config.get("storage", {})
    if isinstance(storage_section, dict):
        local_section = storage_section.get("local", {})
        if isinstance(local_section, dict):
            configured = local_section.get("path")
            if configured:
                return expand_path(configured)
    # Legacy capitalized top-level key.
    legacy_section = config.get("Local", {})
    if isinstance(legacy_section, dict):
        configured = legacy_section.get("path")
        if configured:
            return expand_path(configured)
    return None
def get_debrid_api_key(config: Dict[str, Any], service: str = "All-debrid") -> Optional[str]:
    """Return the API key for a debrid service, or None.

    Reads config["store"]["debrid"][<service, trimmed+lowercased>]; the entry
    may be a {"api_key": ...} dict or a bare key string.
    """
    store = config.get("store", {})
    if not isinstance(store, dict):
        return None
    debrid_section = store.get("debrid", {})
    if not isinstance(debrid_section, dict):
        return None
    entry = debrid_section.get(str(service).strip().lower())
    if isinstance(entry, dict):
        raw = entry.get("api_key")
        return str(raw).strip() if raw else None
    if isinstance(entry, str):
        # Bare-string entries: empty/whitespace-only collapses to None.
        return entry.strip() or None
    return None
def get_provider_credentials(config: Dict[str, Any], provider: str) -> Optional[Dict[str, str]]:
    """Return the credentials dict for *provider*, or None.

    Checks the modern config["provider"][<name, lowercased>] layout first,
    then a small set of legacy capitalized top-level keys.
    """
    lowered = provider.lower()
    modern = config.get("provider", {})
    if isinstance(modern, dict):
        creds = modern.get(lowered, {})
        if isinstance(creds, dict) and creds:
            return creds
    # Legacy layout used capitalized top-level section names.
    legacy_names = {
        "openlibrary": "OpenLibrary",
        "archive": "Archive",
        "soulseek": "Soulseek",
    }
    legacy_key = legacy_names.get(lowered)
    if legacy_key:
        creds = config.get(legacy_key, {})
        if isinstance(creds, dict) and creds:
            return creds
    return None
def resolve_cookies_path(
    config: Dict[str, Any], script_dir: Optional[Path] = None
) -> Optional[Path]:
    """Locate a yt-dlp cookies file, or return None.

    Only the modular config style is honoured:
        [tool=ytdlp]
        cookies="C:\\path\\cookies.txt"
    Both "cookies" and "cookiefile" keys are checked; relative paths are
    resolved against *script_dir* (default: this module's directory).
    Falls back to <base>/cookies.txt when it exists.
    """
    configured: list[Any] = []
    try:
        tool_section = config.get("tool")
        if isinstance(tool_section, dict):
            ytdlp_section = tool_section.get("ytdlp")
            if isinstance(ytdlp_section, dict):
                configured.append(ytdlp_section.get("cookies"))
                configured.append(ytdlp_section.get("cookiefile"))
    except Exception as exc:
        logger.debug("resolve_cookies_path: failed to read tool.ytdlp cookies: %s", exc, exc_info=True)
    root = script_dir or SCRIPT_DIR
    for raw in configured:
        if not raw:
            continue
        resolved = expand_path(raw)
        if not resolved.is_absolute():
            resolved = expand_path(root / resolved)
        if resolved.is_file():
            return resolved
    fallback = root / "cookies.txt"
    return fallback if fallback.is_file() else None
def resolve_debug_log(config: Dict[str, Any]) -> Optional[Path]:
    """Return the configured download debug-log path (made absolute), or None."""
    configured = config.get("download_debug_log")
    if not configured:
        return None
    resolved = expand_path(configured)
    # Relative paths are anchored at the current working directory.
    return resolved if resolved.is_absolute() else Path.cwd() / resolved
def _normalize_provider_name(value: Any) -> Optional[str]:
candidate = str(value or "").strip().lower()
return candidate if candidate else None
def _extract_api_key(value: Any) -> Optional[str]:
if isinstance(value, dict):
for key in ("api_key", "API_KEY", "apikey", "APIKEY"):
candidate = value.get(key)
if isinstance(candidate, str) and candidate.strip():
return candidate.strip()
elif isinstance(value, str):
trimmed = value.strip()
if trimmed:
return trimmed
return None
def _sync_alldebrid_api_key(config: Dict[str, Any]) -> None:
    """Mirror the AllDebrid API key between its two homes in *config*.

    The key may live under config["provider"]["alldebrid"] or under
    config["store"]["debrid"]["all-debrid"]. A provider-side key takes
    precedence and is pushed into the store; otherwise a store-side key is
    copied back into the provider section. Bare-string entries on either
    side are upgraded to the {"api_key": ...} dict form. Mutates *config*
    in place and returns nothing.
    """
    if not isinstance(config, dict):
        return
    # Ensure a provider section exists, then read any provider-side key.
    providers = config.get("provider")
    if not isinstance(providers, dict):
        providers = {}
        config["provider"] = providers
    provider_entry = providers.get("alldebrid")
    provider_section: Dict[str, Any] | None = None
    provider_key = None
    if isinstance(provider_entry, dict):
        provider_section = provider_entry
        provider_key = _extract_api_key(provider_section)
    elif isinstance(provider_entry, str):
        # Bare-string provider entry: normalize to dict form in place.
        provider_key = provider_entry.strip()
        if provider_key:
            provider_section = {"api_key": provider_key}
            providers["alldebrid"] = provider_section
    # Ensure a store section exists, then read any store-side key.
    store_block = config.get("store")
    if not isinstance(store_block, dict):
        store_block = {}
        config["store"] = store_block
    debrid_block = store_block.get("debrid")
    store_key = None
    if isinstance(debrid_block, dict):
        service_entry = debrid_block.get("all-debrid")
        if isinstance(service_entry, dict):
            store_key = _extract_api_key(service_entry)
        elif isinstance(service_entry, str):
            store_key = service_entry.strip()
            if store_key:
                # Normalize bare-string store entry to dict form as well.
                debrid_block["all-debrid"] = {"api_key": store_key}
    else:
        debrid_block = None
    if provider_key:
        # Provider-side key wins: push it into the store section.
        if debrid_block is None:
            debrid_block = {}
            store_block["debrid"] = debrid_block
        service_section = debrid_block.get("all-debrid")
        if not isinstance(service_section, dict):
            service_section = {}
            debrid_block["all-debrid"] = service_section
        service_section["api_key"] = provider_key
    elif store_key:
        # No provider key: copy the store-side key into the provider section.
        if provider_section is None:
            provider_section = {}
            providers["alldebrid"] = provider_section
        provider_section["api_key"] = store_key
def _flatten_config_entries(config: Dict[str, Any]) -> Dict[Tuple[str, str, str, str], Any]:
entries: Dict[Tuple[str, str, str, str], Any] = {}
for key, value in config.items():
if key in ('store', 'provider', 'tool') and isinstance(value, dict):
for subtype, instances in value.items():
if not isinstance(instances, dict):
continue
if key == 'store':
for name, settings in instances.items():
if not isinstance(settings, dict):
continue
for k, v in settings.items():
entries[(key, subtype, name, k)] = v
else:
for k, v in instances.items():
entries[(key, subtype, 'default', k)] = v
elif not key.startswith('_') and value is not None:
entries[('global', 'none', 'none', key)] = value
return entries
def _count_changed_entries(old_config: Dict[str, Any], new_config: Dict[str, Any]) -> int:
    """Count flattened entries that were added, modified, or removed between snapshots."""
    before = _flatten_config_entries(old_config or {})
    after = _flatten_config_entries(new_config or {})
    added_or_modified = sum(1 for key, value in after.items() if before.get(key) != value)
    removed = sum(1 for key in before if key not in after)
    return added_or_modified + removed
def load_config() -> Dict[str, Any]:
    """Load configuration from the database, with process-level caching.

    Returns the cached dict when one exists; otherwise reads all rows via
    get_config_all(), syncs the AllDebrid key between its two homes, and
    snapshots a deep copy as the conflict-detection baseline used by
    save_config(). Returns {} when the database holds no configuration.
    """
    global _CONFIG_CACHE, _LAST_SAVED_CONFIG
    if _CONFIG_CACHE:
        return _CONFIG_CACHE
    # Load strictly from database
    db_config = get_config_all()
    if db_config:
        _sync_alldebrid_api_key(db_config)
        _CONFIG_CACHE = db_config
        # Deep copy: the baseline must not alias the dict handed to callers.
        _LAST_SAVED_CONFIG = deepcopy(db_config)
        try:
            # Log a compact summary to help detect startup overwrites/mismatches
            provs = list(db_config.get("provider", {}).keys()) if isinstance(db_config.get("provider"), dict) else []
            stores = list(db_config.get("store", {}).keys()) if isinstance(db_config.get("store"), dict) else []
            mtime = None
            try:
                mtime = datetime.datetime.fromtimestamp(db.db_path.stat().st_mtime, datetime.timezone.utc).isoformat().replace('+00:00', 'Z')
            except Exception:
                mtime = None
            summary = (
                f"Loaded config from {db.db_path.name}: providers={len(provs)} ({', '.join(provs[:10])}{'...' if len(provs)>10 else ''}), "
                f"stores={len(stores)} ({', '.join(stores[:10])}{'...' if len(stores)>10 else ''}), mtime={mtime}"
            )
            log(summary)
            # Forensics disabled: audit/mismatch/backup detection removed to simplify code.
        except Exception:
            logger.exception("Failed to build config load summary from %s", db.db_path)
        return db_config
    # Empty database: reset the baseline so a later save counts everything as new.
    _LAST_SAVED_CONFIG = {}
    return {}
def reload_config() -> Dict[str, Any]:
    """Drop the cache and re-read configuration from the database."""
    clear_config_cache()
    return load_config()
def _acquire_save_lock(timeout: float = _SAVE_LOCK_TIMEOUT):
    """Acquire a cross-process save lock implemented as a directory.

    Returns the Path to the created lock directory. Raises ConfigSaveConflict
    if the lock cannot be acquired within the timeout.
    """
    lock_dir = Path(db.db_path).with_name(_SAVE_LOCK_DIRNAME)
    start = time.time()
    while True:
        try:
            # mkdir(exist_ok=False) is atomic: exactly one process wins the race.
            lock_dir.mkdir(exist_ok=False)
            # Write owner metadata for diagnostics
            try:
                (lock_dir / "owner.json").write_text(json.dumps({
                    "pid": os.getpid(),
                    "ts": time.time(),
                    "cmdline": " ".join(sys.argv),
                }))
            except Exception as exc:
                # Metadata is best-effort; the lock itself is already held.
                logger.exception("Failed to write save lock owner metadata %s: %s", lock_dir, exc)
            return lock_dir
        except FileExistsError:
            # Check for stale lock
            try:
                owner = lock_dir / "owner.json"
                if owner.exists():
                    data = json.loads(owner.read_text())
                    ts = data.get("ts") or 0
                    if time.time() - ts > _SAVE_LOCK_STALE_SECONDS:
                        try:
                            import shutil
                            shutil.rmtree(lock_dir)
                            # Stale lock removed; retry acquisition immediately.
                            continue
                        except Exception as exc:
                            logger.exception("Failed to remove stale save lock dir %s", lock_dir)
                else:
                    # No owner file; if directory is old enough consider it stale
                    try:
                        if time.time() - lock_dir.stat().st_mtime > _SAVE_LOCK_STALE_SECONDS:
                            import shutil
                            shutil.rmtree(lock_dir)
                            continue
                    except Exception as exc:
                        logger.exception("Failed to stat/remove stale save lock dir %s", lock_dir)
            except Exception as exc:
                logger.exception("Failed to inspect save lock directory %s: %s", lock_dir, exc)
            if time.time() - start > timeout:
                raise ConfigSaveConflict("Save lock busy; could not acquire in time")
            # Brief poll interval before retrying acquisition.
            time.sleep(0.1)
def _release_save_lock(lock_dir: Path) -> None:
try:
owner = lock_dir / "owner.json"
try:
if owner.exists():
owner.unlink()
except Exception:
logger.exception("Failed to remove save lock owner file %s", owner)
lock_dir.rmdir()
except Exception:
logger.exception("Failed to release save lock directory %s", lock_dir)
def save_config(config: Dict[str, Any]) -> int:
    """Persist *config* to the database and return the number of rows written.

    Takes the cross-process save lock, detects external edits made since the
    last load (raising ConfigSaveConflict rather than overwriting them),
    rewrites the config table inside one transaction, checkpoints the WAL,
    and refreshes the in-process cache/baseline on success. Retries up to
    _CONFIG_SAVE_MAX_RETRIES times on SQLite "database is locked" errors.

    Raises:
        ConfigSaveConflict: lock busy, or disk config changed under us.
        Exception: any other database failure is logged and re-raised.
    """
    global _CONFIG_CACHE, _LAST_SAVED_CONFIG
    _sync_alldebrid_api_key(config)
    # Acquire cross-process save lock to avoid concurrent saves from different
    # processes which can lead to race conditions and DB-level overwrite.
    lock_dir = None
    try:
        lock_dir = _acquire_save_lock()
    except ConfigSaveConflict:
        # Surface a clear exception to callers so they can retry or handle it.
        raise
    previous_config = deepcopy(_LAST_SAVED_CONFIG)
    changed_count = _count_changed_entries(previous_config, config)
    def _write_entries() -> int:
        # Closure over `config`/`changed_count`; rebinds the module caches on skip.
        global _CONFIG_CACHE, _LAST_SAVED_CONFIG
        count = 0
        # Use the transaction-provided connection directly to avoid re-acquiring
        # the connection lock via db.* helpers which can lead to deadlock.
        with db.transaction() as conn:
            # Detect concurrent changes by reading the current DB state inside the
            # same transaction before mutating it. Use the transaction connection
            # directly to avoid acquiring the connection lock again (deadlock).
            try:
                cur = conn.cursor()
                cur.execute("SELECT category, subtype, item_name, key, value FROM config")
                rows = cur.fetchall()
                current_disk = rows_to_config(rows)
                cur.close()
            except Exception:
                # NOTE(review): a failed read yields {}, which (when the baseline is
                # non-empty) is treated as an external change below — confirm intended.
                current_disk = {}
            if current_disk != _LAST_SAVED_CONFIG:
                # If we have no local changes, refresh caches and skip the write.
                if changed_count == 0:
                    log("Skip save: disk configuration changed since last load and no local changes; not writing to DB.")
                    # Refresh local caches to match the disk
                    _CONFIG_CACHE = current_disk
                    _LAST_SAVED_CONFIG = deepcopy(current_disk)
                    return 0
                # Otherwise, abort to avoid overwriting external changes
                raise ConfigSaveConflict(
                    "Configuration on disk changed since you started editing; save aborted to prevent overwrite. Reload and reapply your changes."
                )
            # Proceed with writing when no conflicting external changes detected.
            # Full rewrite: wipe the table, then re-insert every flattened entry.
            conn.execute("DELETE FROM config")
            for key, value in config.items():
                if key in ('store', 'provider', 'tool') and isinstance(value, dict):
                    for subtype, instances in value.items():
                        if not isinstance(instances, dict):
                            continue
                        if key == 'store':
                            # Store entries keep their per-instance item name.
                            for name, settings in instances.items():
                                if isinstance(settings, dict):
                                    for k, v in settings.items():
                                        # Non-string values are JSON-encoded for storage.
                                        val_str = json.dumps(v) if not isinstance(v, str) else v
                                        conn.execute(
                                            "INSERT OR REPLACE INTO config (category, subtype, item_name, key, value) VALUES (?, ?, ?, ?, ?)",
                                            (key, subtype, name, k, val_str),
                                        )
                                        count += 1
                        else:
                            normalized_subtype = subtype
                            if key == 'provider':
                                # Providers are stored under their normalized (lowercased) name.
                                normalized_subtype = _normalize_provider_name(subtype)
                                if not normalized_subtype:
                                    continue
                            for k, v in instances.items():
                                val_str = json.dumps(v) if not isinstance(v, str) else v
                                conn.execute(
                                    "INSERT OR REPLACE INTO config (category, subtype, item_name, key, value) VALUES (?, ?, ?, ?, ?)",
                                    (key, normalized_subtype, "default", k, val_str),
                                )
                                count += 1
                else:
                    # Top-level scalar settings; private '_' keys and None are skipped.
                    if not key.startswith("_") and value is not None:
                        val_str = json.dumps(value) if not isinstance(value, str) else value
                        conn.execute(
                            "INSERT OR REPLACE INTO config (category, subtype, item_name, key, value) VALUES (?, ?, ?, ?, ?)",
                            ("global", "none", "none", key, val_str),
                        )
                        count += 1
        return count
    saved_entries = 0
    attempts = 0
    while True:
        try:
            saved_entries = _write_entries()
            # Central log entry
            log(
                f"Synced {saved_entries} entries to {db.db_path} "
                f"({changed_count} changed entries)"
            )
            # Try to checkpoint WAL to ensure main DB file reflects latest state.
            # Use a separate short-lived connection to perform the checkpoint so
            # we don't contend with our main connection lock or active transactions.
            try:
                try:
                    with sqlite3.connect(str(db.db_path), timeout=5.0) as _con:
                        _con.execute("PRAGMA wal_checkpoint(TRUNCATE)")
                except Exception:
                    # Fall back to a plain (passive) checkpoint if TRUNCATE fails.
                    with sqlite3.connect(str(db.db_path), timeout=5.0) as _con:
                        _con.execute("PRAGMA wal_checkpoint")
            except Exception as exc:
                log(f"Warning: WAL checkpoint failed: {exc}")
            # Forensics disabled: audit/logs/backups removed to keep save lean.
            # Release the save lock we acquired earlier
            try:
                if lock_dir is not None and lock_dir.exists():
                    _release_save_lock(lock_dir)
            except Exception as exc:
                logger.exception("Failed to release save lock during save flow: %s", exc)
            break
        except sqlite3.OperationalError as exc:
            attempts += 1
            locked_error = "locked" in str(exc).lower()
            if not locked_error or attempts >= _CONFIG_SAVE_MAX_RETRIES:
                log(f"CRITICAL: Database write failed: {exc}")
                # Ensure we release potential save lock before bubbling error
                try:
                    if lock_dir is not None and lock_dir.exists():
                        _release_save_lock(lock_dir)
                except Exception as exc:
                    # NOTE(review): rebinding `exc` here shadows the outer error for
                    # this log line only; the bare `raise` below still re-raises the
                    # original OperationalError.
                    logger.exception("Failed to release save lock after DB write failure: %s", exc)
                raise
            # Linear backoff before retrying a "database is locked" failure.
            delay = _CONFIG_SAVE_RETRY_DELAY * attempts
            log(f"Database locked; retry {attempts}/{_CONFIG_SAVE_MAX_RETRIES} in {delay:.2f}s")
            time.sleep(delay)
        except Exception as exc:
            # Includes ConfigSaveConflict raised inside _write_entries.
            log(f"CRITICAL: Configuration save failed: {exc}")
            try:
                if lock_dir is not None and lock_dir.exists():
                    _release_save_lock(lock_dir)
            except Exception as exc:
                logger.exception("Failed to release save lock after CRITICAL configuration save failure: %s", exc)
            raise
    # Success: replace the cache and baseline with what we just persisted.
    clear_config_cache()
    _CONFIG_CACHE = config
    _LAST_SAVED_CONFIG = deepcopy(config)
    return saved_entries
def load() -> Dict[str, Any]:
    """Return the parsed downlow configuration (alias for load_config)."""
    return load_config()
def save(config: Dict[str, Any]) -> int:
    """Persist *config* back to disk (alias for save_config)."""
    return save_config(config)
def save_config_and_verify(config: Dict[str, Any], retries: int = 3, delay: float = 0.15) -> int:
    """Save configuration and verify crucial keys persisted to disk.

    This helper performs a best-effort verification loop that reloads the
    configuration from disk and confirms that modified API key entries (e.g.
    AllDebrid) were written successfully. If verification fails after the
    configured number of retries, a RuntimeError is raised.

    Args:
        config: Configuration dict to persist.
        retries: Save/verify attempts (clamped to at least 1).
        delay: Base sleep between attempts; scaled by the attempt number.

    Returns:
        The entry count reported by save_config() on the successful attempt.

    Raises:
        RuntimeError: when all attempts fail to verify the expected key.
    """
    # Detect an API key that should be verified (provider or store-backed)
    expected_key = None
    try:
        providers = config.get("provider", {}) if isinstance(config, dict) else {}
        if isinstance(providers, dict):
            entry = providers.get("alldebrid")
            if entry is not None:
                # _extract_api_key is a small internal helper; reuse the implementation here
                if isinstance(entry, dict):
                    for k in ("api_key", "API_KEY", "apikey", "APIKEY"):
                        v = entry.get(k)
                        if isinstance(v, str) and v.strip():
                            expected_key = v.strip()
                            break
                elif isinstance(entry, str) and entry.strip():
                    expected_key = entry.strip()
        if not expected_key:
            # Fall back to the store-backed location of the AllDebrid key.
            store_block = config.get("store", {}) if isinstance(config, dict) else {}
            debrid = store_block.get("debrid") if isinstance(store_block, dict) else None
            if isinstance(debrid, dict):
                srv = debrid.get("all-debrid")
                if isinstance(srv, dict):
                    for k in ("api_key", "API_KEY", "apikey", "APIKEY"):
                        v = srv.get(k)
                        if isinstance(v, str) and v.strip():
                            expected_key = v.strip()
                            break
                elif isinstance(srv, str) and srv.strip():
                    expected_key = srv.strip()
    except Exception as exc:
        logger.debug("Failed to determine expected key for save verification: %s", exc, exc_info=True)
        expected_key = None
    last_exc: Exception | None = None
    for attempt in range(1, max(1, int(retries)) + 1):
        try:
            saved = save_config(config)
            if not expected_key:
                # Nothing special to verify; return success.
                return saved
            # Reload directly from disk and compare the canonical debrid/provider keys
            clear_config_cache()
            reloaded = load_config()
            # Provider-level key
            prov_block = reloaded.get("provider", {}) if isinstance(reloaded, dict) else {}
            prov_key = None
            if isinstance(prov_block, dict):
                aentry = prov_block.get("alldebrid")
                if isinstance(aentry, dict):
                    for k in ("api_key", "API_KEY", "apikey", "APIKEY"):
                        v = aentry.get(k)
                        if isinstance(v, str) and v.strip():
                            prov_key = v.strip()
                            break
                elif isinstance(aentry, str) and aentry.strip():
                    prov_key = aentry.strip()
            # Store-level key
            try:
                store_key = get_debrid_api_key(reloaded, service="All-debrid")
            except Exception:
                store_key = None
            if prov_key == expected_key or store_key == expected_key:
                try:
                    # Log a short, masked fingerprint to aid debugging without exposing the key itself
                    import hashlib
                    fp = hashlib.sha256(expected_key.encode("utf-8")).hexdigest()[:8]
                    log(f"Verified AllDebrid API key persisted (fingerprint={fp})")
                except Exception:
                    # If hashing/logging fails, don't abort the save
                    pass
                return saved
            # Not yet persisted; log and retry
            log(f"Warning: Post-save verification attempt {attempt} failed (expected key not found in DB). Retrying...")
            time.sleep(delay * attempt)
        except Exception as exc:
            last_exc = exc
            log(f"Warning: save and verify attempt {attempt} failed: {exc}")
            time.sleep(delay * attempt)
    # All retries exhausted
    # NOTE(review): when verification merely never matched (no exception raised),
    # last_exc is None and the message ends with "None".
    raise RuntimeError(f"Post-save verification failed after {retries} attempts: {last_exc}")
def count_changed_entries(config: Dict[str, Any]) -> int:
    """Number of entries in *config* that differ from the last-saved snapshot.

    Useful for user-facing messages that report how many entries actually
    changed rather than the total number of rows written to the database.
    """
    return _count_changed_entries(_LAST_SAVED_CONFIG, config)