Upload files to "scripts"

Lars 2025-11-08 18:22:28 +01:00
parent da70f0e00c
commit ea45493941


@@ -1,57 +1,46 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script: scripts/import_markdown.py - Markdown -> Qdrant (Notes, Chunks, Edges)
Version: 3.7.3
Date: 2025-11-08
Summary
-------
- Reads Markdown files and creates Notes/Chunks/Edges idempotently.
- Change detection "Option C": multiple hash variants are stored in note payload
(field `hashes` with keys `<mode>:<source>:<normalize>`). Comparison uses ONLY
the current-mode key. Switching modes no longer triggers bulk "changed".
- "First import" fix: if Qdrant is empty for the note, we treat it as changed.
- `--baseline-modes`: silently add any missing hash variants (notes only).
- `--sync-deletes`: selective deletes (Dry-Run + Apply).
- `--only-path`: process exactly one file (useful for diagnostics).
- NEW in 3.7.3: Type registry (if present) is loaded and the derived
`retriever_weight` is written to both note and chunk payload. If the registry
defines a `chunk_profile`, it is injected into frontmatter for payloads
(does NOT change the chunking behavior here).
Purpose
-------
- Reads Markdown notes from a vault
- Builds the note payload, chunk payloads (+ optional embeddings) and edges
- Writes everything idempotently to Qdrant (Notes, Chunks, Edges)
- Integrates an optional type registry (types.yaml) to control e.g. chunk_profile
and retriever_weight per note type.
Hash/Compare configuration
--------------------------
- Compare mode:
--hash-mode body|frontmatter|full
or ENV: MINDNET_HASH_MODE | MINDNET_HASH_COMPARE
- Source:
--hash-source parsed|raw (ENV: MINDNET_HASH_SOURCE, Default parsed)
- Normalization:
--hash-normalize canonical|none (ENV: MINDNET_HASH_NORMALIZE, Default canonical)
- Optional: --compare-text (or ENV MINDNET_COMPARE_TEXT=true) compares
parsed body text, in addition to the hash.
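For illustration (hash values below are made up), the note payload's `hashes`
field might look like:
    {"body:parsed:canonical": "a3f9...", "full:raw:none": "17c2..."}
With the defaults above, change detection compares only the
"body:parsed:canonical" entry; the other variants are merely kept as baselines.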
Key fixes compared to previous broken revisions
-----------------------------------------------
- `embed_texts` is imported optionally and checked defensively (no more NameError)
- `effective_chunk_profile` / `effective_retriever_weight` and the registry helpers
are defined BEFORE `main()` (no more NameError)
- `retriever_weight` is reliably written to both the note and chunk payloads
- Robust edge building; edge failures do not block notes/chunks
- Correct use of `scroll_filter` with the Qdrant client
- `--purge-before-upsert` removes a note's old chunks/edges before the upsert
Qdrant / ENV
------------
- QDRANT_URL | QDRANT_HOST/QDRANT_PORT | QDRANT_API_KEY
- COLLECTION_PREFIX (Default: mindnet), can be overridden with --prefix
- VECTOR_DIM (Default: 384)
- MINDNET_NOTE_SCOPE_REFS: true|false (Default: false)
- MINDNET_TYPES_PATH: optional path to config/types.yaml
- MINDNET_TYPES_FILE: path to types.yaml (optional; default: ./types.yaml)
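Example of a loaded registry as the code below expects it (type names and values
are assumptions, not prescriptions):
    {"types": {"concept":  {"chunk_profile": "medium", "retriever_weight": 1.0},
               "decision": {"chunk_profile": "short",  "retriever_weight": 1.5}}}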
Examples
--------
# Standard (Body, parsed, canonical)
python3 -m scripts.import_markdown --vault ./vault
# First import after truncate (create case)
python3 -m scripts.import_markdown --vault ./vault --apply --purge-before-upsert
# Single file (diagnostics)
python3 -m scripts.import_markdown --vault ./vault --only-path ./vault/30_projects/project-demo.md --apply
# Sync-Deletes (Dry-Run -> Apply)
python3 -m scripts.import_markdown --vault ./vault --sync-deletes
python3 -m scripts.import_markdown --vault ./vault --sync-deletes --apply
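# Full-content compare on the raw files (flags as documented above)
python3 -m scripts.import_markdown --vault ./vault --hash-mode full --hash-source raw --apply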
"""
@@ -66,6 +55,7 @@ from typing import Dict, List, Optional, Tuple, Any, Set
from dotenv import load_dotenv
from qdrant_client.http import models as rest
# --- Project imports ---
from app.core.parser import (
read_markdown,
normalize_frontmatter,
@@ -91,17 +81,86 @@ from app.core.qdrant_points import (
upsert_batch,
)
# ---- Type-Registry (optional) ------------------------------------------------
# embeddings are optional (e.g. for a pure payload backfill)
try:
# Expected API
from app.core.type_registry import load_type_registry # type: ignore
from app.core.embed import embed_texts # optional
except Exception: # pragma: no cover
def load_type_registry(path: str) -> dict:
"""Fallback loader if module is absent. Returns empty dict."""
return {}
embed_texts = None
# ---------------------------------------------------------------------
# Type registry (types.yaml) helpers (robust, optional)
# ---------------------------------------------------------------------
def _env(name: str, default: Optional[str] = None) -> Optional[str]:
v = os.getenv(name)
return v if v is not None else default
def _load_json_or_yaml(path: str) -> dict:
import io
data: dict = {}
if not path or not os.path.exists(path):
return data
try:
import yaml # type: ignore
with io.open(path, "r", encoding="utf-8") as f:
data = yaml.safe_load(f) or {}
if not isinstance(data, dict):
return {}
return data
except Exception:
# YAML may not be installed -> try JSON instead
try:
with io.open(path, "r", encoding="utf-8") as f:
data = json.load(f)
if not isinstance(data, dict):
return {}
return data
except Exception:
return {}
def load_type_registry() -> dict:
# Precedence: ENV > ./types.yaml (in the current working directory)
p = _env("MINDNET_TYPES_FILE", None)
if p and os.path.exists(p):
return _load_json_or_yaml(p)
fallback = os.path.abspath("./types.yaml")
return _load_json_or_yaml(fallback)
def get_type_config(note_type: Optional[str], reg: dict) -> dict:
if not reg or not isinstance(reg, dict):
return {}
types = reg.get("types", {}) if isinstance(reg.get("types"), dict) else {}
if note_type and isinstance(note_type, str) and note_type in types:
return types[note_type] or {}
# Fallback: concept
return types.get("concept", {}) or {}
def resolve_note_type(requested: Optional[str], reg: dict) -> str:
if requested and isinstance(requested, str):
return requested
# Fallback if nothing is set
types = reg.get("types", {}) if isinstance(reg.get("types"), dict) else {}
return "concept" if "concept" in types else (requested or "concept")
def effective_chunk_profile(note_type: str, reg: dict) -> Optional[str]:
cfg = get_type_config(note_type, reg)
prof = cfg.get("chunk_profile")
if isinstance(prof, str) and prof in {"short", "medium", "long"}:
return prof
return None
def effective_retriever_weight(note_type: str, reg: dict) -> Optional[float]:
cfg = get_type_config(note_type, reg)
w = cfg.get("retriever_weight")
try:
return float(w) if w is not None else None
except Exception:
return None
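# Illustration (registry contents assumed): given
#   reg = {"types": {"decision": {"chunk_profile": "short", "retriever_weight": 1.5}}}
# effective_chunk_profile("decision", reg) returns "short" and
# effective_retriever_weight("decision", reg) returns 1.5; unknown types fall back
# to the "concept" entry (absent here, so both helpers would return None).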
# ---------------------------------------------------------------------
# Other helpers
# ---------------------------------------------------------------------
def iter_md(root: str) -> List[str]:
@@ -117,17 +176,15 @@ def iter_md(root: str) -> List[str]:
out.append(p)
return sorted(out)
def collections(prefix: str) -> Tuple[str, str, str]:
return f"{prefix}_notes", f"{prefix}_chunks", f"{prefix}_edges"
def fetch_existing_note_payload(client, prefix: str, note_id: str) -> Optional[Dict]:
notes_col, _, _ = collections(prefix)
f = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
points, _ = client.scroll(
collection_name=notes_col,
scroll_filter=f, # important: scroll_filter (not: filter)
with_payload=True,
with_vectors=False,
limit=1,
@@ -136,7 +193,6 @@ def fetch_existing_note_payload(client, prefix: str, note_id: str) -> Optional[D
return None
return points[0].payload or {}
def list_qdrant_note_ids(client, prefix: str) -> Set[str]:
notes_col, _, _ = collections(prefix)
out: Set[str] = set()
@@ -160,7 +216,6 @@ def list_qdrant_note_ids(client, prefix: str) -> Set[str]:
break
return out
def purge_note_artifacts(client, prefix: str, note_id: str) -> None:
_, chunks_col, edges_col = collections(prefix)
filt = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
@@ -174,7 +229,6 @@ def purge_note_artifacts(client, prefix: str, note_id: str) -> None:
except Exception as e:
print(json.dumps({"note_id": note_id, "warn": f"delete in {col} via filter failed: {e}"}))
def delete_note_everywhere(client, prefix: str, note_id: str) -> None:
notes_col, chunks_col, edges_col = collections(prefix)
filt = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
@@ -189,78 +243,35 @@ def delete_note_everywhere(client, prefix: str, note_id: str) -> None:
print(json.dumps({"note_id": note_id, "warn": f"delete in {col} failed: {e}"}))
def _resolve_mode(val: Optional[str]) -> str:
v = (val or os.environ.get("MINDNET_HASH_MODE") or os.environ.get("MINDNET_HASH_COMPARE") or "body").strip().lower()
if v in ("full", "fulltext", "body+frontmatter", "bodyplusfrontmatter"):
return "full"
if v in ("frontmatter", "fm"):
return "frontmatter"
return "body"
def _env(key: str, default: str) -> str:
return (os.environ.get(key) or default).strip().lower()
# --- Type-Registry helpers (pure) --------------------------------------------
def _effective_chunk_profile(note_type: str, registry: dict) -> Optional[str]:
try:
types = (registry or {}).get("types", {}) if isinstance(registry, dict) else {}
cfg = types.get(note_type) or types.get("concept") or {}
prof = cfg.get("chunk_profile")
if isinstance(prof, str) and prof in {"short", "medium", "long"}:
return prof
except Exception:
pass
return None
def _effective_retriever_weight(note_type: str, registry: dict) -> Optional[float]:
try:
types = (registry or {}).get("types", {}) if isinstance(registry, dict) else {}
cfg = types.get(note_type) or types.get("concept") or {}
w = cfg.get("retriever_weight")
if w is None:
return None
return float(w)
except Exception:
# be tolerant
return None
# ---------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------
def _resolve_mode(m: Optional[str]) -> str:
m = (m or "body").strip().lower()
return m if m in {"body", "frontmatter", "full"} else "body"
def main() -> None:
load_dotenv()
ap = argparse.ArgumentParser()
ap.add_argument("--vault", required=True, help="Path to the Obsidian vault (root folder)")
ap.add_argument("--apply", action="store_true", help="Write to Qdrant; otherwise Dry-Run")
ap.add_argument("--purge-before-upsert", action="store_true",
help="Before upsert, delete Chunks & Edges of the CHANGED note")
ap.add_argument("--note-id", help="Process only a specific note-id")
ap.add_argument("--only-path", help="Process exactly this Markdown path (ignores --note-id)")
ap.add_argument("--embed-note", action="store_true", help="Optionally embed the full note text")
ap.add_argument("--force-replace", action="store_true",
help="Ignore change detection and always upsert (+ optional purge)")
ap.add_argument("--hash-mode", choices=["body", "frontmatter", "full"], default=None,
help="Compare mode (Body | Frontmatter | Full)")
ap.add_argument("--hash-normalize", choices=["canonical", "none"], default=None)
ap.add_argument("--hash-source", choices=["parsed", "raw"], default=None,
help="Source for hash calculation (Default: parsed)")
ap.add_argument("--note-scope-refs", action="store_true",
help="(Optional) also create references:note/backlink:note (Default: off)")
ap.add_argument("--debug-hash-diff", action="store_true",
help="(reserved) optional body diff")
ap.add_argument("--compare-text", action="store_true",
help="Additionally compare parsed fulltext (beyond the hash)")
ap.add_argument("--baseline-modes", action="store_true",
help="Add missing hash variants in 'hashes' silently (Upsert notes only)")
ap.add_argument("--sync-deletes", action="store_true",
help="Delete notes/chunks/edges that exist in Qdrant but are missing in the vault (Dry-Run; use --apply to execute)")
ap.add_argument("--prefix", help="Collection prefix (overrides ENV COLLECTION_PREFIX)")
ap = argparse.ArgumentParser(
prog="scripts.import_markdown",
description="Imports Markdown notes into Qdrant (Notes/Chunks/Edges)."
)
ap.add_argument("--vault", required=True, help="Path to the vault (folder containing the .md files)")
ap.add_argument("--only-path", help="Process only this file (absolute or relative path)")
ap.add_argument("--apply", action="store_true", help="Write to Qdrant (otherwise Dry-Run)")
ap.add_argument("--purge-before-upsert", action="store_true", help="Delete the note's old chunks/edges first")
ap.add_argument("--force-replace", action="store_true", help="Rewrite note/chunks/edges regardless of the hash")
ap.add_argument("--note-id", help="Process only notes with this ID (filter)")
ap.add_argument("--note-scope-refs", action="store_true", help="Also create note-scope references/backlinks")
ap.add_argument("--hash-mode", help="body|frontmatter|full (default: body)")
ap.add_argument("--hash-source", help="parsed|raw (default: parsed)")
ap.add_argument("--hash-normalize", help="canonical|none (default: canonical)")
ap.add_argument("--compare-text", action="store_true", help="Additionally compare the parsed fulltext directly")
ap.add_argument("--baseline-modes", action="store_true", help="Silently add missing hash variants (notes only)")
ap.add_argument("--sync-deletes", action="store_true", help="Qdrant->Vault delete sync (Dry-Run; execute with --apply)")
ap.add_argument("--prefix", help="Collection prefix (overrides ENV COLLECTION_PREFIX)")
args = ap.parse_args()
mode = _resolve_mode(args.hash_mode) # body|frontmatter|full
@@ -270,7 +281,7 @@ def main() -> None:
note_scope_refs = args.note_scope_refs or note_scope_refs_env
compare_text = args.compare_text or (_env("MINDNET_COMPARE_TEXT", "false") == "true")
# Prepare Qdrant client
cfg = QdrantConfig.from_env()
if args.prefix:
cfg.prefix = args.prefix.strip()
@@ -278,27 +289,22 @@ def main() -> None:
ensure_collections(client, cfg.prefix, cfg.dim)
ensure_payload_indexes(client, cfg.prefix)
# Load type registry (optional)
types_path = os.environ.get("MINDNET_TYPES_PATH") or os.path.join(os.getcwd(), "config", "types.yaml")
try:
type_registry = load_type_registry(types_path) or {}
except Exception as e: # tolerant
print(json.dumps({"warn": f"type-registry load failed ({types_path}): {type(e).__name__}: {e}"}))
type_registry = {}
# Load the type registry (optional)
reg = load_type_registry()
root = os.path.abspath(args.vault)
# Build file list
if args.only_path:
only = os.path.abspath(args.only_path)
files = [only]
else:
files = iter_md(root)
if not files:
print("No Markdown files found.", file=sys.stderr)
print("Keine Markdown-Dateien gefunden.", file=sys.stderr)
sys.exit(2)
# Optional: Sync-Deletes upfront
if args.sync_deletes:
vault_note_ids: Set[str] = set()
for path in files:
@@ -331,16 +337,15 @@ def main() -> None:
processed = 0
for path in files:
# -------- Parse & Validate --------
try:
parsed = read_markdown(path)
if not parsed:
continue
except Exception as e:
print(json.dumps({"path": path, "error": f"read_markdown failed: {type(e).__name__}: {e}"}))
continue
if parsed is None:
print(json.dumps({"path": path, "error": "read_markdown returned None"}))
continue
# --- Check frontmatter ---
try:
fm = normalize_frontmatter(parsed.frontmatter)
validate_required_frontmatter(fm)
@@ -353,40 +358,58 @@ def main() -> None:
processed += 1
# -------- Type-Registry derivation (tolerant) --------
note_type = (fm.get("type") or "concept").strip().lower()
prof = _effective_chunk_profile(note_type, type_registry)
if prof and not fm.get("chunk_profile"):
fm["chunk_profile"] = prof
weight = _effective_retriever_weight(note_type, type_registry)
if weight is not None:
fm["retriever_weight"] = weight
# --- Apply type registry (chunk_profile / retriever_weight) ---
try:
note_type = resolve_note_type(fm.get("type"), reg)
except Exception:
note_type = (fm.get("type") or "concept")
fm["type"] = note_type or fm.get("type") or "concept"
prof = effective_chunk_profile(note_type, reg)
if prof:
fm["chunk_profile"] = prof
weight = effective_retriever_weight(note_type, reg)
if weight is not None:
try:
fm["retriever_weight"] = float(weight)
except Exception:
pass # in case the frontmatter value is a non-numeric string
# --- Build payload (incl. hashes) ---
try:
note_pl = make_note_payload(
parsed,
vault_root=root,
hash_mode=mode,
hash_normalize=norm,
hash_source=src,
file_path=path,
)
except Exception as e:
print(json.dumps({"path": path, "error": f"make_note_payload failed: {type(e).__name__}: {e}"}))
continue
# -------- Build new payload (includes 'hashes') --------
note_pl = make_note_payload(
parsed,
vault_root=root,
hash_mode=mode,
hash_normalize=norm,
hash_source=src,
file_path=path,
)
if not note_pl.get("fulltext"):
note_pl["fulltext"] = getattr(parsed, "body", "") or ""
if weight is not None:
note_pl["retriever_weight"] = weight
# Reliably mirror retriever_weight into the note payload (for later filters)
if "retriever_weight" not in note_pl and fm.get("retriever_weight") is not None:
try:
note_pl["retriever_weight"] = float(fm.get("retriever_weight"))
except Exception:
pass
note_id = note_pl.get("note_id") or fm.get("id")
if not note_id:
print(json.dumps({"path": path, "error": "Missing note_id after payload build"}))
continue
# --- Fetch the existing payload (for diffing) ---
old_payload = None if args.force_replace else fetch_existing_note_payload(client, cfg.prefix, note_id)
has_old = old_payload is not None
old_hashes = (old_payload or {}).get("hashes") or {}
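# Only the hash variant for the current settings is compared, e.g. "body:parsed:canonical"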
key_current = f"{mode}:{src}:{norm}"
old_hash_exact = old_hashes.get(key_current)
new_hash_exact = (note_pl.get("hashes") or {}).get(key_current)
needs_baseline = (old_hash_exact is None)
@@ -402,21 +425,28 @@ def main() -> None:
changed = args.force_replace or (not has_old) or hash_changed or text_changed
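# Baseline-only: the note exists and is unchanged, but the current hash variant is still missing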
do_baseline_only = (args.baseline_modes and has_old and needs_baseline and not changed)
# -------- Chunks / Embeddings --------
chunk_pls: List[Dict[str, Any]] = []
# --- Chunks + Embeddings ---
try:
body_text = getattr(parsed, "body", "") or ""
chunks = assemble_chunks(fm["id"], body_text, note_type)
chunk_pls = make_chunk_payloads(fm, note_pl["path"], chunks, note_text=body_text)
if weight is not None:
for pl in chunk_pls:
if pl.get("retriever_weight") is None:
pl["retriever_weight"] = weight
chunks = assemble_chunks(fm["id"], body_text, fm.get("type", "concept"))
chunk_pls: List[Dict[str, Any]] = make_chunk_payloads(fm, note_pl["path"], chunks, note_text=body_text)
except Exception as e:
print(json.dumps({"path": path, "note_id": note_id, "error": f"chunk build failed: {type(e).__name__}: {e}"}))
continue
vecs: List[List[float]] = [[0.0] * cfg.dim for _ in chunk_pls]
# Mirror retriever_weight onto the chunk payloads
if fm.get("retriever_weight") is not None:
try:
rw = float(fm.get("retriever_weight"))
for pl in chunk_pls:
# Only set the field if it is not already present
if "retriever_weight" not in pl:
pl["retriever_weight"] = rw
except Exception:
pass
# Embeddings (fallback: zero vectors)
vecs: List[List[float]] = [[0.0] * int(cfg.dim) for _ in chunk_pls]
if embed_texts and chunk_pls:
try:
texts_for_embed = [(pl.get("window") or pl.get("text") or "") for pl in chunk_pls]
@@ -424,7 +454,7 @@ def main() -> None:
except Exception as e:
print(json.dumps({"path": path, "note_id": note_id, "warn": f"embed_texts failed, using zeros: {e}"}))
# -------- Edges (robust) --------
edges: List[Dict[str, Any]] = []
edges_failed = False
if changed and (not do_baseline_only):
@@ -441,13 +471,10 @@ def main() -> None:
edges = []
print(json.dumps({"path": path, "note_id": note_id, "warn": f"build_edges_for_note failed, skipping edges: {type(e).__name__}: {e}"}))
# --- Summary (stdout) ---
summary = {
"note_id": note_id,
"title": fm.get("title"),
"type": note_type,
"chunk_profile": fm.get("chunk_profile"),
"retriever_weight": weight,
"chunks": len(chunk_pls),
"edges": len(edges),
"edges_failed": edges_failed,
@@ -465,7 +492,7 @@ def main() -> None:
}
print(json.dumps(summary, ensure_ascii=False))
# -------- Writes --------
if not args.apply:
continue