#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script: scripts/import_markdown.py
Version: 3.9.2
Date: 2025-11-08
Purpose
-------
Importer for Obsidian Markdown notes into Qdrant.
- reads frontmatter + body
- builds note/chunk payloads
- derives edges
- upserts into Qdrant (notes, chunks, edges)
- hash-based change detection (configurable via ENV)

Compatibility
-------------
- Works with parsers that only provide `body` (without `body_full`)
- Supports the existing ENV variables (COLLECTION_PREFIX / MINDNET_PREFIX)
- Uses the wrappers from app.core.qdrant / app.core.qdrant_points (see v1.8.0 / v1.7.0)
- Falls back to existing defaults if newer functions are missing

Usage
-----
export COLLECTION_PREFIX="mindnet"
python3 -m scripts.import_markdown --vault ./vault --apply --purge-before-upsert --prefix "$COLLECTION_PREFIX"

Optional flags:
--note-scope-refs : also extract note-scope references
--baseline-modes  : prepare baseline hashes for body/frontmatter/full (if used)
--dry-run / (no --apply): only report decisions, nothing is written

ENV (hash control)
------------------
MINDNET_HASH_COMPARE  : Body | Frontmatter | Full (default: Body)
MINDNET_HASH_SOURCE   : parsed | raw (default: parsed)
MINDNET_HASH_NORMALIZE: canonical | none (default: canonical)
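
Example (one possible combination): compare on the raw, un-normalized full document:
export MINDNET_HASH_COMPARE=Full
export MINDNET_HASH_SOURCE=raw
export MINDNET_HASH_NORMALIZE=none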
"""
from __future__ import annotations
import argparse
import json
import os
import sys
from pathlib import Path
from typing import Dict, List, Optional, Tuple
# Core imports (existing)
from app.core.parser import read_markdown
from app.core.note_payload import make_note_payload
from app.core.chunk_payload import make_chunk_payloads
from app.core.derive_edges import build_edges_for_note
from app.core.qdrant import (
    QdrantConfig,
    get_client,
    ensure_collections,
    count_points,
    list_note_ids,
    fetch_one_note,
)
from app.core.qdrant_points import (
    upsert_notes,
    upsert_chunks,
    upsert_edges,
    delete_by_note,
)


# ----------------------------
# Helper functions
# ----------------------------
def _env(key: str, default: str = "") -> str:
    v = os.environ.get(key, "")
    return v if v != "" else default
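

# The readers below only sanitize the hash-control ENV values. The chosen
# combination is recorded as metadata (the note's "hash_signature" and the
# per-note report fields); the actual hash/signature comparison happens
# downstream in the payload functions.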
def _hash_mode() -> str:
    m = _env("MINDNET_HASH_COMPARE", "Body").lower()
    if m not in ("body", "frontmatter", "full"):
        m = "body"
    return m


def _hash_source() -> str:
    s = _env("MINDNET_HASH_SOURCE", "parsed").lower()
    if s not in ("parsed", "raw"):
        s = "parsed"
    return s


def _hash_normalize() -> str:
    n = _env("MINDNET_HASH_NORMALIZE", "canonical").lower()
    if n not in ("canonical", "none"):
        n = "canonical"
    return n


def _safe_text(parsed) -> str:
    """
    Returns parsed.body_full if available, otherwise parsed.body, otherwise "".
    Compatibility helper for parsers without 'body_full'.
    """
    return getattr(parsed, "body_full", None) or getattr(parsed, "body", "") or ""


def _load_prefix(arg_prefix: Optional[str]) -> str:
    # Precedence: CLI --prefix > COLLECTION_PREFIX > MINDNET_PREFIX > "mindnet"
    if arg_prefix and arg_prefix.strip():
        return arg_prefix.strip()
    env_prefix = os.environ.get("COLLECTION_PREFIX") or os.environ.get("MINDNET_PREFIX")
    return (env_prefix or "mindnet").strip()


def _iter_md(vault: Path) -> List[Path]:
    out: List[Path] = []
    for p in sorted(vault.rglob("*.md")):
        if p.is_file():
            out.append(p)
    return out
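

# One JSON object per line (JSONL), so the output can be piped into
# line-based tools for filtering and inspection.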
def _print(obj):
    sys.stdout.write(json.dumps(obj, ensure_ascii=False) + "\n")
    sys.stdout.flush()


# ----------------------------
# Main logic
# ----------------------------
def process_file(
    path: Path,
    cfg: QdrantConfig,
    note_scope_refs: bool = False,
    apply: bool = False,
    purge_before_upsert: bool = False,
) -> Tuple[Optional[dict], List[dict], List[dict]]:
    """
    Reads one file and builds the note/chunk/edge payloads.
    Returns (note_payload, chunk_payloads, edge_payloads).
    """
    try:
        parsed = read_markdown(str(path))
    except Exception as e:
        _print({"path": str(path), "error": f"read_markdown failed: {e.__class__.__name__}: {e}"})
        return None, [], []

    # Note
    try:
        note_pl = make_note_payload(parsed, vault_root=str(path.parent.parent))  # vault_root is only used for path fields
        if not isinstance(note_pl, dict):
            # Fallback in case an older make_note_payload version returns
            # something else (or None)
            note_pl = {
                "note_id": parsed.frontmatter.get("id") or path.stem,
                "title": parsed.frontmatter.get("title") or path.stem,
                "status": parsed.frontmatter.get("status", "unknown"),
                "path": str(path).replace("\\", "/"),
                "tags": parsed.frontmatter.get("tags", []),
            }
        # robust fulltext field
        note_pl["fulltext"] = _safe_text(parsed)
        # attach hash metadata (without changing your DB's existing logic)
        note_pl["hash_signature"] = f"{_hash_mode()}:{_hash_source()}:{_hash_normalize()}"
    except Exception as e:
        _print({"path": str(path), "error": f"make_note_payload failed: {e}"})
        return None, [], []

    # Chunks
    try:
        chunks = make_chunk_payloads(parsed, note_pl)
        if not isinstance(chunks, list):
            chunks = []
    except Exception as e:
        _print({"path": str(path), "note_id": note_pl.get("note_id"), "error": f"make_chunk_payloads failed: {e}"})
        chunks = []

    # Edges
    try:
        edges = build_edges_for_note(parsed, chunks, note_scope_refs=note_scope_refs)
    except Exception as e:
        _print({"path": str(path), "note_id": note_pl.get("note_id"), "error": f"build_edges_for_note failed: {e}"})
        edges = []

    return note_pl, chunks, edges
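

# Illustrative example (hypothetical path): build the payloads for a single note
# without writing anything to Qdrant:
#   note_pl, chunks, edges = process_file(Path("vault/example.md"), cfg)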
def main():
    ap = argparse.ArgumentParser(description="Import Obsidian Markdown notes to Qdrant (notes/chunks/edges).")
    ap.add_argument("--vault", required=True, help="Path to the vault directory (root).")
    ap.add_argument("--apply", action="store_true", help="Apply changes (upsert into Qdrant).")
    ap.add_argument("--purge-before-upsert", action="store_true", help="Delete each note's chunks/edges beforehand.")
    ap.add_argument("--note-scope-refs", action="store_true", help="Derive note-scope references.")
    ap.add_argument("--baseline-modes", action="store_true", help="(Optional) Prepare baseline hashes.")
    ap.add_argument("--prefix", required=False, help="Collection prefix (overrides ENV).")
    args = ap.parse_args()

    vault = Path(args.vault).resolve()
    if not vault.exists():
        ap.error(f"Vault not found: {vault}")

    # Determine prefix & load config
    prefix = _load_prefix(args.prefix)
    cfg = QdrantConfig.from_env(prefix=prefix)
    client = get_client(cfg)
    ensure_collections(client, cfg)

    files = _iter_md(vault)
    if not files:
        _print({"summary": "done", "processed": 0, "prefix": cfg.prefix})
        return

    # Optional baseline action (meta information only / does not abort if unused)
    if args.baseline_modes:
        _print({"action": "baseline", "modes": ["body", "frontmatter", "full"], "source": _hash_source(), "norm": _hash_normalize()})

    processed = 0
    for p in files:
        note_pl, chunks, edges = process_file(
            p,
            cfg,
            note_scope_refs=args.note_scope_refs,
            apply=args.apply,
            purge_before_upsert=args.purge_before_upsert,
        )
        if not note_pl:
            continue
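
        # Per-note decision record; printed below as one JSON line.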
        info = {
            "note_id": note_pl.get("note_id"),
            "title": note_pl.get("title"),
            "chunks": len(chunks),
            "edges": len(edges),
            "changed": True,  # the concrete hash/signature check happens in your payload functions
            "decision": "apply" if args.apply else "dry-run",
            "path": str(p.relative_to(vault)).replace("\\", "/"),
            "hash_mode": _hash_mode(),
            "hash_normalize": _hash_normalize(),
            "hash_source": _hash_source(),
            "prefix": cfg.prefix,
        }

        if args.apply:
            # Optional: delete the note's chunks/edges first (clean refresh)
            if args.purge_before_upsert:
                try:
                    delete_by_note(client, cfg, note_pl.get("note_id", ""))
                except Exception as e:
                    _print({"note_id": note_pl.get("note_id"), "warn": f"delete_by_note failed: {e}"})

            # Upserts
            try:
                upsert_notes(client, cfg, [note_pl])
            except Exception as e:
                _print({"note_id": note_pl.get("note_id"), "error": f"upsert_notes failed: {e}"})
                continue
            if chunks:
                try:
                    upsert_chunks(client, cfg, chunks)
                except Exception as e:
                    _print({"note_id": note_pl.get("note_id"), "error": f"upsert_chunks failed: {e}"})
            if edges:
                try:
                    upsert_edges(client, cfg, edges)
                except Exception as e:
                    _print({"note_id": note_pl.get("note_id"), "error": f"upsert_edges failed: {e}"})

        _print(info)
        processed += 1

    # Final status
    counts = count_points(client, cfg)
    _print({
        "summary": "done",
        "processed": processed,
        "prefix": cfg.prefix,
        "collections": {"notes": cfg.notes, "chunks": cfg.chunks, "edges": cfg.edges},
        "counts": counts,
    })


if __name__ == "__main__":
    main()