#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script: scripts/import_markdown.py — Markdown → Qdrant (Notes, Chunks, Edges)
Version: 3.4.1
Datum: 2025-09-09
Kurzbeschreibung
----------------
- Liest Markdown-Dateien ein, erzeugt Notes/Chunks/Edges **idempotent**.
- Change-Detection (nur **Inhalte**, keine FS-Zeitstempel) konfigurierbar:
* ``--hash-mode``: body | frontmatter | body+frontmatter | full (Alias)
- Env: ``MINDNET_HASH_MODE`` **oder** ``MINDNET_HASH_COMPARE`` (Body/Frontmatter/Full)
* ``--hash-normalize``: canonical | none (Default: canonical)
* ``--hash-source``: parsed (Default) | raw
- "raw" hasht den **ungeparsten** Body aus der Datei (Frontmatter vorher entfernt).
- Optionales Diff-Logging: ``--debug-hash-diff`` zeigt bei Bedarf einen kompakten Diff.
ENV / Qdrant
------------
- QDRANT_URL | QDRANT_HOST/QDRANT_PORT | QDRANT_API_KEY
- COLLECTION_PREFIX (Default: mindnet)
- VECTOR_DIM (Default: 384)
- MINDNET_NOTE_SCOPE_REFS: true|false (Default: false)
Aufruf
------
python3 -m scripts.import_markdown --vault ./vault
python3 -m scripts.import_markdown --vault ./vault --apply
python3 -m scripts.import_markdown --vault ./vault --apply --hash-source raw --hash-normalize none
MINDNET_HASH_COMPARE=Full python3 -m scripts.import_markdown --vault ./vault --apply
"""
from __future__ import annotations
import argparse
import difflib
import json
import os
import sys
from typing import Dict, List, Optional, Tuple
from dotenv import load_dotenv
from qdrant_client.http import models as rest
from app.core.parser import (
    read_markdown,
    normalize_frontmatter,
    validate_required_frontmatter,
)
from app.core.note_payload import make_note_payload
from app.core.chunker import assemble_chunks
from app.core.chunk_payload import make_chunk_payloads
from app.core.edges import build_edges_for_note
from app.core.qdrant import (
    QdrantConfig,
    get_client,
    ensure_collections,
    ensure_payload_indexes,
)
from app.core.qdrant_points import (
    points_for_chunks,
    points_for_note,
    points_for_edges,
    upsert_batch,
)
try:
    from app.core.embed import embed_texts  # optional
except Exception:
    embed_texts = None
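# Without an embedding backend, embed_texts stays None and main() falls back to
# zero vectors for chunk embeddings, so imports still work without a model.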
# ---------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------
def iter_md(root: str) -> List[str]:
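    """Recursively collect *.md files under root, skipping .obsidian/, backup and import folders."""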
    out: List[str] = []
    for dirpath, _, filenames in os.walk(root):
        for fn in filenames:
            if not fn.lower().endswith(".md"):
                continue
            p = os.path.join(dirpath, fn)
            pn = p.replace("\\", "/")
            if any(ex in pn for ex in ["/.obsidian/", "/_backup_frontmatter/", "/_imported/"]):
                continue
            out.append(p)
    return sorted(out)

def collections(prefix: str) -> Tuple[str, str, str]:
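    """Derive the notes/chunks/edges collection names from the common prefix."""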
return f"{prefix}_notes", f"{prefix}_chunks", f"{prefix}_edges"
def fetch_existing_note_payload(client, prefix: str, note_id: str) -> Optional[Dict]:
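    """Fetch the stored payload for note_id from the notes collection (None if not yet imported)."""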
    notes_col, _, _ = collections(prefix)
    f = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
    points, _ = client.scroll(
        collection_name=notes_col,
        scroll_filter=f,
        with_payload=True,
        with_vectors=False,
        limit=1,
    )
    if not points:
        return None
    return points[0].payload or {}

def purge_note_artifacts(client, prefix: str, note_id: str) -> None:
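    """Delete all chunk and edge points belonging to note_id; the note point itself is kept."""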
    _, chunks_col, edges_col = collections(prefix)
    f_chunks = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
    client.delete(collection_name=chunks_col, points_selector=f_chunks, wait=True)
    f_edges = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
    client.delete(collection_name=edges_col, points_selector=f_edges, wait=True)

def _normalize_rel_path(abs_path: str, vault_root: str) -> str:
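    """Make abs_path relative to vault_root and normalize it to a forward-slash path."""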
    try:
        rel = os.path.relpath(abs_path, vault_root)
    except Exception:
        rel = abs_path
    return rel.replace("\\", "/").lstrip("/")

# ---------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------
def main() -> None:
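    """CLI entry point: scan the vault, detect content changes, and (with --apply) upsert into Qdrant."""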
    load_dotenv()
    ap = argparse.ArgumentParser()
    ap.add_argument("--vault", required=True, help="Path to the Obsidian vault (root folder)")
    ap.add_argument("--apply", action="store_true", help="Write to Qdrant; without this flag, dry run only")
    ap.add_argument("--purge-before-upsert", action="store_true",
                    help="Delete chunks & edges of the CHANGED note before upserting")
    ap.add_argument("--note-id", help="Process only the note with this note ID")
    ap.add_argument("--embed-note", action="store_true", help="Optional: embed the note full text")
    ap.add_argument("--force-replace", action="store_true",
                    help="Ignore change detection and always upsert (+ optional purge)")
    ap.add_argument("--hash-mode", choices=["body", "frontmatter", "body+frontmatter", "full"], default=None,
                    help="Comparison mode: body | frontmatter | body+frontmatter (alias: full)")
    ap.add_argument("--hash-normalize", choices=["canonical", "none"], default=None)
    ap.add_argument("--hash-source", choices=["parsed", "raw"], default=None,
                    help="Source for hash computation (default: parsed)")
    ap.add_argument("--note-scope-refs", action="store_true",
                    help="(Optional) additionally create references:note (default: off)")
    ap.add_argument("--debug-hash-diff", action="store_true",
                    help="Print a short diff between the old and new body when applicable")
    args = ap.parse_args()
    note_scope_refs_env = (os.environ.get("MINDNET_NOTE_SCOPE_REFS", "false").strip().lower() == "true")
    # argparse normalizes --note-scope-refs to args.note_scope_refs
    note_scope_refs = args.note_scope_refs or note_scope_refs_env
    cfg = QdrantConfig.from_env()
    client = get_client(cfg)
    ensure_collections(client, cfg.prefix, cfg.dim)
    ensure_payload_indexes(client, cfg.prefix)
    root = os.path.abspath(args.vault)
    files = iter_md(root)
    if not files:
        print("No Markdown files found.", file=sys.stderr)
        sys.exit(2)
    processed = 0
    for path in files:
        parsed = read_markdown(path)
        fm = normalize_frontmatter(parsed.frontmatter)
        try:
            validate_required_frontmatter(fm)
        except Exception as e:
            print(json.dumps({"path": path, "error": f"Frontmatter invalid: {e}"}))
            continue
        if args.note_id and fm.get("id") != args.note_id:
            continue
        processed += 1
        # Note payload (incl. hash mode & source)
        note_pl = make_note_payload(
            parsed,
            vault_root=root,
            hash_mode=args.hash_mode,
            hash_normalize=args.hash_normalize,
            hash_source=args.hash_source,
            file_path=path,
        )
        if "fulltext" not in (note_pl or {}):
            note_pl["fulltext"] = parsed.body or ""
        if note_pl.get("path"):
            note_pl["path"] = _normalize_rel_path(
                os.path.join(root, note_pl["path"]) if not os.path.isabs(note_pl["path"]) else note_pl["path"],
                root,
            )
        else:
            note_pl["path"] = _normalize_rel_path(parsed.path, root)
        note_id = note_pl["note_id"]
        # Change detection (content only, no FS timestamps)
        old_payload = None if args.force_replace else fetch_existing_note_payload(client, cfg.prefix, note_id)
        old_hash = None if not old_payload else old_payload.get("hash_fulltext")
        new_hash = note_pl.get("hash_fulltext")
        changed = args.force_replace or (old_hash != new_hash)
        # Optional debugging: print a compact diff
        if args.debug_hash_diff:
            old_text = (old_payload or {}).get("fulltext") or ""
            new_text = note_pl.get("fulltext") or ""
            # Hash equal but text differs -> hints at normalization/source settings
            if (old_hash == new_hash) and old_text != new_text:
                print(json.dumps({
                    "debug": "hash_equal_but_text_differs",
                    "note_id": note_id,
                    "hash_mode": args.hash_mode or os.environ.get("MINDNET_HASH_MODE") or os.environ.get("MINDNET_HASH_COMPARE", "body"),
                    "hash_normalize": args.hash_normalize or os.environ.get("MINDNET_HASH_NORMALIZE", "canonical"),
                    "hash_source": args.hash_source or os.environ.get("MINDNET_HASH_SOURCE", "parsed"),
                }, ensure_ascii=False))
            if old_text and new_text:
                ud = list(difflib.unified_diff(
                    old_text.splitlines(), new_text.splitlines(),
                    fromfile="qdrant_fulltext(old)", tofile="vault_body(new)",
                    n=3
                ))
                if ud:
                    preview = "\n".join(ud[:50])
                    print(json.dumps({"note_id": note_id, "diff_preview": preview}, ensure_ascii=False))
        # Chunks + embeddings (zero-vector fallback)
        chunks = assemble_chunks(fm["id"], parsed.body, fm.get("type", "concept"))
        chunk_pls = make_chunk_payloads(fm, note_pl["path"], chunks)
        if embed_texts:
            vecs = embed_texts([getattr(c, "text", "") for c in chunks])  # type: ignore[attr-defined]
        else:
            vecs = [[0.0] * cfg.dim for _ in chunks]
        # Edges (new schema, with note_id as owner)
        note_refs = note_pl.get("references") or []
        edges = build_edges_for_note(
            note_id,
            chunk_pls,
            note_refs,
            include_note_scope_refs=note_scope_refs,
        )
        # Per-file summary
        summary = {
            "note_id": note_id,
            "title": fm.get("title"),
            "chunks": len(chunk_pls),
            "edges": len(edges),
            "changed": changed,
            "decision": ("apply" if args.apply and changed else
                         "apply-skip-unchanged" if args.apply and not changed else
                         "dry-run"),
            "path": note_pl["path"],
            "hash_mode": args.hash_mode or os.environ.get("MINDNET_HASH_MODE") or os.environ.get("MINDNET_HASH_COMPARE", "body"),
            "hash_normalize": args.hash_normalize or os.environ.get("MINDNET_HASH_NORMALIZE", "canonical"),
            "hash_source": args.hash_source or os.environ.get("MINDNET_HASH_SOURCE", "parsed"),
        }
        print(json.dumps(summary, ensure_ascii=False))
        if not args.apply:
            continue
        if not changed:
            continue  # matches the "apply-skip-unchanged" decision above
        if args.purge_before_upsert:
            purge_note_artifacts(client, cfg.prefix, note_id)
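        # Note: --embed-note is accepted above but not referenced again in this
        # script; points_for_note receives None as the note vector (presumably
        # a zero-vector fallback of size cfg.dim).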
        # Upserts
        notes_name, note_pts = points_for_note(cfg.prefix, note_pl, None, cfg.dim)
        upsert_batch(client, notes_name, note_pts)
        chunks_name, chunk_pts = points_for_chunks(cfg.prefix, chunk_pls, vecs)
        upsert_batch(client, chunks_name, chunk_pts)
        edges_name, edge_pts = points_for_edges(cfg.prefix, edges)
        upsert_batch(client, edges_name, edge_pts)
print(f"Done. Processed notes: {processed}")
if __name__ == "__main__":
main()