#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Script: import_markdown.py — Markdown → Qdrant (Notes, Chunks, Edges) Version: 3.2.0 Datum: 2025-09-09 Kurzbeschreibung ---------------- Liest Markdown-Dateien aus einem Vault ein und schreibt Notes, Chunks und Edges idempotent nach Qdrant. Change-Detection basiert standardmäßig auf dem **Body-Hash**. Neu: Hash-Modus und Normalisierung sind auch per **CLI** steuerbar. ENV / Qdrant ------------ - QDRANT_URL (oder QDRANT_HOST/QDRANT_PORT) - QDRANT_API_KEY (optional) - COLLECTION_PREFIX (Default: mindnet) - VECTOR_DIM (Default: 384) - MINDNET_HASH_MODE: "body" (Default) | "frontmatter" | "body+frontmatter" - MINDNET_HASH_NORMALIZE: "canonical" (Default) | "none" CLI (übersteuert ENV) --------------------- --hash-mode body|frontmatter|body+frontmatter --hash-normalize canonical|none Aufruf ------ python3 -m scripts.import_markdown --vault ./vault python3 -m scripts.import_markdown --vault ./vault --apply python3 -m scripts.import_markdown --vault ./vault --apply --purge-before-upsert python3 -m scripts.import_markdown --vault ./vault --note-id 20250821-foo --apply python3 -m scripts.import_markdown --vault ./vault --apply --embed-note # Feingranulare Erkennung (jede Kleinigkeit im Body zählt): python3 -m scripts.import_markdown --vault ./vault --hash-normalize none """ from __future__ import annotations import argparse import json import os import sys from typing import Dict, List, Optional, Tuple from dotenv import load_dotenv from qdrant_client.http import models as rest from app.core.parser import ( read_markdown, normalize_frontmatter, validate_required_frontmatter, extract_wikilinks, ) from app.core.note_payload import make_note_payload from app.core.chunker import assemble_chunks from app.core.chunk_payload import make_chunk_payloads from app.core.qdrant import QdrantConfig, get_client, ensure_collections from app.core.qdrant_points import ( points_for_chunks, points_for_note, points_for_edges, upsert_batch, ) try: from app.core.embed import embed_texts, embed_one # optional except Exception: embed_texts = None embed_one = None # ----------------------------------------------------------------------------- # Hilfsfunktionen # ----------------------------------------------------------------------------- def iter_md(root: str) -> List[str]: out: List[str] = [] for dirpath, _, filenames in os.walk(root): for fn in filenames: if not fn.lower().endswith(".md"): continue p = os.path.join(dirpath, fn) pn = p.replace("\\", "/") if any(ex in pn for ex in ["/.obsidian/", "/_backup_frontmatter/", "/_imported/"]): continue out.append(p) return sorted(out) def collections(prefix: str) -> Tuple[str, str, str]: return f"{prefix}_notes", f"{prefix}_chunks", f"{prefix}_edges" def fetch_existing_note_hash(client, prefix: str, note_id: str) -> Optional[str]: notes_col, _, _ = collections(prefix) f = rest.Filter(must=[rest.FieldCondition( key="note_id", match=rest.MatchValue(value=note_id), )]) points, _ = client.scroll( collection_name=notes_col, scroll_filter=f, with_payload=True, with_vectors=False, limit=1, ) if not points: return None pl = points[0].payload or {} return pl.get("hash_fulltext") def purge_note_artifacts(client, prefix: str, note_id: str) -> None: _, chunks_col, edges_col = collections(prefix) f_chunks = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))]) client.delete(collection_name=chunks_col, points_selector=f_chunks, wait=True) should = [ rest.FieldCondition(key="source_id", match=rest.MatchText(text=f"{note_id}#")), rest.FieldCondition(key="target_id", match=rest.MatchText(text=f"{note_id}#")), rest.FieldCondition(key="source_id", match=rest.MatchValue(value=note_id)), rest.FieldCondition(key="target_id", match=rest.MatchValue(value=note_id)), ] f_edges = rest.Filter(should=should) client.delete(collection_name=edges_col, points_selector=f_edges, wait=True) def _normalize_rel_path(abs_path: str, vault_root: str) -> str: try: rel = os.path.relpath(abs_path, vault_root) except Exception: rel = abs_path return rel.replace("\\", "/").lstrip("/") # ----------------------------------------------------------------------------- # Main # ----------------------------------------------------------------------------- def main() -> None: load_dotenv() ap = argparse.ArgumentParser() ap.add_argument("--vault", required=True, help="Pfad zum Obsidian-Vault (Root-Ordner)") ap.add_argument("--apply", action="store_true", help="Schreibt in Qdrant; ohne Flag nur Dry-Run") ap.add_argument("--purge-before-upsert", action="store_true", help="Vor Upsert Chunks & Edges der GEÄNDERTEN Note löschen") ap.add_argument("--note-id", help="Nur eine bestimmte Note-ID verarbeiten") ap.add_argument("--embed-note", action="store_true", help="Optional: Note-Volltext einbetten") ap.add_argument("--force-replace", action="store_true", help="Änderungserkennung ignorieren und immer upserten (+ optional Purge)") # NEU: Hash-Steuerung per CLI ap.add_argument("--hash-mode", choices=["body", "frontmatter", "body+frontmatter"], default=None) ap.add_argument("--hash-normalize", choices=["canonical", "none"], default=None) args = ap.parse_args() cfg = QdrantConfig.from_env() client = get_client(cfg) ensure_collections(client, cfg.prefix, cfg.dim) notes_col, chunks_col, edges_col = collections(cfg.prefix) root = os.path.abspath(args.vault) files = iter_md(root) if not files: print("Keine Markdown-Dateien gefunden.", file=sys.stderr) sys.exit(2) processed = 0 for path in files: parsed = read_markdown(path) fm = normalize_frontmatter(parsed.frontmatter) try: validate_required_frontmatter(fm) except Exception as e: print(json.dumps({"path": path, "error": f"Frontmatter invalid: {e}"})) continue if args.note_id and fm.get("id") != args.note_id: continue processed += 1 # Note-Payload (mit expliziten Hash-Parametern) note_pl = make_note_payload(parsed, vault_root=root, hash_mode=args.hash_mode, hash_normalize=args.hash_normalize) if "fulltext" not in (note_pl or {}): note_pl["fulltext"] = parsed.body or "" if note_pl.get("path"): note_pl["path"] = _normalize_rel_path( os.path.join(root, note_pl["path"]) if not os.path.isabs(note_pl["path"]) else note_pl["path"], root ) else: note_pl["path"] = _normalize_rel_path(parsed.path, root) note_id = note_pl["note_id"] # Change-Detection new_hash = note_pl.get("hash_fulltext") old_hash = None if args.force_replace else fetch_existing_note_hash(client, cfg.prefix, note_id) changed = args.force_replace or (old_hash != new_hash) # Chunks + Embeddings (Nullvektor-Fallback) chunks = assemble_chunks(fm["id"], parsed.body, fm.get("type", "concept")) chunk_pls = make_chunk_payloads(fm, note_pl["path"], chunks) if embed_texts: vecs = embed_texts([getattr(c, "text", "") for c in chunks]) else: vecs = [[0.0] * cfg.dim for _ in chunks] # Edges (leichtgewichtig direkt hier ableiten) # belongs_to / prev/next aus Chunk-Nachbarschaften + Wikilinks als references edges: List[Dict] = [] for ch in chunk_pls: cid = ch["id"] edges.append({"src_id": cid, "dst_id": note_id, "edge_type": "belongs_to", "scope": "chunk"}) nb = ch.get("neighbors") or {} if nb.get("prev"): edges.append({"src_id": nb["prev"], "dst_id": cid, "edge_type": "next", "scope": "chunk"}) edges.append({"src_id": cid, "dst_id": nb["prev"], "edge_type": "prev", "scope": "chunk"}) if nb.get("next"): edges.append({"src_id": cid, "dst_id": nb["next"], "edge_type": "next", "scope": "chunk"}) edges.append({"src_id": nb["next"], "dst_id": cid, "edge_type": "prev", "scope": "chunk"}) for ref in (ch.get("references") or []): tid = ref.get("target_id") if tid: edges.append({"src_id": cid, "dst_id": tid, "edge_type": "references", "scope": "chunk"}) for tid in (note_pl.get("references") or []): edges.append({"src_id": note_id, "dst_id": tid, "edge_type": "references", "scope": "note"}) edges.append({"src_id": tid, "dst_id": note_id, "edge_type": "backlink", "scope": "note"}) # Dedupe _uniq = {} for e in edges: _uniq[(e["src_id"], e["dst_id"], e["edge_type"], e.get("scope", ""))] = e edges = list(_uniq.values()) # Zusammenfassung summary = { "note_id": note_id, "title": fm.get("title"), "chunks": len(chunk_pls), "edges": len(edges), "changed": changed, "decision": ("apply" if args.apply and changed else "apply-skip-unchanged" if args.apply and not changed else "dry-run"), "path": note_pl["path"], "hash_mode": args.hash_mode or os.environ.get("MINDNET_HASH_MODE", "body"), "hash_normalize": args.hash_normalize or os.environ.get("MINDNET_HASH_NORMALIZE", "canonical"), "hash_old": old_hash, "hash_new": new_hash, } print(json.dumps(summary, ensure_ascii=False)) if not args.apply: continue if changed and args.purge_before_upsert: purge_note_artifacts(client, cfg.prefix, note_id) notes_name, note_pts = points_for_note(cfg.prefix, note_pl, None, cfg.dim) upsert_batch(client, notes_name, note_pts) chunks_name, chunk_pts = points_for_chunks(cfg.prefix, chunk_pls, vecs) upsert_batch(client, chunks_name, chunk_pts) edges_name, edge_pts = points_for_edges(cfg.prefix, edges) upsert_batch(client, edges_name, edge_pts) print(f"Done. Processed notes: {processed}") if __name__ == "__main__": main()