scripts/import_markdown.py updated
Some checks failed
Deploy mindnet to llm-node / deploy (push) Failing after 1s
This commit is contained in:
parent 1ee66450b1
commit fb4bf79841
@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-
 """
 Script: scripts/import_markdown.py
-Version: v2.3.0 (2025-09-05)
+Version: v2.3.1 (2025-09-05)
 
 Beschreibung
 Importiert Markdown-Notizen in Qdrant (Notes, Chunks, Edges).
@@ -26,7 +26,9 @@ Hinweise
 - Qdrant via env: QDRANT_URL, QDRANT_API_KEY, COLLECTION_PREFIX, VECTOR_DIM
 
 Changelog
-v2.3.0: Umstellung auf app.core.derive_edges + Note-Index vorab; Edge-IDs unterstützen Occurrence.
+v2.3.1: FIX – Für derive_wikilink_edges werden jetzt echte Chunk-Texte übergeben
+        (chunks_for_links mit {"chunk_id","text"}), damit `references_at` erzeugt werden.
+v2.3.0: Umstellung auf app.core.derive_edges; Edge-IDs unterstützen Occurrence.
 v2.2.x: Fix Filter-API (pydantic v2) bei Purge; Kleinkosmetik.
 """
 
@@ -42,7 +44,7 @@ from app.core.chunk_payload import make_chunk_payloads
 from app.core.embed import embed_texts, embed_one
 from app.core.qdrant import QdrantConfig, ensure_collections, get_client
 from app.core.qdrant_points import points_for_chunks, points_for_note, points_for_edges, upsert_batch
-from app.core.derive_edges import build_note_index, derive_wikilink_edges
+from app.core.derive_edges import build_note_index, derive_wikilink_edges  # nutzt 'text' je Chunk :contentReference[oaicite:1]{index=1}
 from qdrant_client.http import models as rest
 
 def iter_md(root: str, exclude_dirs=("/.obsidian/", "/_backup_frontmatter/", "/_imported/")):
@@ -56,12 +58,12 @@ def iter_md(root: str, exclude_dirs=("/.obsidian/", "/_backup_frontmatter/", "/_
     return out
 
 def purge_note_edges(client, prefix: str, source_note_id: str):
-    # Löscht alle Edges, deren source_id == source_note_id ODER (kind==backlink && target_id==source_note_id)
+    # Löscht Edges, deren source_id == source_note_id ODER (kind==backlink && target_id==source_note_id)
     edges_col = f"{prefix}_edges"
     f = rest.Filter(
         should=[
             rest.FieldCondition(key="source_id", match=rest.MatchValue(value=source_note_id)),
-            rest.Filter( # backlink-Variante
+            rest.Filter(
                 must=[
                     rest.FieldCondition(key="kind", match=rest.MatchValue(value="backlink")),
                     rest.FieldCondition(key="target_id", match=rest.MatchValue(value=source_note_id)),
@@ -69,6 +71,7 @@ def purge_note_edges(client, prefix: str, source_note_id: str):
             ),
         ]
     )
+    # pydantic v2: client.delete(...) statt delete_points(...)
     client.delete(collection_name=edges_col, points_selector=f, wait=True)
 
 def main():
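Reading aid for the filter above: should collects the OR branches, while the nested rest.Filter(must=[...]) forms the AND branch, so the selector matches source_id == source_note_id OR (kind == "backlink" AND target_id == source_note_id). A rough usage sketch, assuming the env variables named in the docstring; it bypasses the script's own get_client/QdrantConfig helpers, and the prefix default and note id are placeholders:

import os
from qdrant_client import QdrantClient

client = QdrantClient(url=os.environ["QDRANT_URL"], api_key=os.environ.get("QDRANT_API_KEY"))
purge_note_edges(client, os.environ.get("COLLECTION_PREFIX", "mindnet"), "20250905-example-note")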
@@ -96,7 +99,7 @@ def main():
     if not files:
         print("Keine Markdown-Dateien gefunden.", file=sys.stderr); sys.exit(2)
 
-    # --- NEU: Note-Index vorab (für robuste Zielauflösung) ---
+    # --- Note-Index (für robuste Zielauflösung) ---
     note_stubs = []
     for path in files:
         parsed = read_markdown(path)
@@ -107,10 +110,8 @@ def main():
             continue
         if args.note_id and fm.get("id") != args.note_id:
             continue
-        # Minimaler Stub (ohne Embeddings), reicht für build_note_index
         rel = os.path.relpath(parsed.path, root).replace("\\","/")
-        np = {"note_id": fm["id"], "title": fm.get("title",""), "path": rel}
-        note_stubs.append(np)
+        note_stubs.append({"note_id": fm["id"], "title": fm.get("title",""), "path": rel})
     note_index = build_note_index(note_stubs)
 
     total_notes = 0
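The stubs deliberately carry only the three fields shown above, without embeddings, which is all build_note_index needs to resolve link targets later. An illustration with invented notes (ids, titles and paths are placeholders):

note_stubs = [
    {"note_id": "20250901-qdrant", "title": "Qdrant", "path": "Konzepte/Qdrant.md"},
    {"note_id": "20250902-wikilinks", "title": "Wikilinks", "path": "Konzepte/Wikilinks.md"},
]
note_index = build_note_index(note_stubs)  # assumed to map titles/paths back to note_ids for [[...]] resolution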
@@ -129,35 +130,40 @@ def main():
         # Note-Payload
         note_pl = make_note_payload(parsed, vault_root=root)
         validate_note_payload(note_pl)
-        # Volltext für derive_edges verfügbar machen
-        note_pl["fulltext"] = parsed.body
+        note_pl["fulltext"] = parsed.body  # für derive_edges
 
         # Chunks
         chunks = assemble_chunks(fm["id"], parsed.body, fm.get("type", "concept"))
         chunk_pls = make_chunk_payloads(fm, note_pl["path"], chunks)
 
         # Embeddings (Chunks)
-        texts = [c for c in (ch.text for ch in chunks)]
+        texts = [ch.text for ch in chunks]
         vectors = embed_texts(texts)
 
         # Optional: Note-Vektor
         note_vec = embed_one(parsed.body) if args.embed_note else None
 
-        # Kanten (NEU: derive_wikilink_edges)
-        edges = derive_wikilink_edges(note_pl, chunk_pls, note_index)
+        # --- WICHTIG: Chunk-Texte für derive_wikilink_edges ---
+        chunks_for_links = [
+            {"chunk_id": (pl.get("chunk_id") or pl.get("id") or f"{fm['id']}#{i+1}"),
+             "text": chunks[i].text}
+            for i, pl in enumerate(chunk_pls)
+            if i < len(chunks)
+        ]
+
+        # Kanten (Note- & Chunk-Ebene)
+        edges = derive_wikilink_edges(note_pl, chunks_for_links, note_index)
 
         # Dry-Run-Ausgabe
-        summary = {
+        print(json.dumps({
             "note_id": fm["id"],
             "title": fm["title"],
             "chunks": len(chunk_pls),
             "edges": len(edges),
             "path": note_pl["path"]
-        }
-        print(json.dumps(summary, ensure_ascii=False))
+        }, ensure_ascii=False))
 
         if args.apply:
             # Ersetzung der Edges für diese Note
             if args.force_replace:
                 purge_note_edges(client, cfg.prefix, fm["id"])
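Each processed note prints one JSON summary line (the "Dry-Run-Ausgabe" block above); with --apply the derived points and edges are presumably upserted as well. A hypothetical invocation and output, with the flag name inferred from the args.note_id attribute and all values invented:

# python scripts/import_markdown.py /path/to/vault --note-id 20250901-qdrant
{"note_id": "20250901-qdrant", "title": "Qdrant", "chunks": 4, "edges": 3, "path": "Konzepte/Qdrant.md"}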