app/core/derive_edges.py aktualisiert
All checks were successful
Deploy mindnet to llm-node / deploy (push) Successful in 3s

This commit is contained in:
Lars 2025-09-30 12:36:19 +02:00
parent 3c67fd5f9b
commit 7cda15553d

View File

@@ -1,126 +1,169 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
""" """
Name: app/core/derive_edges.py Modul: app/core/derive_edges.py
Version: v1.1.0 (2025-09-05) Version: 1.3.0
Datum: 2025-09-30
Kurzbeschreibung Zweck
Leitet Edges aus Wikilinks ([[]]) ab und löst Zielnoten robust auf. -----
Erzeugt: Robuste Kantenbildung für mindnet (Notes/Chunks):
- "references" (Note -> Note) mit seq="body", pro Match eine eigene Occurrence 'occ' - belongs_to (chunk -> note)
- "backlink" (inverse zu "references", gleiche seq/occ) - next / prev (chunk-Kette)
- "references_at" (Chunk -> Note) mit seq=<chunk_index> und eigener 'occ' je Match - references (chunk-scope) aus Chunk.window (Fallback text/content/raw)
- optional references (note-scope) dedupliziert
- optional backlink (note-scope) als Gegenkante
Aufruf Designhinweise
from app.core.derive_edges import build_note_index, derive_wikilink_edges --------------
- Für die Referenz-Extraktion wird bewusst das Feld **window** verwendet (nicht 'text'),
damit Links, die an einer Overlap-Grenze liegen, nicht verloren gehen.
- IDs werden später deterministisch in app/core/qdrant_points.py aus Payload erzeugt.
- 'status' setzen wir nicht hart; ein separater Resolver kann 'unresolved' o.ä. bestimmen.
Parameter / Felder Erwartete Chunk-Payload-Felder (pro Element):
- note_payload: {"note_id","title","path","fulltext": <body> , } {
- chunks_payloads: [{"chunk_id","text",}, ] "note_id": "...",
- note_index: build_note_index([...]) -> (by_id, by_slug, by_file_slug) "chunk_id": "...", # Alias "id" sollte ebenfalls vorhanden sein (abwärtskompatibel)
"id": "...",
"chunk_index": int,
"seq": int,
"window": str,
"text": str,
"path": "rel/path.md",
...
}
Kompatibilität API
- Rückwärtskompatible Payload-Felder, nur erweitert um 'seq' und 'occ'. ---
def build_edges_for_note(
Changelog note_id: str,
v1.1.0: Occurrence-Zählung ('occ') je Match; 'seq="body"' für references. chunks: List[dict],
note_level_references: List[str] | None = None,
include_note_scope_refs: bool = False,
) -> List[dict]
""" """
from __future__ import annotations from __future__ import annotations
import re import re
import unicodedata from typing import Dict, List, Optional, Iterable, Tuple
from typing import Dict, List, Tuple
# [[Ziel]], [[Ziel|Alias]], [[Ziel#Heading]], [[Ziel#Heading|Alias]] _WIKILINK_RE = re.compile(r"\[\[([^\]]+)\]\]")
WIKILINK_RE = re.compile(r"\[\[([^\]|#]+)(?:#([^\]|]+))?(?:\|([^\]]+))?\]\]")
def _slug(s: str) -> str: def _get(d: dict, *keys, default=None):
s = s.strip() for k in keys:
if s.endswith(".md"): if k in d and d[k] is not None:
s = s[:-3] return d[k]
s = unicodedata.normalize("NFKD", s) return default
s = "".join(ch for ch in s if not unicodedata.combining(ch))
s = s.replace("\\", "/")
s = s.split("/")[-1]
s = s.lower().replace(" ", "-")
s = re.sub(r"[^a-z0-9\-]+", "", s)
s = re.sub(r"-{2,}", "-", s).strip("-")
return s
def _chunk_text_for_refs(chunk: dict) -> str:
    """Text used for wikilink extraction.

    Prefers 'window' (so links on an overlap boundary are kept), then falls
    back to 'text', 'content', 'raw'; returns "" when none is usable.
    """
    return (
        _get(chunk, "window")
        or _get(chunk, "text")
        or _get(chunk, "content")
        or _get(chunk, "raw")
        or ""
    )
def _extract_wikilinks(text: str) -> List[str]:
    """Collect the raw labels of all [[...]] wikilinks in *text*.

    Labels are only whitespace-trimmed here; further normalization
    (slugging) can happen upstream (parser or resolver). Empty labels
    are dropped. Returns [] for falsy input.
    """
    if not text:
        return []
    out: List[str] = []
    for m in _WIKILINK_RE.finditer(text):
        label = m.group(1).strip()
        if not label:
            continue
        out.append(label)
    return out
def resolve_target(note_like: str, idx: Tuple[Dict[str,dict],Dict[str,dict],Dict[str,dict]]): def _dedupe(seq: Iterable[str]) -> List[str]:
by_id, by_slug, by_file_slug = idx seen = set()
key = note_like.strip() out: List[str] = []
if key in by_id: for s in seq:
return by_id[key]["note_id"], "by_id" if s not in seen:
s = _slug(key) seen.add(s)
if s in by_slug: out.append(s)
return by_slug[s]["note_id"], "by_slug" return out
if s in by_file_slug:
return by_file_slug[s]["note_id"], "by_file_slug"
return None, "unresolved"
def derive_wikilink_edges(note_payload: dict, chunks_payloads: List[dict], note_index) -> List[dict]: def _edge(kind: str, scope: str, source_id: str, target_id: str, note_id: str, extra: Optional[dict] = None) -> dict:
pl = {
"kind": kind,
"scope": scope, # "chunk" | "note"
"source_id": source_id,
"target_id": target_id,
"note_id": note_id, # Quelle/Träger der Kante (die aktuelle Note)
}
if extra:
pl.update(extra)
return pl
def build_edges_for_note(
    note_id: str,
    chunks: List[dict],
    note_level_references: Optional[List[str]] = None,
    include_note_scope_refs: bool = False,
) -> List[dict]:
    """
    Derive the edges for one note.

    - belongs_to: one per chunk (chunk -> note)
    - next / prev: between consecutive chunks
    - references: per chunk, extracted from window/text
    - optional note-scope references/backlinks: deduplicated over all chunk
      hits plus note_level_references

    Returns a list of edge payloads (without 'id'; the Qdrant ID is created
    deterministically from the payload later).
    """
    edges: List[dict] = []

    # --- structural edges ---
    # belongs_to
    for ch in chunks:
        cid = _get(ch, "chunk_id", "id")
        if not cid:
            # defensive: skip instead of crashing
            continue
        edges.append(_edge("belongs_to", "chunk", cid, note_id, note_id, {"chunk_id": cid}))

    # next / prev between consecutive chunks
    for i in range(len(chunks) - 1):
        a = chunks[i]
        b = chunks[i + 1]
        a_id = _get(a, "chunk_id", "id")
        b_id = _get(b, "chunk_id", "id")
        if not a_id or not b_id:
            continue
        edges.append(_edge("next", "chunk", a_id, b_id, note_id, {"chunk_id": a_id}))
        edges.append(_edge("prev", "chunk", b_id, a_id, note_id, {"chunk_id": b_id}))

    # --- reference edges (chunk scope) ---
    refs_all: List[str] = []
    for ch in chunks:
        cid = _get(ch, "chunk_id", "id")
        if not cid:
            continue
        txt = _chunk_text_for_refs(ch)
        refs = _extract_wikilinks(txt)
        if not refs:
            continue
        for r in refs:
            edges.append(_edge("references", "chunk", cid, r, note_id, {"chunk_id": cid, "ref_text": r}))
        refs_all.extend(refs)

    # --- note scope (optional) ---
    if include_note_scope_refs:
        # inputs: dedup of all chunk hits + any note-level refs from the payload
        refs_note = refs_all[:]
        if note_level_references:
            refs_note.extend([r for r in note_level_references if isinstance(r, str) and r])
        refs_note = _dedupe(refs_note)
        for r in refs_note:
            # forward
            edges.append(_edge("references", "note", note_id, r, note_id))
            # backlink (reverse)
            edges.append(_edge("backlink", "note", r, note_id, note_id))

    return edges