#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Name: app/core/derive_edges.py
Version: v1.1.0 (2025-09-05)

Summary
    Derives edges from wikilinks ([[...]]) and resolves target notes robustly.
    Produces:
    - "references" (note -> note) with seq="body" and an own occurrence counter 'occ' per match
    - "backlink" (inverse of "references", same seq/occ)
    - "references_at" (chunk -> note) with seq=<chunk_index> and an own 'occ' per match

Usage
    from app.core.derive_edges import build_note_index, derive_wikilink_edges

Parameters / fields
    - note_payload: {"note_id", "title", "path", "fulltext": <body>, ...}
    - chunks_payloads: [{"chunk_id", "text", ...}, ...]
    - note_index: build_note_index([...]) -> (by_id, by_slug, by_file_slug)

Compatibility
    - Backward-compatible payload fields, extended only by 'seq' and 'occ'.

Changelog
    v1.1.0: occurrence counting ('occ') per match; seq="body" for references.
"""
|
|
|
|
from __future__ import annotations

import re
import unicodedata
from typing import Dict, List, Tuple

# Matches [[Target]], [[Target|Alias]], [[Target#Heading]], [[Target#Heading|Alias]].
# Group 1: target (no ']', '|', '#'), group 2: optional heading, group 3: optional alias.
WIKILINK_RE = re.compile(r"\[\[([^\]|#]+)(?:#([^\]|]+))?(?:\|([^\]]+))?\]\]")
|
|
|
|
def _slug(s: str) -> str:
|
|
s = s.strip()
|
|
if s.endswith(".md"):
|
|
s = s[:-3]
|
|
s = unicodedata.normalize("NFKD", s)
|
|
s = "".join(ch for ch in s if not unicodedata.combining(ch))
|
|
s = s.replace("\\", "/")
|
|
s = s.split("/")[-1]
|
|
s = s.lower().replace(" ", "-")
|
|
s = re.sub(r"[^a-z0-9\-]+", "", s)
|
|
s = re.sub(r"-{2,}", "-", s).strip("-")
|
|
return s
|
|
|
|
def build_note_index(notes_payloads: List[dict]) -> Tuple[Dict[str, dict], Dict[str, dict], Dict[str, dict]]:
    """Build three lookup tables over note payloads.

    Returns (by_id, by_slug, by_file_slug):
    - by_id: note_id (or legacy "id") -> payload
    - by_slug: slug of the title -> payload
    - by_file_slug: slug of the file name (last path segment) -> payload

    Payloads without any usable identifier are skipped.
    """
    by_id: Dict[str, dict] = {}
    by_slug: Dict[str, dict] = {}
    by_file_slug: Dict[str, dict] = {}
    for note in notes_payloads:
        note_id = note.get("note_id") or note.get("id")
        if not note_id:
            continue  # no identifier -> cannot index this note
        by_id[note_id] = note
        title = note.get("title", "")
        if title:
            by_slug[_slug(title)] = note
        path = note.get("path", "")
        if path:
            file_slug = _slug(path.split("/")[-1])
            if file_slug:
                by_file_slug[file_slug] = note
    return by_id, by_slug, by_file_slug
|
|
|
|
def resolve_target(note_like: str, idx: Tuple[Dict[str, dict], Dict[str, dict], Dict[str, dict]]):
    """Resolve a wikilink target against a note index from build_note_index().

    Returns (note_id, how) where how is one of "by_id", "by_slug",
    "by_file_slug", or (None, "unresolved") when no table matches.
    """
    by_id, by_slug, by_file_slug = idx
    key = note_like.strip()
    # An exact note-id match always wins over slug-based fallbacks.
    if key in by_id:
        return by_id[key]["note_id"], "by_id"
    slug = _slug(key)
    for table, how in ((by_slug, "by_slug"), (by_file_slug, "by_file_slug")):
        if slug in table:
            return table[slug]["note_id"], how
    return None, "unresolved"
|
|
|
|
def derive_wikilink_edges(note_payload: dict, chunks_payloads: List[dict], note_index) -> List[dict]:
    """Derive wikilink edges for one note.

    Emits:
    - "references" (note -> note) with seq="body" and a 1-based occurrence
      counter 'occ' per match in the note's full text
    - "backlink" (inverse of each resolved "references", same seq/occ)
    - "references_at" (chunk -> note) with seq=<1-based chunk index> and its
      own 'occ' per match within that chunk

    Unresolved targets still produce a "references"/"references_at" edge whose
    target is the raw link label, flagged with status="unresolved"; no
    backlink is emitted for them (there is no note to point back from).

    Raises KeyError if the note payload has no usable identifier or a
    non-empty chunk lacks "chunk_id".
    """
    edges: List[dict] = []
    # Accept "id" as fallback key, consistent with build_note_index().
    source_note_id = note_payload.get("note_id") or note_payload.get("id")
    if not source_note_id:
        raise KeyError("note_id")

    def _make_edge(kind: str, src: str, tgt: str, seq=None, occ=None, extra: dict | None = None):
        # edge_id is assigned later by the persistence layer, hence None here.
        e = {"edge_id": None, "kind": kind, "source_id": src, "target_id": tgt}
        if seq is not None:
            e["seq"] = seq
        if occ is not None:
            e["occ"] = occ
        if extra:
            e.update(extra)
        return e

    def _matches(text: str):
        """Yield (occ, target_id, raw_target, extra) for each wikilink in *text*."""
        for occ, m in enumerate(WIKILINK_RE.finditer(text), start=1):
            raw_target, heading, alias = m.groups()
            target_id, how = resolve_target(raw_target, note_index)
            extra = {"raw": raw_target, "alias": alias, "heading": heading, "resolution": how}
            if not target_id:
                extra["status"] = "unresolved"
                extra["target_label"] = raw_target
            yield occ, target_id, raw_target, extra

    # Note-level edges from the full body text.
    fulltext = note_payload.get("fulltext") or note_payload.get("body") or ""
    if fulltext:
        for occ, target_id, raw_target, extra in _matches(fulltext):
            if target_id:
                edges.append(_make_edge("references", source_note_id, target_id, seq="body", occ=occ, extra=extra))
                edges.append(_make_edge("backlink", target_id, source_note_id, seq="body", occ=occ, extra=extra))
            else:
                # Keep the raw label as target so the dangling link stays visible.
                edges.append(_make_edge("references", source_note_id, raw_target, seq="body", occ=occ, extra=extra))

    # Chunk-level edges; seq is the 1-based chunk index.
    for i, ch in enumerate(chunks_payloads, start=1):
        txt = ch.get("text") or ch.get("content") or ""
        if not txt:
            continue
        # Hoisted out of the match loop: fail fast before emitting partial edges.
        chunk_id = ch["chunk_id"]
        for occ, target_id, raw_target, extra in _matches(txt):
            target = target_id if target_id else raw_target
            edges.append(_make_edge("references_at", chunk_id, target, seq=i, occ=occ, extra=extra))

    return edges
|