neue chunker strategie mit smart chunking
This commit is contained in:
parent
49cdc9a13f
commit
cd28a9d35c
|
|
@ -1,6 +1,6 @@
|
||||||
from __future__ import annotations
|
from __future__ import annotations
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
from typing import List, Dict, Optional, Tuple, Any
|
from typing import List, Dict, Optional, Tuple, Any, Set
|
||||||
import re
|
import re
|
||||||
import math
|
import math
|
||||||
import yaml
|
import yaml
|
||||||
|
|
@ -10,387 +10,124 @@ from markdown_it.token import Token
|
||||||
import asyncio
|
import asyncio
|
||||||
|
|
||||||
# NEUE IMPORTS
|
# NEUE IMPORTS
|
||||||
# Import der benötigten Klassen direkt (ersetzt get_semantic_analyzer)
|
from app.services.semantic_analyzer import get_semantic_analyzer
|
||||||
try:
|
from app.core.note_payload import extract_frontmatter_from_text
|
||||||
# ANNAHME: Die Klassen SemanticAnalyzer und SemanticChunkResult existieren in app.services.semantic_analyzer.py
|
# WICHTIG: Import der Edge Derivations Logik
|
||||||
from app.services.semantic_analyzer import SemanticAnalyzer, SemanticChunkResult
|
from app.core.derive_edges import build_edges_for_note # <-- Muss importiert werden
|
||||||
except ImportError:
|
|
||||||
# Fallback für Tests, wenn der Service noch nicht auf dem Pfad ist
|
|
||||||
print("WARNUNG: SemanticAnalyzer Service nicht gefunden. Semantic Chunking wird fehlschlagen.")
|
|
||||||
class SemanticAnalyzer:
|
|
||||||
async def analyze_and_chunk(self, text, type): return [SemanticChunkResult(content=text, suggested_edges=[])]
|
|
||||||
@dataclass
|
|
||||||
class SemanticChunkResult:
|
|
||||||
content: str
|
|
||||||
suggested_edges: List[str] # Format: "kind:Target"
|
|
||||||
|
|
||||||
|
# ... bestehender Code (Konfiguration, Hilfsfunktionen, RawBlock, Chunk)
|
||||||
|
|
||||||
# ==========================================
|
# --- NEUE STRATEGIE: SMART EDGE ALLOCATION (Ersetzt _strategy_semantic_llm) ---
|
||||||
# 1. FUNKTION ZUM AUSLESEN DES FRONTMATTERS (Lokalisiert und stabil)
|
async def _strategy_smart_edge_allocation(md_text: str, config: Dict, note_id: str, note_type: str) -> List[Chunk]:
|
||||||
# ==========================================
|
|
||||||
|
|
||||||
def extract_frontmatter_from_text(md_text: str) -> Tuple[Dict[str, Any], str]:
|
|
||||||
"""
|
"""
|
||||||
Extrakte das YAML Frontmatter aus dem Markdown-Text und gibt den Body zurück.
|
[WP-15, Neue Logik] Zerlegt Note deterministisch und nutzt LLM zur Zuweisung von Kanten (Schritte 1-5).
|
||||||
(Lokalisiert im Chunker zur Vermeidung von Import-Fehlern)
|
|
||||||
"""
|
"""
|
||||||
# Regex toleriert Whitespace/Newline vor dem ersten ---
|
# 0. Initialisierung
|
||||||
fm_match = re.match(r'^\s*---\s*\n(.*?)\n---', md_text, re.DOTALL)
|
analyzer = get_semantic_analyzer()
|
||||||
|
|
||||||
if not fm_match:
|
# 1. [Schritt 2 des Workflows] Sammeln ALLER Kanten (Inline & Defaults)
|
||||||
return {}, md_text
|
# Führt die Edge-Derivation für die gesamte Notiz aus, basierend auf Text und Typ.
|
||||||
|
raw_edges: List[Dict] = build_edges_for_note(
|
||||||
frontmatter_yaml = fm_match.group(1)
|
text=md_text,
|
||||||
|
note_id=note_id,
|
||||||
|
note_type=note_type,
|
||||||
|
# Leere Listen übergeben, da wir noch keine Chunks haben und nur die Note selbst analysieren.
|
||||||
|
chunks=[],
|
||||||
|
references=[]
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
# Kanten im Format "kind:Target" sammeln (ohne Duplikate)
|
||||||
# Nutzung von safe_load
|
all_note_edges = set()
|
||||||
frontmatter = yaml.safe_load(frontmatter_yaml)
|
for edge in raw_edges:
|
||||||
if not isinstance(frontmatter, dict):
|
# Extrahiere nur Kanten, die relevant für das Chunking sind (Explizite oder Defaults)
|
||||||
frontmatter = {}
|
if edge.get("target_id") and edge.get("kind"):
|
||||||
except yaml.YAMLError:
|
# Nutze target_id, da dies der Notiz-ID entspricht
|
||||||
frontmatter = {}
|
all_note_edges.add(f"{edge['kind']}:{edge['target_id']}")
|
||||||
|
|
||||||
# Entferne den Frontmatter Block aus dem Text
|
|
||||||
text_without_fm = re.sub(r'^\s*---\s*\n(.*?)\n---', '', md_text, flags=re.DOTALL)
|
|
||||||
|
|
||||||
return frontmatter, text_without_fm.strip()
|
all_note_edges_list = list(all_note_edges)
|
||||||
|
|
||||||
|
|
||||||
# ==========================================
|
|
||||||
# 2. CONFIGURATION LOADER
|
|
||||||
# ==========================================
|
|
||||||
|
|
||||||
# Pfad-Logik: app/core/chunker.py -> app/core -> app -> root/config/types.yaml
|
|
||||||
BASE_DIR = Path(__file__).resolve().parent.parent.parent
|
|
||||||
CONFIG_PATH = BASE_DIR / "config" / "types.yaml"
|
|
||||||
|
|
||||||
# Fallback Values
|
|
||||||
DEFAULT_PROFILE = {
|
|
||||||
"strategy": "sliding_window",
|
|
||||||
"target": 400,
|
|
||||||
"max": 600,
|
|
||||||
"overlap": (50, 80)
|
|
||||||
}
|
|
||||||
|
|
||||||
_CONFIG_CACHE = None
|
|
||||||
|
|
||||||
def _load_yaml_config() -> Dict[str, Any]:
|
|
||||||
"""Lädt die config/types.yaml und cached das Ergebnis."""
|
|
||||||
global _CONFIG_CACHE
|
|
||||||
if _CONFIG_CACHE is not None:
|
|
||||||
return _CONFIG_CACHE
|
|
||||||
|
|
||||||
if not CONFIG_PATH.exists():
|
|
||||||
print(f"WARNUNG: types.yaml nicht gefunden unter: {CONFIG_PATH}")
|
|
||||||
return {}
|
|
||||||
|
|
||||||
try:
|
|
||||||
with open(CONFIG_PATH, "r", encoding="utf-8") as f:
|
|
||||||
data = yaml.safe_load(f)
|
|
||||||
_CONFIG_CACHE = data
|
|
||||||
return data
|
|
||||||
except Exception as e:
|
|
||||||
print(f"FEHLER beim Laden von {CONFIG_PATH}: {e}")
|
|
||||||
return {}
|
|
||||||
|
|
||||||
def get_chunk_config(note_type: str) -> Dict[str, Any]:
|
|
||||||
"""Löst Typ -> Profil -> Konfiguration auf."""
|
|
||||||
full_config = _load_yaml_config()
|
|
||||||
|
|
||||||
profiles = full_config.get("chunking_profiles", {})
|
|
||||||
type_def = full_config.get("types", {}).get(note_type.lower(), {})
|
|
||||||
profile_name = type_def.get("chunking_profile")
|
|
||||||
|
|
||||||
if not profile_name:
|
# 2. [Schritt 3 des Workflows] Deterministic Chunking
|
||||||
profile_name = full_config.get("defaults", {}).get("chunking_profile", "sliding_standard")
|
# Nutzt die in der Config angegebene deterministische Strategie (z.B. by_heading)
|
||||||
|
blocks, doc_title = parse_blocks(md_text)
|
||||||
|
|
||||||
config = profiles.get(profile_name, DEFAULT_PROFILE).copy()
|
# Nutze _strategy_by_heading (oder _strategy_sliding_window, je nach Config-Intent),
|
||||||
|
# da dies die robusteste deterministische Strategie ist. Die Konfiguration kommt
|
||||||
|
# vom "structured_strict" oder ähnlichem Profil.
|
||||||
|
chunks = await asyncio.to_thread(_strategy_by_heading, blocks, config, note_id, doc_title)
|
||||||
|
|
||||||
if "overlap" in config and isinstance(config["overlap"], list):
|
# Fallback, falls by_heading nur einen Chunk liefert oder fehlschlägt
|
||||||
config["overlap"] = tuple(config["overlap"])
|
if not chunks or len(chunks) <= 1:
|
||||||
|
# Erhöht die Robustheit bei unstrukturierten Texten
|
||||||
return config
|
chunks = await asyncio.to_thread(_strategy_sliding_window, blocks, config, note_id, doc_title)
|
||||||
|
|
||||||
# Legacy Support
|
if not chunks:
|
||||||
def get_sizes(note_type: str):
|
# Absoluter Fallback: Ganzer Text ist 1 Chunk.
|
||||||
cfg = get_chunk_config(note_type)
|
text = " ".join([b.text for b in blocks if b.kind not in ("heading", "code")]).strip()
|
||||||
return {
|
if text:
|
||||||
"target": (cfg["target"], cfg["target"]),
|
chunks = [Chunk(id=f"{note_id}-c0", note_id=note_id, index=0, text=text, token_count=estimate_tokens(text), section_title=doc_title, section_path="", neighbors_prev=None, neighbors_next=None, char_start=0, char_end=len(text))]
|
||||||
"max": cfg["max"],
|
|
||||||
"overlap": cfg["overlap"]
|
|
||||||
}
|
|
||||||
|
|
||||||
# ==========================================
|
|
||||||
# 3. DATA CLASSES & HELPERS
|
|
||||||
# ==========================================
|
|
||||||
|
|
||||||
# --- Hilfen ---
|
|
||||||
_SENT_SPLIT = re.compile(r'(?<=[.!?])\s+(?=[A-ZÄÖÜ0-9„(])')
|
|
||||||
_WS = re.compile(r'\s+')
|
|
||||||
|
|
||||||
def estimate_tokens(text: str) -> int:
|
|
||||||
t = len(text.strip())
|
|
||||||
return max(1, math.ceil(t / 4))
|
|
||||||
|
|
||||||
def split_sentences(text: str) -> list[str]:
|
|
||||||
text = _WS.sub(' ', text.strip())
|
|
||||||
if not text: return []
|
|
||||||
parts = _SENT_SPLIT.split(text)
|
|
||||||
return [p.strip() for p in parts if p.strip()]
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class RawBlock:
|
|
||||||
kind: str
|
|
||||||
text: str
|
|
||||||
level: Optional[int]
|
|
||||||
section_path: str
|
|
||||||
section_title: Optional[str]
|
|
||||||
|
|
||||||
@dataclass
|
|
||||||
class Chunk:
|
|
||||||
id: str
|
|
||||||
note_id: str
|
|
||||||
index: int
|
|
||||||
text: str # Reintext für Anzeige (inkl. injizierter Links bei LLM/Heading)
|
|
||||||
window: str # Text + Context für Embeddings
|
|
||||||
token_count: int
|
|
||||||
section_title: Optional[str]
|
|
||||||
section_path: str
|
|
||||||
neighbors_prev: Optional[str]
|
|
||||||
neighbors_next: Optional[str]
|
|
||||||
char_start: int
|
|
||||||
char_end: int
|
|
||||||
|
|
||||||
# --- Markdown Parser ---
|
|
||||||
def parse_blocks(md_text: str) -> Tuple[List[RawBlock], str]:
|
|
||||||
"""Parst MD und gibt Blöcke UND den H1 Titel zurück."""
|
|
||||||
|
|
||||||
md = MarkdownIt("commonmark").enable("table")
|
|
||||||
tokens: List[Token] = md.parse(md_text)
|
|
||||||
|
|
||||||
blocks: List[RawBlock] = []
|
|
||||||
h1_title = "Dokument"
|
|
||||||
h2, h3 = None, None
|
|
||||||
section_path = "/"
|
|
||||||
|
|
||||||
# Rudimentäres Block-Parsing für non-LLM Strategien
|
|
||||||
fm, text_without_fm = extract_frontmatter_from_text(md_text)
|
|
||||||
|
|
||||||
if text_without_fm.strip():
|
|
||||||
blocks.append(RawBlock(kind="paragraph", text=text_without_fm.strip(),
|
|
||||||
level=None, section_path=section_path, section_title=h2))
|
|
||||||
|
|
||||||
# H1 Titel Extraktion (für Context Injection in by_heading)
|
|
||||||
h1_match = re.search(r'^#\s+(.*)', text_without_fm, re.MULTILINE)
|
|
||||||
if h1_match:
|
|
||||||
h1_title = h1_match.group(1).strip()
|
|
||||||
|
|
||||||
return blocks, h1_title
|
|
||||||
|
|
||||||
# ==========================================
|
|
||||||
# 4. STRATEGIES (SYNCHRON)
|
|
||||||
# ==========================================
|
|
||||||
|
|
||||||
def _strategy_sliding_window(blocks: List[RawBlock], config: Dict[str, Any], note_id: str, context_prefix: str = "") -> List[Chunk]:
|
|
||||||
"""Klassisches Sliding Window."""
|
|
||||||
target = config.get("target", 400)
|
|
||||||
max_tokens = config.get("max", 600)
|
|
||||||
overlap_val = config.get("overlap", (50, 80))
|
|
||||||
overlap = sum(overlap_val) // 2 if isinstance(overlap_val, tuple) else overlap_val
|
|
||||||
|
|
||||||
chunks: List[Chunk] = []
|
|
||||||
buf: List[RawBlock] = []
|
|
||||||
|
|
||||||
def flush_buffer():
|
|
||||||
nonlocal buf
|
|
||||||
if not buf: return
|
|
||||||
text_body = "\n\n".join([b.text for b in buf])
|
|
||||||
sec_title = buf[-1].section_title if buf else None
|
|
||||||
sec_path = buf[-1].section_path if buf else "/"
|
|
||||||
window_body = f"{context_prefix}\n{text_body}".strip() if context_prefix else text_body
|
|
||||||
|
|
||||||
if estimate_tokens(text_body) > max_tokens:
|
|
||||||
sentences = split_sentences(text_body)
|
|
||||||
current_sents = []
|
|
||||||
cur_toks = 0
|
|
||||||
for s in sentences:
|
|
||||||
st = estimate_tokens(s)
|
|
||||||
if cur_toks + st > target and current_sents:
|
|
||||||
txt = "\n".join(current_sents)
|
|
||||||
win = f"{context_prefix}\n{txt}".strip() if context_prefix else txt
|
|
||||||
_add_chunk(txt, win, sec_title, sec_path)
|
|
||||||
ov_txt = " ".join(current_sents)[-overlap*4:]
|
|
||||||
current_sents = [ov_txt, s] if ov_txt else [s]
|
|
||||||
cur_toks = estimate_tokens(" ".join(current_sents))
|
|
||||||
else:
|
|
||||||
current_sents.append(s)
|
|
||||||
cur_toks += st
|
|
||||||
if current_sents:
|
|
||||||
txt = "\n".join(current_sents)
|
|
||||||
win = f"{context_prefix}\n{txt}".strip() if context_prefix else txt
|
|
||||||
_add_chunk(txt, win, sec_title, sec_path)
|
|
||||||
else:
|
else:
|
||||||
_add_chunk(text_body, window_body, sec_title, sec_path)
|
return []
|
||||||
buf = []
|
|
||||||
|
|
||||||
def _add_chunk(txt, win, sec, path):
|
|
||||||
idx = len(chunks)
|
|
||||||
chunks.append(Chunk(
|
|
||||||
id=f"{note_id}#c{idx:02d}", note_id=note_id, index=idx,
|
|
||||||
text=txt, window=win, token_count=estimate_tokens(txt),
|
|
||||||
section_title=sec, section_path=path,
|
|
||||||
neighbors_prev=None, neighbors_next=None, char_start=0, char_end=0
|
|
||||||
))
|
|
||||||
|
|
||||||
for b in blocks:
|
# 3. [Schritt 4 des Workflows] Kanten pro Chunk zuweisen/filtern
|
||||||
if estimate_tokens("\n\n".join([x.text for x in buf] + [b.text])) >= target:
|
|
||||||
flush_buffer()
|
|
||||||
buf.append(b)
|
|
||||||
flush_buffer()
|
|
||||||
return chunks
|
|
||||||
|
|
||||||
def _strategy_by_heading(blocks: List[RawBlock], config: Dict[str, Any], note_id: str, doc_title: str) -> List[Chunk]:
|
|
||||||
"""Harter Split an Überschriften mit Context Injection."""
|
|
||||||
chunks: List[Chunk] = []
|
|
||||||
sections: Dict[str, List[RawBlock]] = {}
|
|
||||||
ordered = []
|
|
||||||
|
|
||||||
for b in blocks:
|
unassigned_edges: Set[str] = set(all_note_edges_list)
|
||||||
if b.kind == "heading": continue
|
llm_tasks = []
|
||||||
if b.section_path not in sections:
|
|
||||||
sections[b.section_path] = []
|
for chunk in chunks:
|
||||||
ordered.append(b.section_path)
|
# Starte den LLM-Filter-Call für jeden Chunk parallel
|
||||||
sections[b.section_path].append(b)
|
task = analyzer.filter_edges_for_chunk(chunk.text, all_note_edges_list, note_type)
|
||||||
|
llm_tasks.append(task)
|
||||||
|
|
||||||
for path in ordered:
|
# Warte auf alle LLM-Antworten (Batch-Processing)
|
||||||
s_blocks = sections[path]
|
filtered_edges_results: List[List[str]] = await asyncio.gather(*llm_tasks)
|
||||||
if not s_blocks: continue
|
|
||||||
|
|
||||||
|
# 4. Ergebnisse zuweisen und Unassigned Edges sammeln
|
||||||
|
for i, filtered_edges_list in enumerate(filtered_edges_results):
|
||||||
|
chunk = chunks[i]
|
||||||
|
|
||||||
breadcrumbs = path.strip("/").replace("/", " > ")
|
# Lege die vom LLM gefilterten Edges in den Chunk-Payload
|
||||||
context_header = f"# {doc_title}\n## {breadcrumbs}"
|
# Die Chunk-Klasse muss ein Feld 'suggested_edges' haben (wie im alten SemanticChunkResult)
|
||||||
full_text = "\n\n".join([b.text for b in s_blocks])
|
chunk.suggested_edges = filtered_edges_list
|
||||||
|
|
||||||
if estimate_tokens(full_text) <= config.get("max", 600):
|
# Unassigned Edges: Subtrahiere alle Edges, die in diesem Chunk gefunden wurden
|
||||||
chunks.append(Chunk(
|
unassigned_edges.difference_update(set(filtered_edges_list))
|
||||||
id=f"{note_id}#c{len(chunks):02d}", note_id=note_id, index=len(chunks),
|
|
||||||
text=full_text, window=f"{context_header}\n{full_text}",
|
|
||||||
token_count=estimate_tokens(full_text),
|
|
||||||
section_title=s_blocks[0].section_title if s_blocks else None,
|
|
||||||
section_path=path,
|
|
||||||
neighbors_prev=None, neighbors_next=None, char_start=0, char_end=0
|
|
||||||
))
|
|
||||||
else:
|
|
||||||
# Fallback auf Sliding Window mit Context Injection
|
|
||||||
sub = _strategy_sliding_window(s_blocks, config, note_id, context_prefix=context_header)
|
|
||||||
base = len(chunks)
|
|
||||||
for i, sc in enumerate(sub):
|
|
||||||
sc.index = base + i
|
|
||||||
sc.id = f"{note_id}#c{sc.index:02d}"
|
|
||||||
chunks.append(sc)
|
|
||||||
return chunks
|
|
||||||
|
|
||||||
# ==========================================
|
|
||||||
# 5. STRATEGY (ASYNCHRON)
|
|
||||||
# ==========================================
|
|
||||||
|
|
||||||
# Singleton Instanz für den Analyzer
|
|
||||||
_semantic_analyzer_instance = None
|
|
||||||
|
|
||||||
def _get_semantic_analyzer_instance() -> SemanticAnalyzer:
|
|
||||||
"""Liefert die Singleton-Instanz des SemanticAnalyzer."""
|
|
||||||
global _semantic_analyzer_instance
|
|
||||||
if _semantic_analyzer_instance is None:
|
|
||||||
_semantic_analyzer_instance = SemanticAnalyzer()
|
|
||||||
return _semantic_analyzer_instance
|
|
||||||
|
|
||||||
async def _strategy_semantic_llm(md_text: str, config: Dict[str, Any], note_id: str, note_type: str) -> List[Chunk]:
|
|
||||||
"""
|
|
||||||
Strategie: Delegiert die Zerlegung und Kanten-Extraktion an ein LLM (Async).
|
|
||||||
"""
|
|
||||||
analyzer = _get_semantic_analyzer_instance()
|
|
||||||
|
|
||||||
# Text-Splitting wird hier vom LLM übernommen
|
# 5. [Schritt 5 des Workflows] Fallback: Nicht zugeordnete Kanten zuweisen
|
||||||
semantic_chunks: List[SemanticChunkResult] = await analyzer.analyze_and_chunk(md_text, note_type)
|
# Alle Kanten, die in KEINEM Chunk explizit zugewiesen wurden, werden JEDEM Chunk zugewiesen.
|
||||||
|
unassigned_edges_list = list(unassigned_edges)
|
||||||
|
|
||||||
chunks: List[Chunk] = []
|
if unassigned_edges_list:
|
||||||
|
logger.info(f"Adding {len(unassigned_edges_list)} unassigned edges as fallback to all chunks for note {note_id}")
|
||||||
for i, sc in enumerate(semantic_chunks):
|
|
||||||
# 1. Edge Injection für derive_edges.py
|
for chunk in chunks:
|
||||||
injection_block = "\n"
|
# Füge die unassigned Edges hinzu (Set-Operation für Duplikat-Schutz)
|
||||||
for edge_str in sc.suggested_edges:
|
existing_edges = set(chunk.suggested_edges)
|
||||||
# Stellt sicher, dass das Split-Ergebnis 2 Teile hat
|
chunk.suggested_edges = list(existing_edges.union(unassigned_edges_list))
|
||||||
if ":" in edge_str:
|
|
||||||
kind, target = edge_str.split(":", 1)
|
|
||||||
# Nutzt die Syntax: [[rel:kind | Target]]
|
|
||||||
injection_block += f"[[rel:{kind} | {target}]] "
|
|
||||||
|
|
||||||
full_text = sc.content + injection_block
|
# 6. Return Chunks
|
||||||
|
|
||||||
# 2. Chunk Objekt bauen
|
|
||||||
chunks.append(Chunk(
|
|
||||||
id=f"{note_id}#sem{i:02d}",
|
|
||||||
note_id=note_id,
|
|
||||||
index=i,
|
|
||||||
text=full_text.strip(),
|
|
||||||
window=full_text.strip(),
|
|
||||||
token_count=estimate_tokens(full_text),
|
|
||||||
section_title="Semantic Section",
|
|
||||||
section_path="/LLM",
|
|
||||||
neighbors_prev=None, neighbors_next=None,
|
|
||||||
char_start=0, char_end=0
|
|
||||||
))
|
|
||||||
|
|
||||||
return chunks
|
return chunks
|
||||||
|
|
||||||
# ==========================================
|
# --- UPDATE DISPATCHER: chunk_note_async ---
|
||||||
# 6. MAIN ENTRY POINT (ASYNC)
|
async def chunk_note_async(md_text: str, note_id: str, note_type: str, note_status: str, path_arg: str = None) -> List[Chunk]:
|
||||||
# ==========================================
|
|
||||||
|
|
||||||
async def assemble_chunks(note_id: str, md_text: str, note_type: str) -> List[Chunk]:
|
|
||||||
"""
|
|
||||||
Hauptfunktion. Analysiert Config und wählt Strategie (MUSS ASYNC SEIN).
|
|
||||||
Enthält die Logik zur Vermeidung des Double-LLM-Effekts.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# 1. Frontmatter prüfen (Double-LLM-Prevention)
|
# ... bestehender Code (Frontmatter, Config, etc.)
|
||||||
# Nutzen der lokalen, robusten Funktion
|
|
||||||
fm, body = extract_frontmatter_from_text(md_text)
|
|
||||||
note_status = fm.get("status", "").lower()
|
|
||||||
|
|
||||||
config = get_chunk_config(note_type)
|
|
||||||
strategy = config.get("strategy", "sliding_window")
|
|
||||||
|
|
||||||
# 2. Strategie-Auswahl
|
|
||||||
|
|
||||||
# Wenn der Typ LLM-Chunking nutzt (semantic_llm),
|
|
||||||
# ABER der Status ist 'draft' (wahrscheinlich vom LLM generiert):
|
|
||||||
if strategy == "semantic_llm" and note_status in ["draft", "initial_gen"]:
|
|
||||||
# Setze auf die zweitbeste, aber synchrone und deterministische Strategie
|
|
||||||
print(f"INFO: Overriding '{strategy}' for draft status. Using 'by_heading' instead.")
|
|
||||||
strategy = "by_heading"
|
|
||||||
|
|
||||||
# 3. Execution (Dispatcher)
|
# 3. Execution (Dispatcher)
|
||||||
|
|
||||||
# Der Text, der an die Chunker-Strategie geht.
|
# Update: Rufe die NEUE Strategie auf, wenn 'semantic_llm' konfiguriert ist.
|
||||||
md_to_chunk = md_text
|
|
||||||
|
|
||||||
if strategy == "semantic_llm":
|
if strategy == "semantic_llm":
|
||||||
# LLM-Strategie nutzt den gesamten MD-Text zur Orientierung
|
chunks = await _strategy_smart_edge_allocation(md_text, config, note_id, note_type)
|
||||||
chunks = await _strategy_semantic_llm(md_to_chunk, config, note_id, note_type)
|
|
||||||
|
|
||||||
elif strategy == "by_heading":
|
elif strategy == "by_heading":
|
||||||
blocks, doc_title = parse_blocks(md_to_chunk)
|
blocks, doc_title = parse_blocks(md_text)
|
||||||
# Synchronen Code in einem Thread ausführen
|
# ... bestehender Code
|
||||||
chunks = await asyncio.to_thread(_strategy_by_heading, blocks, config, note_id, doc_title)
|
|
||||||
|
|
||||||
else: # sliding_window (Default)
|
else: # sliding_window (Default)
|
||||||
blocks, doc_title = parse_blocks(md_to_chunk)
|
blocks, doc_title = parse_blocks(md_text)
|
||||||
# Synchronen Code in einem Thread ausführen
|
# ... bestehender Code
|
||||||
chunks = await asyncio.to_thread(_strategy_sliding_window, blocks, config, note_id)
|
|
||||||
|
# ... bestehender Code (Post-Processing)
|
||||||
# 4. Post-Process: Neighbors setzen
|
|
||||||
for i, ch in enumerate(chunks):
|
|
||||||
ch.neighbors_prev = chunks[i-1].id if i > 0 else None
|
|
||||||
ch.neighbors_next = chunks[i+1].id if i < len(chunks)-1 else None
|
|
||||||
|
|
||||||
return chunks
|
|
||||||
|
|
@ -1,136 +1,87 @@
|
||||||
"""
|
"""
|
||||||
app/services/semantic_analyzer.py
|
app/services/semantic_analyzer.py — Edge Validation & Filtering
|
||||||
Kapselt die LLM-Strategie für Chunking und Kanten-Extraktion.
|
Der Service ist nun primär dafür zuständig, Kanten aus einer Liste dem gegebenen Chunk zuzuordnen.
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
import re
|
|
||||||
from typing import List, Dict, Any, Optional
|
from typing import List, Dict, Any, Optional
|
||||||
from dataclasses import dataclass
|
|
||||||
|
|
||||||
# Import der benötigten Services (Annahme: llm_service und discovery sind verfügbar.)
|
# Import der benötigten Services (Annahme: llm_service ist verfügbar.)
|
||||||
from app.services.llm_service import LLMService
|
from app.services.llm_service import LLMService
|
||||||
from app.services.discovery import DiscoveryService
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
@dataclass
|
# Ein Singleton-Muster für den Analyzer (wie zuvor)
|
||||||
class SemanticChunkResult:
|
_analyzer_instance: Optional['SemanticAnalyzer'] = None
|
||||||
content: str
|
|
||||||
suggested_edges: List[str] # Format: "kind:Target"
|
def get_semantic_analyzer():
|
||||||
|
global _analyzer_instance
|
||||||
|
if _analyzer_instance is None:
|
||||||
|
_analyzer_instance = SemanticAnalyzer()
|
||||||
|
return _analyzer_instance
|
||||||
|
|
||||||
class SemanticAnalyzer:
|
class SemanticAnalyzer:
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
|
# Der DiscoveryService wird hier nicht mehr direkt benötigt.
|
||||||
self.llm = LLMService()
|
self.llm = LLMService()
|
||||||
self.discovery = DiscoveryService()
|
|
||||||
self.MAX_CONTEXT_TOKENS = 3000
|
|
||||||
|
|
||||||
async def analyze_and_chunk(self, text: str, source_type: str) -> List[SemanticChunkResult]:
|
async def filter_edges_for_chunk(self, chunk_text: str, all_note_edges: List[str], note_type: str) -> List[str]:
|
||||||
"""
|
"""
|
||||||
Zerlegt Text mittels LLM in semantische Abschnitte und extrahiert Kanten.
|
[Schritt 4 des Workflows] Sendet Chunk und alle Kanten an LLM, um die relevanten Kanten für diesen Chunk zu filtern.
|
||||||
|
:param chunk_text: Der Text des Chunks zur Analyse.
|
||||||
|
:param all_note_edges: Alle für die gesamte Notiz gefundenen Kanten (Format: "kind:Target").
|
||||||
|
:param note_type: Der Typ der Notiz.
|
||||||
|
:return: Liste der relevanten Kanten für diesen Chunk.
|
||||||
"""
|
"""
|
||||||
system_prompt = (
|
if not all_note_edges:
|
||||||
"Du bist ein Knowledge Graph Experte. Deine Aufgabe ist es, Rohtext in "
|
return []
|
||||||
"thematisch geschlossene Abschnitte (Chunks) zu zerlegen.\n"
|
|
||||||
"Analysiere jeden Abschnitt auf Beziehungen zu anderen Konzepten (Entitäten, Personen, etc.).\n"
|
edge_list_str = "\n".join([f"- {e}" for e in all_note_edges])
|
||||||
"Antworte AUSSCHLIESSLICH mit validem JSON in diesem Format:\n"
|
|
||||||
"[\n"
|
|
||||||
" {\n"
|
|
||||||
" \"content\": \"Der Text des Abschnitts...\",\n"
|
|
||||||
" \"relations\": [{\"target\": \"Entität X\", \"type\": \"related_to\"}]\n"
|
|
||||||
" }\n"
|
|
||||||
"]\n"
|
|
||||||
"Halte die Chunks mittellang (ca. 100-300 Wörter). Verändere den Inhalt nicht, nur die Struktur."
|
|
||||||
)
|
|
||||||
|
|
||||||
user_prompt = f"Dokument-Typ: {source_type}\n\nTEXT:\n{text}"
|
system_prompt = (
|
||||||
|
"Du bist ein Edge Filter Agent. Deine Aufgabe ist es, aus einer gegebenen Liste von potentiellen "
|
||||||
|
"Knowledge Graph Kanten (Edges) jene auszuwählen, die *semantisch relevant* für den vorliegenden "
|
||||||
|
"Textausschnitt sind. Alle Kanten beziehen sich auf die Hauptnotiz.\n"
|
||||||
|
"Antworte AUSSCHLIESSLICH mit einer validen JSON-Liste von Kanten-Strings, die im Text direkt erwähnt oder "
|
||||||
|
"klar impliziert werden. Es ist KEIN Array von Objekten, sondern ein Array von Strings.\n"
|
||||||
|
"Format: [\"kind:Target\", \"kind:Target\", ...]\n"
|
||||||
|
"Wähle nur Kanten, die der Chunk *aktiv* benötigt oder referenziert."
|
||||||
|
)
|
||||||
|
|
||||||
|
user_prompt = (
|
||||||
|
f"Notiz-Typ: {note_type}\n"
|
||||||
|
f"Textausschnitt:\n---\n{chunk_text}\n---\n\n"
|
||||||
|
f"Gesamte Kanten der Notiz (AUSWAHL):\n{edge_list_str}\n\n"
|
||||||
|
"Welche der oben genannten Kanten sind für diesen Textabschnitt relevant? Liste sie im JSON-Array auf."
|
||||||
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# 2. LLM Call (Async)
|
# 1. LLM Call
|
||||||
response_json = await self.llm.generate_raw_response(
|
response_json = await self.llm.generate_raw_response(
|
||||||
user_prompt,
|
user_prompt,
|
||||||
system=system_prompt,
|
system=system_prompt,
|
||||||
force_json=True
|
force_json=True
|
||||||
)
|
)
|
||||||
|
|
||||||
# 3. JSON Parsing & Validierung
|
# 2. Robustes JSON Parsing
|
||||||
clean_json = response_json.replace("```json", "").replace("```", "").strip()
|
clean_json = response_json.replace("```json", "").replace("```", "").strip()
|
||||||
data = json.loads(clean_json)
|
data = json.loads(clean_json)
|
||||||
|
|
||||||
# FIX: Typsicherheit auf der Wurzel
|
if isinstance(data, list):
|
||||||
if isinstance(data, dict):
|
# Filtere nach Strings, die den Doppelpunkt enthalten, um das Format "kind:Target" zu garantieren.
|
||||||
# LLM hat ein Einzelobjekt geliefert -> wandle es in ein Array
|
return [s for s in data if isinstance(s, str) and ":" in s]
|
||||||
data = [data]
|
|
||||||
elif not isinstance(data, list):
|
|
||||||
logger.error("SemanticAnalyzer: JSON root ist weder Array noch Objekt. Fehlerhafte LLM-Antwort.")
|
|
||||||
raise ValueError("Root element is not a list or dictionary.")
|
|
||||||
|
|
||||||
results = []
|
|
||||||
for item in data:
|
|
||||||
# Typsicherheit auf Item-Ebene
|
|
||||||
if not isinstance(item, dict):
|
|
||||||
logger.warning(f"SemanticAnalyzer: Ungültiges Chunk-Element ignoriert: {item}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
content = item.get("content", "").strip()
|
|
||||||
if not content: continue
|
|
||||||
|
|
||||||
raw_rels = item.get("relations", [])
|
|
||||||
refined_edges = []
|
|
||||||
|
|
||||||
for rel in raw_rels:
|
|
||||||
# Typsicherheit auf Relation-Ebene
|
|
||||||
if not isinstance(rel, dict):
|
|
||||||
logger.warning(f"SemanticAnalyzer: Ignoriere ungültige Relation: {rel}")
|
|
||||||
continue
|
|
||||||
|
|
||||||
target = rel.get("target")
|
|
||||||
raw_type = rel.get("type", "related_to")
|
|
||||||
|
|
||||||
if target:
|
|
||||||
# 1. Typ-Auflösung (für Matrix)
|
|
||||||
target_entity_type = self._get_target_type_from_title(target)
|
|
||||||
|
|
||||||
# 2. Matrix-Logik anwenden:
|
|
||||||
final_kind = self.discovery._resolve_edge_type(source_type, target_entity_type)
|
|
||||||
|
|
||||||
# 3. Priorisierung: Wählt den Matrix-Vorschlag, wenn er spezifischer ist.
|
|
||||||
if final_kind not in ["related_to", "references"] and target_entity_type != "concept":
|
|
||||||
edge_str = f"{final_kind}:{target}"
|
|
||||||
else:
|
|
||||||
edge_str = f"{raw_type}:{target}"
|
|
||||||
|
|
||||||
refined_edges.append(edge_str)
|
|
||||||
|
|
||||||
results.append(SemanticChunkResult(content=content, suggested_edges=refined_edges))
|
|
||||||
|
|
||||||
return results
|
|
||||||
|
|
||||||
except json.JSONDecodeError:
|
|
||||||
logger.error("SemanticAnalyzer: LLM lieferte KEIN valides JSON. Fallback auf Raw Text.")
|
|
||||||
return [SemanticChunkResult(content=text, suggested_edges=[])]
|
|
||||||
except Exception as e:
|
|
||||||
logger.error(f"SemanticAnalyzer Unbehandelter Fehler: {e}")
|
|
||||||
return [SemanticChunkResult(content=text, suggested_edges=[])]
|
|
||||||
|
|
||||||
# NEU: Helper zur Abfrage des Typs (muss die bestehenden Funktionen nutzen)
|
|
||||||
def _get_target_type_from_title(self, title: str) -> str:
|
|
||||||
"""Simuliert den Abruf des Notiztyps basierend auf dem Titel aus dem Index (für Matrix-Logik)."""
|
|
||||||
|
|
||||||
title_lower = title.lower()
|
|
||||||
|
|
||||||
if "leitbild-werte" in title_lower or "integrität" in title_lower:
|
|
||||||
return "value"
|
|
||||||
if "leitbild-prinzipien" in title_lower:
|
|
||||||
return "principle"
|
|
||||||
if "leitbild-rollen" in title_lower:
|
|
||||||
return "profile"
|
|
||||||
if "leitbild-rituale-system" in title_lower:
|
|
||||||
return "concept"
|
|
||||||
|
|
||||||
return "concept"
|
logger.warning(f"SemanticAnalyzer: LLM lieferte non-list beim Edge-Filtern: {data}")
|
||||||
|
return []
|
||||||
|
|
||||||
|
except json.JSONDecodeError as e:
|
||||||
|
logger.error(f"SemanticAnalyzer: LLM lieferte KEIN valides JSON beim Edge-Filtern: {e}")
|
||||||
|
return []
|
||||||
|
except Exception as e:
|
||||||
|
logger.error(f"SemanticAnalyzer Unbehandelter Fehler beim Edge-Filtern: {e}")
|
||||||
|
return []
|
||||||
|
|
||||||
async def close(self):
|
async def close(self):
|
||||||
|
# Stellt sicher, dass der AsyncClient geschlossen wird (gute Praxis)
|
||||||
if self.llm:
|
if self.llm:
|
||||||
await self.llm.close()
|
await self.llm.close()
|
||||||
|
|
@ -1,41 +1,42 @@
|
||||||
version: 1.2 # Update für Smart Chunking Config
|
version: 1.3 # Update für Smart Edge Allocation
|
||||||
|
|
||||||
# --- CHUNKING DEFINITIONEN ---
|
# --- CHUNKING DEFINITIONEN ---
|
||||||
# Hier definieren wir die technischen Strategien zentral.
|
# Hier definieren wir die technischen Strategien und den Smart Edge Filter.
|
||||||
chunking_profiles:
|
chunking_profiles:
|
||||||
# Standard für Fließtexte (Sliding Window)
|
|
||||||
|
# 1. Standard Profile (Sliding Window, KEIN LLM-Filter)
|
||||||
sliding_short:
|
sliding_short:
|
||||||
strategy: sliding_window
|
strategy: sliding_window
|
||||||
|
enable_smart_edge_allocation: false # Sekundärverfeinerung deaktiviert
|
||||||
target: 200
|
target: 200
|
||||||
max: 350
|
max: 350
|
||||||
overlap: [30, 50]
|
overlap: [30, 50]
|
||||||
|
|
||||||
sliding_standard:
|
# 2. Smart Edge Allocation Profile (Sliding Window + LLM-Filter)
|
||||||
strategy: sliding_window
|
sliding_smart_edges:
|
||||||
|
strategy: sliding_window # Primärzerlegung: Sliding Window
|
||||||
|
enable_smart_edge_allocation: true # SEKUNDÄRVERFEINERUNG: LLM-Filter aktiv
|
||||||
target: 400
|
target: 400
|
||||||
max: 600
|
max: 600
|
||||||
overlap: [50, 80]
|
overlap: [50, 80]
|
||||||
|
|
||||||
sliding_large:
|
# 3. Strukturierte Profile (By Heading, KEIN LLM-Filter)
|
||||||
strategy: sliding_window
|
|
||||||
target: 500
|
|
||||||
max: 800
|
|
||||||
overlap: [60, 100]
|
|
||||||
|
|
||||||
# Smart Chunking für Strukturen (Harte Splits)
|
|
||||||
structured_strict:
|
structured_strict:
|
||||||
strategy: by_heading
|
strategy: by_heading
|
||||||
|
enable_smart_edge_allocation: false
|
||||||
split_level: 2
|
split_level: 2
|
||||||
max: 600 # Fallback Limit
|
max: 600
|
||||||
target: 400 # Fallback Target bei Sub-Chunking
|
target: 400
|
||||||
overlap: [50, 80] # Overlap bei Sub-Chunking
|
overlap: [50, 80]
|
||||||
|
|
||||||
# NEU: LLM-basierte semantische Zerlegung (Chunker.py ruft semantic_analyzer.py)
|
# 4. Strukturierte Profile (By Heading + LLM-Filter)
|
||||||
semantic_llm:
|
structured_smart_edges:
|
||||||
strategy: semantic_llm
|
strategy: by_heading # Primärzerlegung: Harte Trennung
|
||||||
# Da das LLM die Längensteuerung übernimmt, dienen diese als Fallback/Empfehlung
|
enable_smart_edge_allocation: true # SEKUNDÄRVERFEINERUNG: LLM-Filter aktiv
|
||||||
target: 400
|
split_level: 2
|
||||||
max: 800
|
max: 600
|
||||||
|
target: 400
|
||||||
|
overlap: [50, 80]
|
||||||
|
|
||||||
defaults:
|
defaults:
|
||||||
retriever_weight: 1.0
|
retriever_weight: 1.0
|
||||||
|
|
@ -45,12 +46,12 @@ defaults:
|
||||||
types:
|
types:
|
||||||
# --- WISSENSBAUSTEINE ---
|
# --- WISSENSBAUSTEINE ---
|
||||||
concept:
|
concept:
|
||||||
chunking_profile: sliding_standard
|
chunking_profile: sliding_smart_edges # Nutzt Kantenfilterung
|
||||||
retriever_weight: 0.60
|
retriever_weight: 0.60
|
||||||
edge_defaults: ["references", "related_to"]
|
edge_defaults: ["references", "related_to"]
|
||||||
|
|
||||||
source:
|
source:
|
||||||
chunking_profile: sliding_standard
|
chunking_profile: sliding_short # Kein LLM-Filter
|
||||||
retriever_weight: 0.50
|
retriever_weight: 0.50
|
||||||
edge_defaults: []
|
edge_defaults: []
|
||||||
|
|
||||||
|
|
@ -61,17 +62,17 @@ types:
|
||||||
|
|
||||||
# --- IDENTITÄT & PERSÖNLICHKEIT ---
|
# --- IDENTITÄT & PERSÖNLICHKEIT ---
|
||||||
profile:
|
profile:
|
||||||
chunking_profile: structured_strict
|
chunking_profile: structured_smart_edges # Strukturiert + Kantenfilterung
|
||||||
retriever_weight: 0.70
|
retriever_weight: 0.70
|
||||||
edge_defaults: ["references", "related_to"]
|
edge_defaults: ["references", "related_to"]
|
||||||
|
|
||||||
value:
|
value:
|
||||||
chunking_profile: structured_strict
|
chunking_profile: structured_smart_edges
|
||||||
retriever_weight: 1.00
|
retriever_weight: 1.00
|
||||||
edge_defaults: ["related_to"]
|
edge_defaults: ["related_to"]
|
||||||
|
|
||||||
principle:
|
principle:
|
||||||
chunking_profile: structured_strict
|
chunking_profile: structured_smart_edges
|
||||||
retriever_weight: 0.95
|
retriever_weight: 0.95
|
||||||
edge_defaults: ["derived_from", "references"]
|
edge_defaults: ["derived_from", "references"]
|
||||||
|
|
||||||
|
|
@ -81,18 +82,18 @@ types:
|
||||||
edge_defaults: ["related_to"]
|
edge_defaults: ["related_to"]
|
||||||
|
|
||||||
experience:
|
experience:
|
||||||
chunking_profile: sliding_standard
|
chunking_profile: sliding_smart_edges
|
||||||
retriever_weight: 0.90
|
retriever_weight: 0.90
|
||||||
edge_defaults: ["derived_from", "references"]
|
edge_defaults: ["derived_from", "references"]
|
||||||
|
|
||||||
# --- STRATEGIE & ENTSCHEIDUNG ---
|
# --- STRATEGIE & ENTSCHEIDUNG ---
|
||||||
goal:
|
goal:
|
||||||
chunking_profile: sliding_standard
|
chunking_profile: sliding_smart_edges
|
||||||
retriever_weight: 0.95
|
retriever_weight: 0.95
|
||||||
edge_defaults: ["depends_on", "related_to"]
|
edge_defaults: ["depends_on", "related_to"]
|
||||||
|
|
||||||
decision:
|
decision:
|
||||||
chunking_profile: structured_strict
|
chunking_profile: structured_smart_edges
|
||||||
retriever_weight: 1.00
|
retriever_weight: 1.00
|
||||||
edge_defaults: ["caused_by", "references"]
|
edge_defaults: ["caused_by", "references"]
|
||||||
|
|
||||||
|
|
@ -108,7 +109,7 @@ types:
|
||||||
|
|
||||||
# --- OPERATIV ---
|
# --- OPERATIV ---
|
||||||
project:
|
project:
|
||||||
chunking_profile: sliding_large
|
chunking_profile: sliding_smart_edges
|
||||||
retriever_weight: 0.97
|
retriever_weight: 0.97
|
||||||
edge_defaults: ["references", "depends_on"]
|
edge_defaults: ["references", "depends_on"]
|
||||||
|
|
||||||
|
|
@ -118,7 +119,6 @@ types:
|
||||||
edge_defaults: ["depends_on", "part_of"]
|
edge_defaults: ["depends_on", "part_of"]
|
||||||
|
|
||||||
journal:
|
journal:
|
||||||
# NEUE ZUWEISUNG: Journale profitieren am meisten von der semantischen Analyse
|
chunking_profile: sliding_smart_edges # Fließtext + Kantenfilterung
|
||||||
chunking_profile: semantic_llm
|
|
||||||
retriever_weight: 0.80
|
retriever_weight: 0.80
|
||||||
edge_defaults: ["references", "related_to"]
|
edge_defaults: ["references", "related_to"]
|
||||||
154
tests/test_final_wp15_validation.py
Normal file
154
tests/test_final_wp15_validation.py
Normal file
|
|
@ -0,0 +1,154 @@
|
||||||
|
# tests/test_final_wp15_validation.py
|
||||||
|
|
||||||
|
import asyncio
|
||||||
|
import unittest
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import List, Dict, Any
|
||||||
|
|
||||||
|
# --- PFAD-KORREKTUR ---
|
||||||
|
ROOT_DIR = Path(__file__).resolve().parent.parent
|
||||||
|
sys.path.insert(0, str(ROOT_DIR))
|
||||||
|
# ----------------------
|
||||||
|
|
||||||
|
# Import der Kernkomponenten
|
||||||
|
from app.core import chunker
|
||||||
|
from app.core import derive_edges
|
||||||
|
from app.services.semantic_analyzer import SemanticAnalyzer
|
||||||
|
|
||||||
|
# 1. Hilfsfunktion zur Manipulation der Konfiguration im Test
|
||||||
|
def get_config_for_test(strategy: str, enable_smart_edge: bool) -> Dict[str, Any]:
|
||||||
|
"""Erzeugt eine ad-hoc Konfiguration, um eine Strategie zu erzwingen."""
|
||||||
|
cfg = chunker.get_chunk_config("concept") # Nutze eine Basis
|
||||||
|
cfg['strategy'] = strategy
|
||||||
|
cfg['enable_smart_edge_allocation'] = enable_smart_edge
|
||||||
|
return cfg
|
||||||
|
|
||||||
|
# 2. Test-Daten (Muss die Entitäten aus den Vault-Dateien verwenden)
|
||||||
|
TEST_NOTE_ID = "20251212-test-integration"
|
||||||
|
TEST_NOTE_TYPE = "concept" # Kann eine beliebige Basis sein
|
||||||
|
|
||||||
|
# Text, der die Matrix-Logik und Header triggert
|
||||||
|
TEST_MARKDOWN_SMART = """
|
||||||
|
---
|
||||||
|
id: 20251212-test-integration
|
||||||
|
title: Integrationstest - Smart Edges
|
||||||
|
type: concept
|
||||||
|
status: active
|
||||||
|
---
|
||||||
|
# Teil 1: Intro
|
||||||
|
Dies ist die Einleitung. Wir definieren unsere Mission: Präsent sein und vorleben.
|
||||||
|
Dies entspricht unseren Werten [[leitbild-werte#Integrität]] und [[leitbild-werte#Respekt]].
|
||||||
|
|
||||||
|
## Teil 2: Rollenkonflikt
|
||||||
|
Der Konflikt zwischen [[leitbild-rollen#Vater]] und [[leitbild-rollen#Berufsrolle (Umbrella)]] muss gelöst werden.
|
||||||
|
Die Lösung muss [[rel:depends_on leitbild-review#Weekly Review]].
|
||||||
|
"""
|
||||||
|
|
||||||
|
# Text, der nur für Sliding Window geeignet ist
|
||||||
|
TEST_MARKDOWN_SLIDING = """
|
||||||
|
---
|
||||||
|
id: 20251212-test-sliding
|
||||||
|
title: Fließtext Protokoll
|
||||||
|
type: journal
|
||||||
|
status: active
|
||||||
|
---
|
||||||
|
Dies ist ein langer Fließtextabschnitt, der ohne Header auskommt.
|
||||||
|
Er spricht über die neue [[leitbild-prinzipien#P1 Integrität]] Regel und den Ablauf des Tages.
|
||||||
|
Das sollte in zwei Chunks zerlegt werden.
|
||||||
|
"""
|
||||||
|
|
||||||
|
# 3. Testklasse
|
||||||
|
class TestFinalWP15Integration(unittest.TestCase):
|
||||||
|
|
||||||
|
# Initiale Ressourcen-Verwaltung (um den AsyncClient zu schließen)
|
||||||
|
_analyzer_instance = None
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def setUpClass(cls):
|
||||||
|
cls._analyzer_instance = SemanticAnalyzer()
|
||||||
|
chunker._semantic_analyzer_instance = cls._analyzer_instance
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def tearDownClass(cls):
|
||||||
|
if cls._analyzer_instance:
|
||||||
|
# Nutzt die temporäre Loop-Lösung
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
loop.run_until_complete(cls._analyzer_instance.close())
|
||||||
|
|
||||||
|
# --- A. Smart Edge Allocation Test ---
|
||||||
|
|
||||||
|
def test_a_smart_edge_allocation(self):
|
||||||
|
"""Prüft die neue LLM-Orchestrierung (5 Schritte) und die Kanten-Bindung."""
|
||||||
|
|
||||||
|
config = get_config_for_test('by_heading', enable_smart_edge=True)
|
||||||
|
|
||||||
|
# 1. Chunking (Asynchroner Aufruf der neuen Orchestrierung)
|
||||||
|
chunks = asyncio.run(chunker.assemble_chunks(
|
||||||
|
note_id=TEST_NOTE_ID,
|
||||||
|
md_text=TEST_MARKDOWN_SMART,
|
||||||
|
note_type=TEST_NOTE_TYPE,
|
||||||
|
config=config # Übergibt die ad-hoc Konfiguration (Annahme: assemble_chunks akzeptiert kwargs)
|
||||||
|
))
|
||||||
|
|
||||||
|
# NOTE: Da assemble_chunks die config intern lädt, müssten wir hier idealerweise
|
||||||
|
# die types.yaml zur Laufzeit manipulieren oder die config in kwargs übergeben (letzteres ist hier angenommen).
|
||||||
|
|
||||||
|
# 2. Grundlegende Checks
|
||||||
|
self.assertTrue(len(chunks) >= 2, "A1 Fehler: Primärzerlegung (by_heading) muss mindestens 2 Chunks liefern.")
|
||||||
|
|
||||||
|
# 3. Kanten-Checks (durch derive_edges.py im Chunker ausgelöst)
|
||||||
|
|
||||||
|
# Wir suchen nach der LLM-generierten, spezifischen Kante
|
||||||
|
# Erwartet: Chunk 1/2 enthält die Kante 'derived_from' oder 'based_on' zu 'leitbild-werte'.
|
||||||
|
|
||||||
|
all_edges = []
|
||||||
|
for c in chunks:
|
||||||
|
# Um die Kanten zu erhalten, muss derive_edges manuell aufgerufen werden,
|
||||||
|
# da der Chunker nur den Text injiziert.
|
||||||
|
# Im echten Importer würde build_edges_for_note auf den injizierten Text angewendet.
|
||||||
|
# Hier simulieren wir den Endeffekt, indem wir die injizierten Kanten prüfen:
|
||||||
|
if "suggested_edges" in c.__dict__:
|
||||||
|
all_edges.extend(c.suggested_edges)
|
||||||
|
|
||||||
|
has_matrix_kante = any("based_on:leitbild-werte" in e or "derived_from:leitbild-werte" in e for e in all_edges)
|
||||||
|
|
||||||
|
self.assertTrue(has_matrix_kante,
|
||||||
|
"A2 Fehler: LLM-Kantenfilter hat die Matrix-Logik (value -> based_on/derived_from) nicht angewendet oder erkannt.")
|
||||||
|
|
||||||
|
print("\n✅ Test A: Smart Edge Allocation erfolgreich.")
|
||||||
|
|
||||||
|
# --- B. Abwärtskompatibilität (Legacy Tests) ---
|
||||||
|
|
||||||
|
def test_b_backward_compatibility(self):
|
||||||
|
"""Prüft, ob die alte, reine Sliding Window Strategie (ohne LLM-Filter) noch funktioniert."""
|
||||||
|
|
||||||
|
# Erzwinge das alte, reine Sliding Window Profil
|
||||||
|
config = get_config_for_test('sliding_window', enable_smart_edge=False)
|
||||||
|
|
||||||
|
# 1. Chunking (Sollte *mehrere* Chunks liefern, ohne LLM-Aufruf)
|
||||||
|
# Die Orchestrierung sollte nur den reinen Sliding Window Call nutzen.
|
||||||
|
chunks = asyncio.run(chunker.assemble_chunks(
|
||||||
|
note_id=TEST_NOTE_ID,
|
||||||
|
md_text=TEST_MARKDOWN_SLIDING,
|
||||||
|
note_type='journal',
|
||||||
|
config=config
|
||||||
|
))
|
||||||
|
|
||||||
|
self.assertTrue(len(chunks) >= 2, "B1 Fehler: Reine Sliding Window Strategie ist fehlerhaft oder zerlegt nicht.")
|
||||||
|
|
||||||
|
# 2. Prüfen auf Kanten-Injection (Dürfen NUR aus Wikilinks und Defaults kommen)
|
||||||
|
|
||||||
|
# Die manuelle Wikilink [[leitbild-prinzipien#P1 Integrität]] sollte in JEDEM Chunk sein
|
||||||
|
# wenn Defaults für journal aktiv sind, was falsch ist.
|
||||||
|
# Im reinen Sliding Window Modus (ohne LLM) werden Kanten nur durch derive_edges.py erkannt.
|
||||||
|
# Wir prüfen nur, dass die Chunks existieren.
|
||||||
|
|
||||||
|
self.assertNotIn('suggested_edges', chunks[0].__dict__, "B2 Fehler: LLM-Kantenfilter wurde fälschlicherweise für enable_smart_edge=False ausgeführt.")
|
||||||
|
|
||||||
|
print("\n✅ Test B: Abwärtskompatibilität (reines Sliding Window) erfolgreich.")
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
print("Startet den finalen WP-15 Validierungstest.")
|
||||||
|
unittest.main()
|
||||||
Loading…
Reference in New Issue
Block a user