From 8f65e550c86a77cb77532b848f89f38b85b50c7b Mon Sep 17 00:00:00 2001
From: Lars
Date: Mon, 29 Dec 2025 22:16:12 +0100
Subject: [PATCH] Optimize the chunking parser to support atomic blocks and
 improve sentence handling. Update the section chunking strategy to version
 3.9.0 with a rule-conformant implementation and adjust the carry-over queue
 processing. Improve handling of headings and metadata to preserve structural
 integrity.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 app/core/chunking/chunking_parser.py     | 21 ++---
 app/core/chunking/chunking_strategies.py | 99 ++++++++++++------------
 2 files changed, 53 insertions(+), 67 deletions(-)

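A quick illustration of the sentence splitter after this change. The helper body is
copied verbatim from the chunking_parser.py hunk below; the sample strings are made up:

    import re

    def split_sentences(text: str) -> list[str]:
        # Collapse all whitespace, then split after ., ! or ? whenever the next
        # token starts with an uppercase letter (incl. umlauts), a digit, a
        # German low quote or an opening parenthesis.
        text = re.sub(r'\s+', ' ', text.strip())
        if not text:
            return []
        return [s.strip() for s in re.split(r'(?<=[.!?])\s+(?=[A-ZÄÖÜ0-9„(])', text) if s.strip()]

    print(split_sentences("Das ist Satz eins.  Über den zweiten Satz! Und der dritte?"))
    # ['Das ist Satz eins.', 'Über den zweiten Satz!', 'Und der dritte?']

    # Known trade-off of the simple pattern: abbreviations followed by an
    # uppercase word are still treated as sentence boundaries.
    print(split_sentences("Siehe z. B. die Dokumentation."))
    # ['Siehe z.', 'B. die Dokumentation.']
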
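The reworked strategy_by_heading packs whole sections on the token estimate alone
(Rule 1), flushes the buffer and requeues a section that no longer fits (Rule 2), and
sentence-splits a section that is oversized on its own, carrying the remainder back to
the front of the queue (Rule 3). Below is a condensed, self-contained sketch of that
queue discipline, not the shipped code: estimate_tokens is replaced by a crude
whitespace count and sections are plain strings, whereas the real implementation keeps
RawBlock metadata and re-prefixes the section heading onto the carried-over remainder:

    def pack_sections(sections: list[str], max_tokens: int = 600) -> list[str]:
        """Illustrative sketch of the Pack-and-Carry-Over rules, not the shipped code."""
        def est(t: str) -> int:
            # Stand-in for estimate_tokens(): whitespace-separated word count.
            return len(t.split())

        queue, chunks, buf = list(sections), [], ""
        while queue:
            item = queue.pop(0)
            combined = f"{buf}\n\n{item}".strip() if buf else item
            if est(combined) <= max_tokens:
                buf = combined                 # Rule 1: the whole section fits -> take it
            elif buf:
                chunks.append(buf)             # Rule 2: buffer is full -> emit it
                buf = ""
                queue.insert(0, item)          #         and requeue the section
            else:
                sents = item.split(". ")       # Rule 3: a single oversized section
                take, used = [], 0
                while sents:
                    s = sents.pop(0)
                    if take and used + est(s) > max_tokens:
                        sents.insert(0, s)
                        break
                    take.append(s)
                    used += est(s)
                chunks.append(". ".join(take))
                if sents:                      # carry-over: the remainder re-enters first
                    queue.insert(0, ". ".join(sents))
        if buf:
            chunks.append(buf)
        return chunks

Because the remainder is pushed to the front of the queue, document order is preserved
and the next iteration applies the same three rules to it, which is what the new
queue.insert(0, ...) calls in the strategies hunks below do.
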
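strategy_sliding_window now builds its window text via _create_win as well, so
sliding-window chunks get the same document/section-title prefix as heading-based
chunks instead of the bare context_prefix. Only the return statement of _create_win is
visible in this diff; the shape below is an assumption about how the prefix might be
assembled and is not the actual implementation:

    from typing import Optional

    def _create_win(doc_title: str, sec_title: Optional[str], text: str) -> str:
        # Hypothetical prefix construction; the real assignment sits outside the hunk.
        prefix = " / ".join(p for p in (doc_title, sec_title) if p)
        return f"{prefix}\n{text}".strip() if prefix else text
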
diff --git a/app/core/chunking/chunking_parser.py b/app/core/chunking/chunking_parser.py
index 696650a..e55f032 100644
--- a/app/core/chunking/chunking_parser.py
+++ b/app/core/chunking/chunking_parser.py
@@ -1,6 +1,6 @@
 """
 FILE: app/core/chunking/chunking_parser.py
-DESCRIPTION: Zerlegt Markdown in logische Blöcke. Hält H1-Überschriften im Stream
+DESCRIPTION: Zerlegt Markdown in atomare Blöcke. Hält H1-Überschriften im Stream
              und gewährleistet die strukturelle Integrität von Callouts.
 """
 import re
@@ -8,15 +8,12 @@ from typing import List, Tuple, Set
 from .chunking_models import RawBlock
 from .chunking_utils import extract_frontmatter_from_text

-_WS = re.compile(r'\s+')
-_SENT_SPLIT = re.compile(r'(?<=[.!?])\s+(?=[A-ZÄÖÜ0-9„(])')
-
 def split_sentences(text: str) -> list[str]:
     """Teilt Text in Sätze auf unter Berücksichtigung deutscher Interpunktion."""
-    text = _WS.sub(' ', text.strip())
+    text = re.sub(r'\s+', ' ', text.strip())
     if not text: return []
-    # Splittet bei Punkt, Ausrufezeichen oder Fragezeichen, gefolgt von Großbuchstabe
-    return [p.strip() for p in _SENT_SPLIT.split(text) if p.strip()]
+    # Splittet bei Satzzeichen, gefolgt von Leerzeichen und Großbuchstaben
+    return [s.strip() for s in re.split(r'(?<=[.!?])\s+(?=[A-ZÄÖÜ0-9„(])', text) if s.strip()]

 def parse_blocks(md_text: str) -> Tuple[List[RawBlock], str]:
     """Zerlegt Text in logische Einheiten (RawBlocks), inklusive H1-H6."""
@@ -24,7 +21,6 @@ def parse_blocks(md_text: str) -> Tuple[List[RawBlock], str]:
     h1_title = "Dokument"; section_path = "/"; current_section_title = None
     fm, text_without_fm = extract_frontmatter_from_text(md_text)

-    # H1 für Note-Titel extrahieren
     h1_match = re.search(r'^#\s+(.*)', text_without_fm, re.MULTILINE)
     if h1_match: h1_title = h1_match.group(1).strip()

@@ -33,28 +29,23 @@ def parse_blocks(md_text: str) -> Tuple[List[RawBlock], str]:
     for line in lines:
         stripped = line.strip()
-
-        # Heading-Erkennung (H1 bis H6)
         heading_match = re.match(r'^(#{1,6})\s+(.*)', stripped)
+
         if heading_match:
             if buffer:
                 content = "\n".join(buffer).strip()
                 if content: blocks.append(RawBlock("paragraph", content, None, section_path, current_section_title))
                 buffer = []
-
             level = len(heading_match.group(1))
             title = heading_match.group(2).strip()
-
             if level == 1: current_section_title = title; section_path = "/"
             elif level == 2: current_section_title = title; section_path = f"/{current_section_title}"
-
             blocks.append(RawBlock("heading", stripped, level, section_path, current_section_title))
             continue

-        # Trenner oder Leerzeilen beenden Blöcke, außer innerhalb von Callouts
         if (not stripped or stripped == "---") and not line.startswith('>'):
             if buffer:
                 content = "\n".join(buffer).strip()
@@ -69,7 +60,6 @@ def parse_blocks(md_text: str) -> Tuple[List[RawBlock], str]:
     if buffer:
         content = "\n".join(buffer).strip()
         if content: blocks.append(RawBlock("paragraph", content, None, section_path, current_section_title))
-
     return blocks, h1_title

 def parse_edges_robust(text: str) -> Set[str]:
@@ -79,7 +69,6 @@
     for kind, target in inlines:
         k = kind.strip().lower(); t = target.strip()
         if k and t: found_edges.add(f"{k}:{t}")
-
     lines = text.split('\n'); current_edge_type = None
     for line in lines:
         stripped = line.strip()
diff --git a/app/core/chunking/chunking_strategies.py b/app/core/chunking/chunking_strategies.py
index d3e8cb6..de995fd 100644
--- a/app/core/chunking/chunking_strategies.py
+++ b/app/core/chunking/chunking_strategies.py
@@ -1,10 +1,7 @@
 """
 FILE: app/core/chunking/chunking_strategies.py
-DESCRIPTION: Strategien für atomares Sektions-Chunking v3.8.5.
-    Implementiert das 'Pack-and-Carry-Over' Verfahren:
-    1. Packt ganze Abschnitte basierend auf Schätzung.
-    2. Kein physischer Overflow-Check während des Packens.
-    3. Smart-Zerlegung von Übergrößen mit Carry-Over in die Queue.
+DESCRIPTION: Universelle Strategie für atomares Sektions-Chunking v3.9.0.
+    Regelkonforme Implementierung: Pack-Sections, Trust Estimation, Carry-Over.
 """
 from typing import List, Dict, Any, Optional
 from .chunking_models import RawBlock, Chunk
@@ -18,9 +15,6 @@ def _create_win(doc_title: str, sec_title: Optional[str], text: str) -> str:
     return f"{prefix}\n{text}".strip() if prefix else text

 def strategy_by_heading(blocks: List[RawBlock], config: Dict[str, Any], note_id: str, doc_title: str = "") -> List[Chunk]:
-    """
-    Universelle Heading-Strategie mit Fallunterscheidung für Smart-Edge-Allocation.
-    """
     smart_edge = config.get("enable_smart_edge_allocation", True)
     target = config.get("target", 400)
     max_tokens = config.get("max", 600)
@@ -43,10 +37,11 @@ def strategy_by_heading(blocks: List[RawBlock], config: Dict[str, Any], note_id:
     if not smart_edge:
         buf = []
         for b in blocks:
-            # Trenne bei jeder Überschrift <= split_level
+            # Trenne hart bei Überschrift <= split_level
             if b.kind == "heading" and b.level <= split_level:
-                # Nur flashen, wenn der Puffer nicht nur aus der aktuellen Überschrift besteht
-                if buf and not (len(buf) == 1 and buf[0].kind == "heading"):
+                # Prüfe, ob Puffer mehr als nur Überschriften enthält (keine leeren Chunks)
+                has_content = any(x.kind != "heading" for x in buf)
+                if buf and has_content:
                     _emit("\n\n".join([x.text for x in buf]), buf[0].section_title, buf[0].section_path)
                     buf = []
             buf.append(b)
@@ -54,49 +49,52 @@ def strategy_by_heading(blocks: List[RawBlock], config: Dict[str, Any], note_id:
         return chunks

     # --- FALL B: SMART EDGE ALLOCATION (Pack-and-Carry-Over) ---
-    # 1. Gruppierung in atomare Sektions-Einheiten (Sektions-Isolation)
-    sections: List[List[RawBlock]] = []
-    curr = []
+    # 1. Gruppierung in atomare Einheiten (Sektions-Isolation)
+    sections: List[Dict[str, Any]] = []
+    curr_blocks = []
     for b in blocks:
         if b.kind == "heading" and b.level <= split_level:
-            if curr: sections.append(curr)
-            curr = [b]
-        else: curr.append(b)
-    if curr: sections.append(curr)
+            if curr_blocks:
+                sections.append({"text": "\n\n".join([x.text for x in curr_blocks]), "meta": curr_blocks[0]})
+            curr_blocks = [b]
+        else: curr_blocks.append(b)
+    if curr_blocks:
+        sections.append({"text": "\n\n".join([x.text for x in curr_blocks]), "meta": curr_blocks[0]})

-    # 2. Queue-Management für Carry-Over
-    processing_queue = [{"blocks": s, "text": "\n\n".join([b.text for b in s])} for s in sections]
+    # 2. Warteschlangen-Verarbeitung (Regel 1-3)
+    # Wir nutzen eine Liste als Queue für Carry-Over-Reste
+    queue = list(sections)

     current_chunk_text = ""
     current_meta = {"title": None, "path": "/"}

-    while processing_queue:
-        item = processing_queue.pop(0)
+    while queue:
+        item = queue.pop(0)
         item_text = item["text"]
-        item_tokens = estimate_tokens(item_text)
-        # Metadaten-Initialisierung falls Chunk leer
-        if not current_chunk_text and "blocks" in item:
-            current_meta["title"] = item["blocks"][0].section_title
-            current_meta["path"] = item["blocks"][0].section_path
+        # Initialisiere Metadaten für einen neuen Chunk
+        if not current_chunk_text:
+            current_meta["title"] = item["meta"].section_title
+            current_meta["path"] = item["meta"].section_path

-        combined_est = estimate_tokens(current_chunk_text + "\n\n" + item_text) if current_chunk_text else item_tokens
+        # Schätzung (Regel 2: Wir verlassen uns darauf)
+        combined_text = (current_chunk_text + "\n\n" + item_text).strip() if current_chunk_text else item_text
+        combined_est = estimate_tokens(combined_text)

-        # Regel 1: Passt die vollständige Sektion nach Schätzung rein? (Kein harter Overflow-Check)
         if combined_est <= max_tokens:
-            current_chunk_text = (current_chunk_text + "\n\n" + item_text).strip()
+            # Regel 1: Vollständiger Abschnitt passt -> Aufnehmen
+            current_chunk_text = combined_text
         else:
-            # Regel 2: Wenn Puffer voll -> Emittieren und Sektion zurücklegen
+            # Er passt nicht ganz rein.
             if current_chunk_text:
+                # Puffer ist bereits gefüllt -> Wegschreiben, Item zurück in die Queue
                 _emit(current_chunk_text, current_meta["title"], current_meta["path"])
                 current_chunk_text = ""
-                processing_queue.insert(0, item)
+                queue.insert(0, item)
             else:
-                # Regel 3: Einzelne Sektion zu groß -> Smart Zerlegung
+                # Regel 3: Einzelner Abschnitt allein ist > max -> Smart Zerlegung
                 sents = split_sentences(item_text)
-                header_text = ""
-                if "blocks" in item and item["blocks"][0].kind == "heading":
-                    header_text = item["blocks"][0].text
-
+                header_prefix = item["meta"].text if item["meta"].kind == "heading" else ""
+
                 take_sents = []; take_len = 0
                 while sents:
                     s = sents.pop(0)
@@ -105,39 +103,38 @@ def strategy_by_heading(blocks: List[RawBlock], config: Dict[str, Any], note_id:
                         sents.insert(0, s); break
                     take_sents.append(s); take_len += slen

+                # Ersten Teil emittieren
                 _emit(" ".join(take_sents), current_meta["title"], current_meta["path"])

-                # Carry-Over: Der Rest wird an den Anfang der Queue geschoben
+                # Rest als Carry-Over zurück in die Queue (Regel 3)
                 if sents:
                     remainder_text = " ".join(sents)
-                    # Falls wir einen Header haben, fügen wir ihn dem Rest für den Kontext hinzu
-                    if header_text and not remainder_text.startswith(header_text):
-                        remainder_text = header_text + "\n\n" + remainder_text
-                    processing_queue.insert(0, {"text": remainder_text, "is_split": True})
+                    # Kontext-Erhalt: Überschrift für den Rest wiederholen
+                    if header_prefix and not remainder_text.startswith(header_prefix):
+                        remainder_text = header_prefix + "\n\n" + remainder_text
+                    queue.insert(0, {"text": remainder_text, "meta": item["meta"]})

+    # Letzten Rest wegschreiben
     if current_chunk_text:
         _emit(current_chunk_text, current_meta["title"], current_meta["path"])

     return chunks

 def strategy_sliding_window(blocks: List[RawBlock], config: Dict[str, Any], note_id: str, context_prefix: str = "") -> List[Chunk]:
-    """Sliding Window Strategie: Erhalten für alternative Anwendungsfälle."""
+    """Sliding Window: Unverändert erhalten für Standard-Typen."""
     target = config.get("target", 400); max_tokens = config.get("max", 600)
     chunks: List[Chunk] = []; buf: List[RawBlock] = []
-
     for b in blocks:
         b_tokens = estimate_tokens(b.text)
-        current_tokens = sum(estimate_tokens(x.text) for x in buf) if buf else 0
-        if current_tokens + b_tokens > max_tokens and buf:
+        curr_tokens = sum(estimate_tokens(x.text) for x in buf) if buf else 0
+        if curr_tokens + b_tokens > max_tokens and buf:
             txt = "\n\n".join([x.text for x in buf]); idx = len(chunks)
-            win = f"{context_prefix}\n{txt}".strip() if context_prefix else txt
-            chunks.append(Chunk(id=f"{note_id}#c{idx:02d}", note_id=note_id, index=idx, text=txt, window=win, token_count=current_tokens, section_title=buf[0].section_title, section_path=buf[0].section_path, neighbors_prev=None, neighbors_next=None))
+            win = _create_win(doc_title=context_prefix, sec_title=buf[0].section_title, text=txt)
+            chunks.append(Chunk(id=f"{note_id}#c{idx:02d}", note_id=note_id, index=idx, text=txt, window=win, token_count=curr_tokens, section_title=buf[0].section_title, section_path=buf[0].section_path, neighbors_prev=None, neighbors_next=None))
             buf = []
         buf.append(b)
-
     if buf:
         txt = "\n\n".join([x.text for x in buf]); idx = len(chunks)
-        win = f"{context_prefix}\n{txt}".strip() if context_prefix else txt
+        win = _create_win(doc_title=context_prefix, sec_title=buf[0].section_title, text=txt)
         chunks.append(Chunk(id=f"{note_id}#c{idx:02d}", note_id=note_id, index=idx, text=txt, window=win, token_count=estimate_tokens(txt), section_title=buf[0].section_title, section_path=buf[0].section_path, neighbors_prev=None, neighbors_next=None))
-
     return chunks
\ No newline at end of file