"""
|
|
FILE: app/core/chunking/chunking_strategies.py
|
|
DESCRIPTION: Universelle Strategie für atomares Sektions-Chunking v3.9.0.
|
|
Regelkonforme Implementierung: Pack-Sections, Trust Estimation, Carry-Over.
|
|
"""
|
|
from typing import List, Dict, Any, Optional
|
|
from .chunking_models import RawBlock, Chunk
|
|
from .chunking_utils import estimate_tokens
|
|
from .chunking_parser import split_sentences
|
|


def _create_win(doc_title: str, sec_title: Optional[str], text: str) -> str:
    """Prefix the chunk text with a 'doc_title > section_title' breadcrumb."""
    parts = [doc_title] if doc_title else []
    if sec_title and sec_title != doc_title:
        parts.append(sec_title)
    prefix = " > ".join(parts)
    return f"{prefix}\n{text}".strip() if prefix else text


def strategy_by_heading(blocks: List[RawBlock], config: Dict[str, Any], note_id: str, doc_title: str = "") -> List[Chunk]:
    """Chunk at heading boundaries, packing whole sections up to the token budget."""
    smart_edge = config.get("enable_smart_edge_allocation", True)
    target = config.get("target", 400)
    max_tokens = config.get("max", 600)
    split_level = config.get("split_level", 2)
    overlap_cfg = config.get("overlap", (50, 80))
    # "overlap" may be a single int or a (min, max) pair; pairs are averaged.
    # Note: the resolved value is not applied anywhere in this strategy.
    overlap = sum(overlap_cfg) // 2 if isinstance(overlap_cfg, (list, tuple)) else overlap_cfg
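
    # Expected config shape (defaults shown above):
    #   {"enable_smart_edge_allocation": True, "target": 400, "max": 600,
    #    "split_level": 2, "overlap": (50, 80)}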

    chunks: List[Chunk] = []

    def _emit(txt, title, path):
        """Append a Chunk built from the accumulated text and section metadata."""
        idx = len(chunks)
        win = _create_win(doc_title, title, txt)
        chunks.append(Chunk(
            id=f"{note_id}#c{idx:02d}", note_id=note_id, index=idx,
            text=txt, window=win, token_count=estimate_tokens(txt),
            section_title=title, section_path=path,
            neighbors_prev=None, neighbors_next=None,
        ))

    # --- CASE A: HARD SPLIT (enable_smart_edge_allocation: false) ---
    if not smart_edge:
        buf = []
        for b in blocks:
            # Split hard at every heading with level <= split_level
            if b.kind == "heading" and b.level <= split_level:
                # Only flush if the buffer holds more than headings (no empty chunks)
                has_content = any(x.kind != "heading" for x in buf)
                if buf and has_content:
                    _emit("\n\n".join([x.text for x in buf]), buf[0].section_title, buf[0].section_path)
                    buf = []
            buf.append(b)
        if buf:
            _emit("\n\n".join([x.text for x in buf]), buf[0].section_title, buf[0].section_path)
        return chunks

    # --- CASE B: SMART EDGE ALLOCATION (pack-and-carry-over) ---
    # 1. Group blocks into atomic units (section isolation)
    sections: List[Dict[str, Any]] = []
    curr_blocks = []
    for b in blocks:
        if b.kind == "heading" and b.level <= split_level:
            if curr_blocks:
                sections.append({"text": "\n\n".join([x.text for x in curr_blocks]), "meta": curr_blocks[0]})
            curr_blocks = [b]
        else:
            curr_blocks.append(b)
    if curr_blocks:
        sections.append({"text": "\n\n".join([x.text for x in curr_blocks]), "meta": curr_blocks[0]})

    # 2. Queue processing (rules 1-3)
    # A plain list serves as the queue for carry-over remainders
    queue = list(sections)
    current_chunk_text = ""
    current_meta = {"title": None, "path": "/"}
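    # Progress note: each pass either absorbs the item (rule 1), flushes the
    # buffer and retries the item against an empty buffer, or consumes at least
    # one sentence of an oversized section (rule 3), so the queue always drains.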

    while queue:
        item = queue.pop(0)
        item_text = item["text"]

        # Initialise metadata for a new chunk
        if not current_chunk_text:
            current_meta["title"] = item["meta"].section_title
            current_meta["path"] = item["meta"].section_path

        # Estimation (rule 2: we rely on this estimate)
        combined_text = (current_chunk_text + "\n\n" + item_text).strip() if current_chunk_text else item_text
        combined_est = estimate_tokens(combined_text)

        if combined_est <= max_tokens:
            # Rule 1: the whole section fits -> absorb it
            current_chunk_text = combined_text
        else:
            # The section does not fit in one piece.
            if current_chunk_text:
                # Buffer already holds text -> flush it, push the item back onto the queue
                _emit(current_chunk_text, current_meta["title"], current_meta["path"])
                current_chunk_text = ""
                queue.insert(0, item)
            else:
                # Rule 3: a single section alone exceeds max -> smart decomposition
                sents = split_sentences(item_text)
                header_prefix = item["meta"].text if item["meta"].kind == "heading" else ""

                # Pack sentences until the target budget is reached
                take_sents = []
                take_len = 0
                while sents:
                    s = sents.pop(0)
                    slen = estimate_tokens(s)
                    if take_len + slen > target and take_sents:
                        sents.insert(0, s)
                        break
                    take_sents.append(s)
                    take_len += slen

                # Emit the first part
                _emit(" ".join(take_sents), current_meta["title"], current_meta["path"])

                # Remainder goes back onto the queue as carry-over (rule 3)
                if sents:
                    remainder_text = " ".join(sents)
                    # Context preservation: repeat the heading for the remainder
                    if header_prefix and not remainder_text.startswith(header_prefix):
                        remainder_text = header_prefix + "\n\n" + remainder_text
                    queue.insert(0, {"text": remainder_text, "meta": item["meta"]})

    # Flush the final remainder
    if current_chunk_text:
        _emit(current_chunk_text, current_meta["title"], current_meta["path"])

    return chunks


def strategy_sliding_window(blocks: List[RawBlock], config: Dict[str, Any], note_id: str, context_prefix: str = "") -> List[Chunk]:
    """Sliding window: kept unchanged for standard note types."""
    target = config.get("target", 400)  # Note: read for config parity, not applied below
    max_tokens = config.get("max", 600)
    chunks: List[Chunk] = []
    buf: List[RawBlock] = []
    for b in blocks:
        b_tokens = estimate_tokens(b.text)
        curr_tokens = sum(estimate_tokens(x.text) for x in buf) if buf else 0
        if curr_tokens + b_tokens > max_tokens and buf:
            txt = "\n\n".join([x.text for x in buf])
            idx = len(chunks)
            win = _create_win(doc_title=context_prefix, sec_title=buf[0].section_title, text=txt)
            chunks.append(Chunk(
                id=f"{note_id}#c{idx:02d}", note_id=note_id, index=idx,
                text=txt, window=win, token_count=curr_tokens,
                section_title=buf[0].section_title, section_path=buf[0].section_path,
                neighbors_prev=None, neighbors_next=None,
            ))
            buf = []
        buf.append(b)
    if buf:
        txt = "\n\n".join([x.text for x in buf])
        idx = len(chunks)
        win = _create_win(doc_title=context_prefix, sec_title=buf[0].section_title, text=txt)
        chunks.append(Chunk(
            id=f"{note_id}#c{idx:02d}", note_id=note_id, index=idx,
            text=txt, window=win, token_count=estimate_tokens(txt),
            section_title=buf[0].section_title, section_path=buf[0].section_path,
            neighbors_prev=None, neighbors_next=None,
        ))
    return chunks
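

# A minimal smoke-test sketch. It assumes RawBlock can be constructed with the
# five attributes this module actually reads (kind, level, text, section_title,
# section_path); if the real model has extra required fields, adapt the calls.
if __name__ == "__main__":
    demo_blocks = [
        RawBlock(kind="heading", level=2, text="## Setup",
                 section_title="Setup", section_path="/setup"),
        RawBlock(kind="paragraph", level=0, text="Install the package with pip.",
                 section_title="Setup", section_path="/setup"),
    ]
    cfg = {"enable_smart_edge_allocation": True, "target": 400, "max": 600, "split_level": 2}
    for c in strategy_by_heading(demo_blocks, cfg, note_id="note-001", doc_title="Demo Doc"):
        print(c.id, c.token_count, repr(c.section_title))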