From 948d6f4b4789547dc54a262184a7df4f68b1f1d6 Mon Sep 17 00:00:00 2001 From: Lars Date: Tue, 11 Nov 2025 16:58:22 +0100 Subject: [PATCH] Dateien nach "app/core" hochladen --- app/core/chunk_payload.py | 207 ++++++++++++++++---------------------- 1 file changed, 86 insertions(+), 121 deletions(-) diff --git a/app/core/chunk_payload.py b/app/core/chunk_payload.py index b1781cf..48c272f 100644 --- a/app/core/chunk_payload.py +++ b/app/core/chunk_payload.py @@ -1,43 +1,35 @@ -# chunk_payload.py +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +""" +app/core/chunk_payload.py — Mindnet V2 (compat) + +Ziel: +- Bewahrt bestehendes Verhalten (index, chunk_profile, retriever_weight, etc.) +- Ergänzt optionale Denormalisierung: `tags` aus der Note‑FM auch auf Chunks +- Fügt Aliase für die Chunk‑Nummer hinzu: `ord` (v2‑Schema), `chunk_num`, `Chunk_Nummer` + (Letztere ist rein UI/Filter-freundlich für deine bestehenden Indizes mit dt. Keys.) + +Hinweis: +- `edge_defaults` gehören konzeptionell zur *Note* (Regelmenge des Quelltyps) + und werden nicht pro Chunk repliziert. Falls gewünscht, kann das optional + ergänzt werden – aktuell **nicht** gesetzt, siehe Design-Kommentar im PR. 
+""" from __future__ import annotations -from typing import Any, Dict, List, Optional, Tuple -import os, json, pathlib, re, yaml, hashlib -FRONTMATTER_RE = re.compile(r"^---\s*\n.*?\n---\s*\n?", re.DOTALL) +import json +import os +import pathlib +import hashlib +from typing import Any, Dict, List, Optional -def _as_dict(note: Any) -> Dict[str, Any]: - if isinstance(note, dict): - return note - d: Dict[str, Any] = {} - for attr in ("id","note_id","title","path","frontmatter","meta","metadata","body","text","content","raw","markdown","type","chunks"): - if hasattr(note, attr): - d[attr] = getattr(note, attr) - fm = d.get("frontmatter") or d.get("meta") or d.get("metadata") or {} - d["frontmatter"] = fm if isinstance(fm, dict) else {} - return d +from app.core.chunker import assemble_chunks -def _pick_args(*args, **kwargs) -> Tuple[Optional[str], Optional[Dict[str,Any]]]: - path = kwargs.get("path") - types_cfg = kwargs.get("types_config") - for a in args: - if path is None and isinstance(a, (str, pathlib.Path)): - path = str(a) - if types_cfg is None and isinstance(a, dict): - types_cfg = a - return path, types_cfg - -def _load_types_config(explicit: Optional[Dict[str, Any]] = None) -> Dict[str, Any]: - if isinstance(explicit, dict): - return explicit - for rel in ("config/config.yaml", "config/types.yaml"): - p = pathlib.Path(rel) - if p.exists(): - with p.open("r", encoding="utf-8") as f: - data = yaml.safe_load(f) or {} - if isinstance(data, dict) and "types" in data and isinstance(data["types"], dict): - return data["types"] - return data if isinstance(data, dict) else {} - return {} +def _as_dict(obj): + if isinstance(obj, dict): return obj + try: + return dict(obj) # type: ignore + except Exception: + return {"value": obj} def _coalesce(*vals): for v in vals: @@ -45,94 +37,52 @@ def _coalesce(*vals): return v return None -def _text_from_note(n: Dict[str, Any], path_hint: Optional[str]) -> str: - # häufige Felder - cand = [ - n.get("body"), - n.get("text"), - 
n.get("markdown"), - n.get("raw"), - ] - content = n.get("content") - if isinstance(content, str): - cand.append(content) - elif isinstance(content, dict): - for k in ("text","body","raw","markdown","content"): - v = content.get(k) - if isinstance(v, str): - cand.append(v) - for t in cand: - if isinstance(t, str) and t.strip(): - return t +def _env_float(name: str, default: float) -> float: + try: + return float(os.environ.get(name, default)) + except Exception: + return default - # Fallback: Datei lesen und Frontmatter entfernen - p = n.get("path") or path_hint - if p: - try: - pth = pathlib.Path(p) - if pth.exists(): - txt = pth.read_text(encoding="utf-8", errors="ignore") - if txt: - return FRONTMATTER_RE.sub("", txt).strip() - except Exception: - pass - return "" +def _ensure_list(x) -> list: + if x is None: return [] + if isinstance(x, list): return [str(i) for i in x] + if isinstance(x, (set, tuple)): return [str(i) for i in x] + return [str(x)] -def _iter_chunks(n: Dict[str, Any], profile: str, fulltext: str) -> List[Dict[str, Any]]: - # 1) vorhandene Chunks nehmen, wenn sinnvoll - existing = n.get("chunks") - out: List[Dict[str, Any]] = [] - if isinstance(existing, list) and existing: - for i, c in enumerate(existing): - text = "" - if isinstance(c, dict): - text = c.get("text") or c.get("body") or c.get("raw") or "" - elif isinstance(c, str): - text = c - if text and text.strip(): - out.append({"index": i, "text": text}) - if out: - return out +def _load_types_config(types_cfg_explicit: Optional[dict] = None) -> dict: + """Types-Registry *optional* einspeisen (bereits geparst), sonst lazy-laden vermeiden.""" + return types_cfg_explicit or {} - # 2) Fallback: profilabhängige Bündelung - if not isinstance(profile, str): - profile = "medium" - size = {"short": 600, "medium": 1200, "long": 2400}.get(profile, 1200) - if not fulltext: - return [] - paras = re.split(r"\n{2,}", fulltext) - buf = "" - chunks: List[str] = [] - for p in paras: - p = (p or "").strip() 
__all__ = ["_text_from_note", "_strip_frontmatter", "_iter_chunks"]  # export private helpers for star-import tooling


def _text_from_note(note: Dict[str, Any], path: str) -> str:
    """Return the note's full text.

    Primary sources are ``note["body"]`` / ``note["text"]`` as produced by the
    parser / import pipeline.  As a safety net — restoring v1 behaviour the
    patch removed — the file at ``note["path"]`` (or *path*) is read and its
    YAML frontmatter block stripped when neither field is set; without this,
    notes whose body was not inlined silently produce zero chunks.  Any I/O
    problem degrades to an empty string.
    """
    text = note.get("body") or note.get("text")
    if text:
        return text
    src = note.get("path") or path
    if src:
        try:
            p = pathlib.Path(src)
            if p.exists():
                raw = p.read_text(encoding="utf-8", errors="ignore")
                return _strip_frontmatter(raw).strip()
        except Exception:
            pass  # best-effort fallback only; never raise from here
    return ""


def _strip_frontmatter(raw: str) -> str:
    """Drop a leading ``--- ... ---`` YAML frontmatter block, if present.

    Implemented without ``re`` because the v2 module no longer imports it.
    """
    if raw.startswith("---"):
        end = raw.find("\n---", 3)
        if end != -1:
            nl = raw.find("\n", end + 1)
            return raw[nl + 1:] if nl != -1 else ""
    return raw


def _iter_chunks(note: Dict[str, Any], chunk_profile: str, fulltext: str) -> List[Dict[str, Any]]:
    """Delegate chunking to the canonical assemble_chunks(note_id, body, type) path.

    *chunk_profile* is accepted for signature compatibility; profile handling
    lives inside the chunker, not here.
    """
    # Local import so this section has no hard module-load dependency.
    from app.core.chunker import assemble_chunks

    fm = note.get("frontmatter") or {}
    note_id = note.get("id") or fm.get("id")
    ntype = fm.get("type") or note.get("type") or "note"
    # assemble_chunks yields dicts with at least {"index", "text"} (v1 contract).
    return assemble_chunks(note_id, fulltext, ntype)
frontmatter) + - `path_arg`: Pfad der Note + - `chunks_from_chunker`: optional: Ergebnis von assemble_chunks (sonst wird intern erzeugt) + + Rückgabe: Liste aus Payload-Dicts, jedes mit mind.: + - note_id, chunk_id, index, ord (Alias), title, type, path, text, retriever_weight, chunk_profile + - optional: tags (aus Note-FM), chunk_num, Chunk_Nummer (Aliases von index/ord) + """ n = _as_dict(note) - path_arg, types_cfg_explicit = _pick_args(*args, **kwargs) - types_cfg = _load_types_config(types_cfg_explicit) - fm = n.get("frontmatter") or {} note_type = str(fm.get("type") or n.get("type") or "note") - cfg_for_type = types_cfg.get(note_type, {}) if isinstance(types_cfg, dict) else {} + types_cfg = _load_types_config(types_cfg) - try: - default_rw = float(os.environ.get("MINDNET_DEFAULT_RETRIEVER_WEIGHT", 1.0)) - except Exception: - default_rw = 1.0 + cfg_for_type = types_cfg.get(note_type, {}) if isinstance(types_cfg, dict) else {} + default_rw = _env_float("MINDNET_DEFAULT_RETRIEVER_WEIGHT", 1.0) retriever_weight = _coalesce(fm.get("retriever_weight"), cfg_for_type.get("retriever_weight"), default_rw) try: @@ -148,10 +98,17 @@ def make_chunk_payloads(note: Any, *args, **kwargs) -> List[Dict[str, Any]]: path = n.get("path") or path_arg if isinstance(path, pathlib.Path): path = str(path) - path = path or "" # immer vorhanden + path = path or "" # garantiert vorhanden - fulltext = _text_from_note(n, path) - chunks = _iter_chunks(n, chunk_profile, fulltext) + # Denormalisierte Tags (optional): auf Chunks spiegeln, wenn vorhanden + tags = fm.get("tags") or fm.get("keywords") or n.get("tags") + tags_list = _ensure_list(tags) if tags else [] + + # Quelltext + fulltext = note_text if isinstance(note_text, str) else _text_from_note(n, path) + + # Chunks besorgen + chunks = chunks_from_chunker if isinstance(chunks_from_chunker, list) else _iter_chunks(n, chunk_profile, fulltext) payloads: List[Dict[str, Any]] = [] for c in chunks: @@ -159,6 +116,7 @@ def 
make_chunk_payloads(note: Any, *args, **kwargs) -> List[Dict[str, Any]]: text = c.get("text") if isinstance(c, dict) else (str(c) if c is not None else "") text = text if isinstance(text, str) else str(text or "") + # deterministische ID (unter Beibehaltung deines bisherigen Schemas) key = f"{note_id}|{idx}" h = hashlib.sha1(key.encode("utf-8")).hexdigest()[:12] if note_id else hashlib.sha1(f"{path}|{idx}".encode("utf-8")).hexdigest()[:12] chunk_id = f"{note_id}-{idx:03d}-{h}" if note_id else f"{h}" @@ -167,13 +125,20 @@ def make_chunk_payloads(note: Any, *args, **kwargs) -> List[Dict[str, Any]]: "note_id": note_id, "chunk_id": chunk_id, "index": idx, + "ord": idx, # Alias für v2‑Schema + "chunk_num": idx, # neutraler Alias + "Chunk_Nummer": idx, # deutschsprachiger Alias (zur Filter‑Kompatibilität) "title": title, "type": note_type, - "path": path, # <- garantiert vorhanden - "text": text, # <- nie leer, sonst werden keine Chunks erzeugt + "path": path, # garantiert vorhanden + "text": text, # nie leer, sonst kein Chunk "retriever_weight": retriever_weight, "chunk_profile": chunk_profile, } + if tags_list: + payload["tags"] = tags_list + + # JSON‑Roundtrip als einfache Validierung json.loads(json.dumps(payload, ensure_ascii=False)) payloads.append(payload)