mindnet/app/core/ingestion/ingestion_processor.py

"""
FILE: app/core/ingestion/ingestion_processor.py
DESCRIPTION: Der zentrale IngestionService (Orchestrator).
WP-24c: Integration der Symmetrie-Logik (Automatische inverse Kanten).
WP-25a: Integration der Mixture of Experts (MoE) Architektur.
WP-15b: Two-Pass Workflow mit globalem Kontext-Cache.
WP-20/22: Cloud-Resilienz und Content-Lifecycle integriert.
AUDIT v3.1.6: ID-Kollisions-Schutz & Point-Authority Check gegen
Überschreiben expliziter Kanten.
VERSION: 3.1.6 (WP-24c: Deterministic ID Protection)
STATUS: Active
"""
import logging
import asyncio
import os
from typing import Dict, List, Optional, Tuple, Any
# Core Module Imports
from app.core.parser import (
read_markdown, pre_scan_markdown, normalize_frontmatter,
validate_required_frontmatter, NoteContext
)
from app.core.chunking import assemble_chunks
# WP-24c: import for the deterministic edge-ID precomputation
from app.core.graph.graph_utils import _mk_edge_id
# MODULARIZATION: new import paths for the database layer
from app.core.database.qdrant import QdrantConfig, get_client, ensure_collections, ensure_payload_indexes
from app.core.database.qdrant_points import points_for_chunks, points_for_note, points_for_edges, upsert_batch
from qdrant_client.http import models as rest  # for real-time DB checks
# Services
from app.services.embeddings_client import EmbeddingsClient
from app.services.edge_registry import registry as edge_registry
from app.services.llm_service import LLMService
# Package-internal imports
from .ingestion_utils import load_type_registry, resolve_note_type, get_chunk_config_by_profile
from .ingestion_db import fetch_note_payload, artifacts_missing, purge_artifacts
from .ingestion_validation import validate_edge_candidate
from .ingestion_note_payload import make_note_payload
from .ingestion_chunk_payload import make_chunk_payloads
# Fallback for edges (structural linking)
try:
from app.core.graph.graph_derive_edges import build_edges_for_note
except ImportError:
def build_edges_for_note(*args, **kwargs): return []
logger = logging.getLogger(__name__)
class IngestionService:
    def __init__(self, collection_prefix: Optional[str] = None):
        """Initializes the service using the modular database infrastructure."""
from app.config import get_settings
self.settings = get_settings()
self.prefix = collection_prefix or self.settings.COLLECTION_PREFIX
self.cfg = QdrantConfig.from_env()
        # Keep the Qdrant config in sync with the instance prefix
self.cfg.prefix = self.prefix
self.client = get_client(self.cfg)
self.registry = load_type_registry()
self.embedder = EmbeddingsClient()
self.llm = LLMService()
        # WP-25a: resolve the vector dimension via the embedding profile (MoE)
embed_cfg = self.llm.profiles.get("embedding_expert", {})
self.dim = embed_cfg.get("dimensions") or self.settings.VECTOR_SIZE
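        # e.g. an "embedding_expert" profile of {"dimensions": 1024}
        # (hypothetical value) wins; VECTOR_SIZE only applies as a fallback.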
        # Decide which hash is authoritative for change detection
self.active_hash_mode = self.settings.CHANGE_DETECTION_MODE
self.batch_cache: Dict[str, NoteContext] = {} # WP-15b LocalBatchCache
try:
            # Call into the modularized schema logic
ensure_collections(self.client, self.prefix, self.dim)
ensure_payload_indexes(self.client, self.prefix)
except Exception as e:
logger.warning(f"DB initialization warning: {e}")
async def run_batch(self, file_paths: List[str], vault_root: str) -> List[Dict[str, Any]]:
"""
WP-15b: Implementiert den Two-Pass Ingestion Workflow.
Pass 1: Pre-Scan füllt den Context-Cache (3-Wege-Indexierung).
Pass 2: Verarbeitung nutzt den Cache für die semantische Prüfung.
"""
logger.info(f"🔍 [Pass 1] Pre-Scanning {len(file_paths)} files for Context Cache...")
for path in file_paths:
try:
                # Pass the registry along for dynamic scan depth
ctx = pre_scan_markdown(path, registry=self.registry)
if ctx:
                    # Index under multiple keys for robust lookups (ID, title, filename)
self.batch_cache[ctx.note_id] = ctx
self.batch_cache[ctx.title] = ctx
fname = os.path.splitext(os.path.basename(path))[0]
self.batch_cache[fname] = ctx
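                    # e.g. (hypothetical values) a note with note_id
                    # "20240101-foo", title "Foo Bar", file "notes/foo.md" is
                    # now reachable under all three keys, each mapping to the
                    # same NoteContext instance.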
except Exception as e:
logger.warning(f"⚠️ Pre-scan failed for {path}: {e}")
logger.info(f"🚀 [Pass 2] Semantic Processing of {len(file_paths)} files...")
return [await self.process_file(p, vault_root, apply=True, purge_before=True) for p in file_paths]
async def _is_explicit_edge_in_db(self, edge_id: str) -> bool:
"""
        WP-24c: Checks via point ID whether an explicit (manual) edge already exists in Qdrant.
        Prevents virtual symmetry edges from overwriting existing knowledge.
"""
edges_col = f"{self.prefix}_edges"
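        # Naming convention (inferred): collections are "<prefix>_<kind>",
        # e.g. prefix "mindnet" -> "mindnet_edges" here and "mindnet_chunks"
        # during the chunk upsert in process_file.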
try:
            # A direct point lookup is faster than scroll/filter
res = self.client.retrieve(
collection_name=edges_col,
ids=[edge_id],
with_payload=True,
with_vectors=False
)
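            # Decision sketch: no point or lookup error -> False; payload
            # {"virtual": True} -> False (virtual edges may be regenerated);
            # otherwise -> True (explicit edge, protected).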
            if res and not (res[0].payload or {}).get("virtual", False):
                return True  # the point exists and is NOT virtual
return False
except Exception:
return False
async def process_file(self, file_path: str, vault_root: str, **kwargs) -> Dict[str, Any]:
"""Transformiert eine Markdown-Datei in den Graphen."""
apply = kwargs.get("apply", False)
force_replace = kwargs.get("force_replace", False)
purge_before = kwargs.get("purge_before", False)
note_scope_refs = kwargs.get("note_scope_refs", False)
hash_source = kwargs.get("hash_source", "parsed")
hash_normalize = kwargs.get("hash_normalize", "canonical")
result = {"path": file_path, "status": "skipped", "changed": False, "error": None}
# 1. Parse & Lifecycle Gate
try:
parsed = read_markdown(file_path)
if not parsed: return {**result, "error": "Empty file"}
fm = normalize_frontmatter(parsed.frontmatter)
validate_required_frontmatter(fm)
except Exception as e:
return {**result, "error": f"Validation failed: {str(e)}"}
        # Dynamic lifecycle filter from the registry (WP-14)
ingest_cfg = self.registry.get("ingestion_settings", {})
ignore_list = ingest_cfg.get("ignore_statuses", ["system", "template", "archive", "hidden"])
current_status = fm.get("status", "draft").lower().strip()
if current_status in ignore_list:
return {**result, "status": "skipped", "reason": "lifecycle_filter"}
# 2. Payload & Change Detection (Multi-Hash)
note_type = resolve_note_type(self.registry, fm.get("type"))
note_pl = make_note_payload(
parsed, vault_root=vault_root, file_path=file_path,
hash_source=hash_source, hash_normalize=hash_normalize,
types_cfg=self.registry
)
note_id = note_pl["note_id"]
        # Compare against the database (Qdrant)
old_payload = None if force_replace else fetch_note_payload(self.client, self.prefix, note_id)
        # Check against the configured hash mode (body vs. full)
check_key = f"{self.active_hash_mode}:{hash_source}:{hash_normalize}"
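        # Example: with CHANGE_DETECTION_MODE="body" and the defaults
        # hash_source="parsed" / hash_normalize="canonical", the lookup key
        # is "body:parsed:canonical".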
old_hash = (old_payload or {}).get("hashes", {}).get(check_key)
new_hash = note_pl.get("hashes", {}).get(check_key)
        # Check whether chunks or edges are missing from the DB (repair mode)
c_miss, e_miss = artifacts_missing(self.client, self.prefix, note_id)
        # If the hash is identical and all artifacts exist -> skip
if not (force_replace or not old_payload or old_hash != new_hash or c_miss or e_miss):
return {**result, "status": "unchanged", "note_id": note_id}
if not apply:
return {**result, "status": "dry-run", "changed": True, "note_id": note_id}
# 3. Deep Processing (Chunking, Validation, Embedding)
try:
body_text = getattr(parsed, "body", "") or ""
edge_registry.ensure_latest()
            # Resolve the chunk profile via the registry
profile = note_pl.get("chunk_profile", "sliding_standard")
chunk_cfg = get_chunk_config_by_profile(self.registry, profile, note_type)
enable_smart = chunk_cfg.get("enable_smart_edge_allocation", False)
            # WP-15b: the chunker call prepares the candidate pool per chunk.
chunks = await assemble_chunks(note_id, body_text, note_type, config=chunk_cfg)
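            # Judging from the checks below, each candidate dict carries at
            # least a "provenance" key: "global_pool" marks semantically
            # mined candidates, anything else explicit wikilinks/callouts.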
            # Semantic edge validation (primary check)
for ch in chunks:
new_pool = []
for cand in getattr(ch, "candidate_pool", []):
                    # WP-25a: profile-driven binary validation
if cand.get("provenance") == "global_pool" and enable_smart:
is_valid = await validate_edge_candidate(
ch.text,
cand,
self.batch_cache,
self.llm,
profile_name="ingest_validator"
)
if is_valid:
new_pool.append(cand)
else:
                        # Explicit edges (wikilinks/callouts) are carried over
new_pool.append(cand)
ch.candidate_pool = new_pool
            # Build the payloads for the chunks
chunk_pls = make_chunk_payloads(
fm, note_pl["path"], chunks, file_path=file_path,
types_cfg=self.registry
)
            # Vectorize the window texts
vecs = await self.embedder.embed_documents([c.get("window") or "" for c in chunk_pls]) if chunk_pls else []
            # Aggregate all final edges
raw_edges = build_edges_for_note(
note_id, chunk_pls,
note_level_references=note_pl.get("references", []),
include_note_scope_refs=note_scope_refs
)
            # --- WP-24c: Symmetry injection (bidirectional graph logic) ---
final_edges = []
for e in raw_edges:
                # 1. Canonicalize the primary edge & set its owner
resolved_kind = edge_registry.resolve(
e.get("kind", "related_to"),
provenance=e.get("provenance", "explicit"),
context={"file": file_path, "note_id": note_id}
)
e["kind"] = resolved_kind
                # Mark the origin for selective purging
                e["origin_note_id"] = note_id
                e["virtual"] = False  # explicit edges are never virtual
final_edges.append(e)
                # 2. Look up the symmetric inverse via the registry
inverse_kind = edge_registry.get_inverse(resolved_kind)
target_raw = e.get("target_id")
                # ID resolution: find the real note_id in the cache
target_ctx = self.batch_cache.get(target_raw)
target_canonical_id = target_ctx.note_id if target_ctx else target_raw
                # Guards for symmetry creation (no self-loops, inverse must exist)
if (inverse_kind and target_canonical_id and target_canonical_id != note_id):
                    # A. Local redundancy: did the user already define the reverse direction in THIS note?
is_local_redundant = any(
ex.get("target_id") == target_canonical_id and
edge_registry.resolve(ex.get("kind")) == inverse_kind
for ex in raw_edges
)
                    # B. Cross-note redundancy check (v3.1.6): protect against point overwrites
is_cross_protected = False
                    # 1. Check the batch cache (for notes in the same run)
                    if target_ctx and hasattr(target_ctx, 'links'):
                        for link in target_ctx.links:
                            linked_ctx = self.batch_cache.get(link.get("to"))
                            link_to_id = linked_ctx.note_id if linked_ctx else link.get("to")
                            if link_to_id == note_id:
                                planned_kind_in_target = edge_registry.resolve(link.get("kind", "related_to"))
                                if planned_kind_in_target == inverse_kind:
                                    is_cross_protected = True
                                    break
                    # 2. Point-authority check (v3.1.6): compute the ID and look it up in the DB
                    if not is_cross_protected:
                        # Simulate the ID this edge would get in Qdrant.
                        # Parameter order: kind, source_id, target_id, scope
potential_id = _mk_edge_id(inverse_kind, target_canonical_id, note_id, e.get("scope", "note"))
is_cross_protected = await self._is_explicit_edge_in_db(potential_id)
                    # Only create the inverse if no form of redundancy/protection applies
if not is_local_redundant and not is_cross_protected and (inverse_kind != resolved_kind or resolved_kind not in ["related_to", "references"]):
inv_edge = e.copy()
                        # Reverse the direction
inv_edge["note_id"] = target_canonical_id
inv_edge["target_id"] = note_id
inv_edge["kind"] = inverse_kind
                        # Metadata for the structural edge
inv_edge["virtual"] = True
inv_edge["provenance"] = "structure"
inv_edge["confidence"] = e.get("confidence", 0.9) * 0.9
                        # Lifecycle anchoring: this edge logically belongs to its originator (note A)
inv_edge["origin_note_id"] = note_id
final_edges.append(inv_edge)
logger.info(f"🔄 [SYMMETRY] Built inverse: {target_canonical_id} --({inverse_kind})--> {note_id}")
edges = final_edges
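            # Worked example (hypothetical kinds, assuming the registry maps
            # "parent_of" to the inverse "child_of"): ingesting note A with an
            # explicit edge A --parent_of--> B additionally emits the virtual
            # edge B --child_of--> A carrying provenance="structure",
            # origin_note_id=A and confidence 0.9 * 0.9 = 0.81 at A's default
            # confidence of 0.9.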
            # 4. DB upsert via the modularized points logic
if purge_before and old_payload:
purge_artifacts(self.client, self.prefix, note_id)
            # Persist the main note
n_name, n_pts = points_for_note(self.prefix, note_pl, None, self.dim)
upsert_batch(self.client, n_name, n_pts)
            # Persist the chunks
if chunk_pls and vecs:
c_pts = points_for_chunks(self.prefix, chunk_pls, vecs)[1]
upsert_batch(self.client, f"{self.prefix}_chunks", c_pts)
            # Persist the edges (including the virtual inverses)
if edges:
e_pts = points_for_edges(self.prefix, edges)[1]
upsert_batch(self.client, f"{self.prefix}_edges", e_pts)
return {
"path": file_path,
"status": "success",
"changed": True,
"note_id": note_id,
"chunks_count": len(chunk_pls),
"edges_count": len(edges)
}
except Exception as e:
logger.error(f"Processing failed: {e}", exc_info=True)
return {**result, "error": str(e)}
async def create_from_text(self, markdown_content: str, filename: str, vault_root: str, folder: str = "00_Inbox") -> Dict[str, Any]:
"""Erstellt eine Note aus einem Textstream und triggert die Ingestion."""
target_path = os.path.join(vault_root, folder, filename)
os.makedirs(os.path.dirname(target_path), exist_ok=True)
with open(target_path, "w", encoding="utf-8") as f:
f.write(markdown_content)
await asyncio.sleep(0.1)
        # Trigger an immediate import with force_replace/purge_before
return await self.process_file(file_path=target_path, vault_root=vault_root, apply=True, force_replace=True, purge_before=True)
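

# ---------------------------------------------------------------------------
# Minimal usage sketch, not part of the service. Assumes a reachable Qdrant
# instance (per QdrantConfig.from_env()) and a vault directory "./vault"
# containing the hypothetical note "example.md"; adjust both before running.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    async def _demo() -> None:
        svc = IngestionService()
        results = await svc.run_batch(["vault/example.md"], vault_root="vault")
        for r in results:
            logger.info("ingested %s -> %s", r.get("path"), r.get("status"))

    asyncio.run(_demo())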