"""
|
|
FILE: app/core/ingestion/ingestion_processor.py
|
|
DESCRIPTION: Der zentrale IngestionService (Orchestrator).
|
|
WP-14: Modularisierung der Datenbank-Ebene (app.core.database).
|
|
WP-15b: Two-Pass Workflow mit globalem Kontext-Cache.
|
|
WP-20/22: Cloud-Resilienz und Content-Lifecycle integriert.
|
|
AUDIT v2.13.10: Umstellung auf app.core.database Infrastruktur.
|
|
VERSION: 2.13.10
|
|
STATUS: Active
|
|
"""
import logging
import asyncio
import os
from typing import Dict, List, Optional, Tuple, Any

# Core Module Imports
from app.core.parser import (
    read_markdown, pre_scan_markdown, normalize_frontmatter,
    validate_required_frontmatter, NoteContext
)
from app.core.chunking import assemble_chunks

# MODULARIZATION: new import paths for the database layer
from app.core.database.qdrant import QdrantConfig, get_client, ensure_collections, ensure_payload_indexes
from app.core.database.qdrant_points import points_for_chunks, points_for_note, points_for_edges, upsert_batch

# Services
from app.services.embeddings_client import EmbeddingsClient
from app.services.edge_registry import registry as edge_registry
from app.services.llm_service import LLMService

# Package-internal imports (refactoring WP-14)
from .ingestion_utils import load_type_registry, resolve_note_type, get_chunk_config_by_profile
from .ingestion_db import fetch_note_payload, artifacts_missing, purge_artifacts
from .ingestion_validation import validate_edge_candidate
from .ingestion_note_payload import make_note_payload
from .ingestion_chunk_payload import make_chunk_payloads

# Fallback for edges (structural linking)
try:
    from app.core.graph.graph_derive_edges import build_edges_for_note
except ImportError:
    def build_edges_for_note(*args, **kwargs): return []

logger = logging.getLogger(__name__)


class IngestionService:
    def __init__(self, collection_prefix: Optional[str] = None):
        """Initializes the service using the new database infrastructure."""
        from app.config import get_settings
        self.settings = get_settings()

        self.prefix = collection_prefix or self.settings.COLLECTION_PREFIX
        self.cfg = QdrantConfig.from_env()
        # Synchronize the configuration with the instance prefix
        self.cfg.prefix = self.prefix
        self.client = get_client(self.cfg)
        self.dim = self.settings.VECTOR_SIZE
        self.registry = load_type_registry()
        self.embedder = EmbeddingsClient()
        self.llm = LLMService()

        self.active_hash_mode = self.settings.CHANGE_DETECTION_MODE
        self.batch_cache: Dict[str, NoteContext] = {}  # WP-15b LocalBatchCache

        try:
            # Call into the modularized schema logic
            ensure_collections(self.client, self.prefix, self.dim)
            ensure_payload_indexes(self.client, self.prefix)
        except Exception as e:
            logger.warning(f"DB initialization warning: {e}")

    async def run_batch(self, file_paths: List[str], vault_root: str) -> List[Dict[str, Any]]:
        """
        WP-15b: Implements the two-pass ingestion workflow.
        Pass 1: The pre-scan fills the context cache (three-way indexing).
        Pass 2: Processing uses the cache for the semantic check.
        """
        logger.info(f"🔍 [Pass 1] Pre-Scanning {len(file_paths)} files for Context Cache...")
        for path in file_paths:
            try:
                # Pass the registry for dynamic scan depth
                ctx = pre_scan_markdown(path, registry=self.registry)
                if ctx:
                    # Multiple indexing for robust look-up (ID, title, filename)
                    self.batch_cache[ctx.note_id] = ctx
                    self.batch_cache[ctx.title] = ctx
                    fname = os.path.splitext(os.path.basename(path))[0]
                    self.batch_cache[fname] = ctx
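                    # Example (hypothetical values): a file "2024-03-graph-theory.md"
                    # with frontmatter id "n-42" and title "Graph Theory" becomes
                    # reachable as batch_cache["n-42"], batch_cache["Graph Theory"],
                    # and batch_cache["2024-03-graph-theory"] alike.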
            except Exception as e:
                logger.warning(f"⚠️ Pre-scan failed for {path}: {e}")

        logger.info(f"🚀 [Pass 2] Semantic Processing of {len(file_paths)} files...")
        return [await self.process_file(p, vault_root, apply=True, purge_before=True) for p in file_paths]

    async def process_file(self, file_path: str, vault_root: str, **kwargs) -> Dict[str, Any]:
        """Transforms a Markdown file into the graph."""
        apply = kwargs.get("apply", False)
        force_replace = kwargs.get("force_replace", False)
        purge_before = kwargs.get("purge_before", False)
        note_scope_refs = kwargs.get("note_scope_refs", False)
        hash_source = kwargs.get("hash_source", "parsed")
        hash_normalize = kwargs.get("hash_normalize", "canonical")

        result = {"path": file_path, "status": "skipped", "changed": False, "error": None}

        # 1. Parse & Lifecycle Gate
        try:
            parsed = read_markdown(file_path)
            if not parsed:
                return {**result, "error": "Empty file"}
            fm = normalize_frontmatter(parsed.frontmatter)
            validate_required_frontmatter(fm)
        except Exception as e:
            return {**result, "error": f"Validation failed: {str(e)}"}

        # Dynamic lifecycle filter from the registry (WP-14)
        ingest_cfg = self.registry.get("ingestion_settings", {})
        ignore_list = ingest_cfg.get("ignore_statuses", ["system", "template", "archive", "hidden"])

        current_status = fm.get("status", "draft").lower().strip()
        if current_status in ignore_list:
            return {**result, "status": "skipped", "reason": "lifecycle_filter"}

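        # Example: a note whose frontmatter carries `status: template` is
        # filtered out above before any chunking or DB work happens.
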
        # 2. Payload & Change Detection (Multi-Hash)
        note_type = resolve_note_type(self.registry, fm.get("type"))
        note_pl = make_note_payload(
            parsed, vault_root=vault_root, file_path=file_path,
            hash_source=hash_source, hash_normalize=hash_normalize,
            types_cfg=self.registry
        )
        note_id = note_pl["note_id"]

        old_payload = None if force_replace else fetch_note_payload(self.client, self.prefix, note_id)
        check_key = f"{self.active_hash_mode}:{hash_source}:{hash_normalize}"
        old_hash = (old_payload or {}).get("hashes", {}).get(check_key)
        new_hash = note_pl.get("hashes", {}).get(check_key)

        c_miss, e_miss = artifacts_missing(self.client, self.prefix, note_id)
        if not (force_replace or not old_payload or old_hash != new_hash or c_miss or e_miss):
            return {**result, "status": "unchanged", "note_id": note_id}

        if not apply:
            return {**result, "status": "dry-run", "changed": True, "note_id": note_id}

        # 3. Deep Processing (Chunking, Validation, Embedding)
        try:
            body_text = getattr(parsed, "body", "") or ""
            edge_registry.ensure_latest()
            profile = fm.get("chunk_profile") or fm.get("chunking_profile") or "sliding_standard"
            chunk_cfg = get_chunk_config_by_profile(self.registry, profile, note_type)
            enable_smart = chunk_cfg.get("enable_smart_edge_allocation", False)

            # WP-15b: the chunker call prepares the candidate pool
            chunks = await assemble_chunks(note_id, body_text, note_type, config=chunk_cfg)
            for ch in chunks:
                filtered = []
                for cand in getattr(ch, "candidate_pool", []):
                    # WP-15b: only global_pool candidates require binary validation
                    if cand.get("provenance") == "global_pool" and enable_smart:
                        if await validate_edge_candidate(ch.text, cand, self.batch_cache, self.llm, self.settings.MINDNET_LLM_PROVIDER):
                            filtered.append(cand)
                    else:
                        filtered.append(cand)
                ch.candidate_pool = filtered
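            # Illustrative sketch (assumed candidate shape): an entry such as
            # {"target": "Graph Theory", "provenance": "global_pool"} survives only
            # if the LLM check above confirms it against the chunk text, whereas
            # candidates with any other provenance (or with smart allocation
            # disabled) are kept unconditionally.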

            # Payload creation via the internal modules
            chunk_pls = make_chunk_payloads(
                fm, note_pl["path"], chunks, file_path=file_path,
                types_cfg=self.registry
            )
            vecs = await self.embedder.embed_documents([c.get("window") or "" for c in chunk_pls]) if chunk_pls else []

            # Edge aggregation
            edges = build_edges_for_note(
                note_id, chunk_pls,
                note_level_references=note_pl.get("references", []),
                include_note_scope_refs=note_scope_refs
            )
            for e in edges:
                e["kind"] = edge_registry.resolve(
                    e.get("kind", "related_to"),
                    provenance=e.get("provenance", "explicit"),
                    context={"file": file_path, "note_id": note_id, "line": e.get("line", "system")}
                )
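            # Note (assumed semantics): edge_registry.resolve() normalizes raw kinds
            # such as the default "related_to" to canonical registry entries; the
            # provenance and file context are passed along for traceability.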

            # 4. DB upsert via the modularized points logic
            if purge_before and old_payload:
                purge_artifacts(self.client, self.prefix, note_id)

            n_name, n_pts = points_for_note(self.prefix, note_pl, None, self.dim)
            upsert_batch(self.client, n_name, n_pts)

            if chunk_pls and vecs:
                c_pts = points_for_chunks(self.prefix, chunk_pls, vecs)[1]
                upsert_batch(self.client, f"{self.prefix}_chunks", c_pts)

            if edges:
                e_pts = points_for_edges(self.prefix, edges)[1]
                upsert_batch(self.client, f"{self.prefix}_edges", e_pts)

            return {
                "path": file_path,
                "status": "success",
                "changed": True,
                "note_id": note_id,
                "chunks_count": len(chunk_pls),
                "edges_count": len(edges)
            }
        except Exception as e:
            logger.error(f"Processing failed: {e}", exc_info=True)
            return {**result, "error": str(e)}

    async def create_from_text(self, markdown_content: str, filename: str, vault_root: str, folder: str = "00_Inbox") -> Dict[str, Any]:
        """Creates a note from a text stream and triggers ingestion."""
        target_path = os.path.join(vault_root, folder, filename)
        os.makedirs(os.path.dirname(target_path), exist_ok=True)
        with open(target_path, "w", encoding="utf-8") as f:
            f.write(markdown_content)
        await asyncio.sleep(0.1)
        return await self.process_file(file_path=target_path, vault_root=vault_root, apply=True, force_replace=True, purge_before=True)
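

# Minimal usage sketch (not part of the service itself). Assumes a reachable
# Qdrant instance, configured embedding/LLM backends, and an existing vault;
# the paths below are hypothetical placeholders.
if __name__ == "__main__":
    async def _demo():
        service = IngestionService()
        vault = "/path/to/vault"  # hypothetical vault root
        files = [os.path.join(vault, "00_Inbox", "example_note.md")]  # hypothetical note
        results = await service.run_batch(files, vault_root=vault)
        for r in results:
            print(r["path"], r["status"], r.get("note_id"))
        # Alternatively, create a note from raw text and ingest it in one step
        # (the content must carry the required frontmatter):
        # await service.create_from_text("---\n...\n---\n# Hello\n", "hello.md", vault)

    asyncio.run(_demo())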