app/core/ingestion.py updated
All checks were successful
Deploy mindnet to llm-node / deploy (push) Successful in 4s
parent 2985f5288b
commit 5213d262a2
@@ -3,10 +3,11 @@ FILE: app/core/ingestion.py
 DESCRIPTION: Main ingestion logic. Transforms Markdown into the graph.
 WP-20: Optimized for OpenRouter (mistralai/mistral-7b-instruct:free).
 WP-22: Content Lifecycle, Edge Registry Validation & Multi-Hash.
-FIX: Final Mistral hardening (<s> & [OUT] tags), robust JSON recovery & DoD sync.
-VERSION: 2.11.11
+FIX: Policy-violation detection & forced Ollama fallback on cloud refusal.
+This resolves the problem of empty edge lists for large protocols.
+VERSION: 2.11.13
 STATUS: Active
-DEPENDENCIES: app.core.parser, app.core.note_payload, app.core.chunker, app.services.llm_service, app.services.edge_registry
+DEPENDENCIES: app.core.parser, app.core.note_payload, app.core.chunker, app.services.llm_service
 """
 import os
 import json
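The new fallback behaviour described in the header relies on a handful of settings that appear further down in this diff: MINDNET_LLM_PROVIDER, OPENROUTER_MODEL, GEMINI_MODEL and LLM_FALLBACK_ENABLED. The settings module itself is not part of this commit; the following is only a minimal sketch of how those fields could be declared (pydantic-settings and all default values are assumptions):

# Hypothetical sketch only: field names are taken from this diff,
# defaults and the use of pydantic-settings are assumptions.
from pydantic_settings import BaseSettings

class Settings(BaseSettings):
    MINDNET_LLM_PROVIDER: str = "openrouter"   # assumed values: "openrouter" | "gemini" | "ollama"
    OPENROUTER_MODEL: str = "mistralai/mistral-7b-instruct:free"
    GEMINI_MODEL: str = "gemini-1.5-flash"     # placeholder, not confirmed by this commit
    LLM_FALLBACK_ENABLED: bool = True          # gates the forced local Ollama fallback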
@@ -53,9 +54,10 @@ def extract_json_from_response(text: str) -> Any:
     Extracts JSON data and cleans up LLM control tokens (Mistral/Llama).
     Removes <s>, [OUT], [/OUT] and Markdown blocks for maximum robustness.
     """
-    if not text: return []
+    if not text or not isinstance(text, str):
+        return []
 
-    # 1. Remove Mistral/Llama control tokens and tags
+    # 1. Remove Mistral/Llama control tokens and tags (BOS/EOS)
     clean = text.replace("<s>", "").replace("</s>", "")
     clean = clean.replace("[OUT]", "").replace("[/OUT]", "")
     clean = clean.strip()
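The hardening above targets raw Mistral completions that wrap their JSON in BOS/EOS and [OUT] tags. Assuming the part of the function not shown in this hunk parses the cleaned string with json.loads (and strips Markdown fences, as the docstring states), a call behaves roughly like this:

# Illustrative only; assumes the unshown remainder of the function
# json.loads() the cleaned string.
raw = '<s>[OUT] [{"to": "note-42", "kind": "relates_to"}] [/OUT]</s>'
edges = extract_json_from_response(raw)
# -> [{"to": "note-42", "kind": "relates_to"}]

assert extract_json_from_response("") == []     # empty input short-circuits
assert extract_json_from_response(None) == []   # non-string input is now rejected as well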
@@ -137,8 +139,8 @@ class IngestionService:
 
     async def _perform_smart_edge_allocation(self, text: str, note_id: str) -> List[Dict]:
         """
-        WP-20: Uses the hybrid LLM for semantic edge extraction.
-        Respects the provider setting (OpenRouter primary).
+        AI extraction with active detection of cloud refusals (policy violations).
+        Forces an automatic local Ollama fallback when the cloud response is empty.
         """
         provider = self.settings.MINDNET_LLM_PROVIDER
         model = self.settings.OPENROUTER_MODEL if provider == "openrouter" else self.settings.GEMINI_MODEL
@@ -153,7 +155,7 @@ class IngestionService:
         try:
             # Safety check: guard the template formatting against KeyError
             try:
-                # Uses the first 6000 characters as the context window
+                # We send at most 6000 characters (approx. 1500 tokens) to the LLM for extraction
                 prompt = template.format(
                     text=text[:6000],
                     note_id=note_id,
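The roughly 1500-token figure in the new comment presumably follows the common rule of thumb of about four characters per token (6000 / 4 = 1500); the exact count depends on the tokenizer of the selected model.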
@@ -163,6 +165,7 @@ class IngestionService:
                 logger.error(f"❌ [Ingestion] Prompt-Template Fehler (Variable {ke} fehlt).")
                 return []
 
+            # Step 1: request to the primary provider (cloud)
             response_json = await self.llm.generate_raw_response(
                 prompt=prompt, priority="background", force_json=True,
                 provider=provider, model_override=model
@@ -171,20 +174,44 @@ class IngestionService:
             # Uses the improved Mistral-safe JSON extractor
             raw_data = extract_json_from_response(response_json)
 
-            # Recovery: look for lists inside dictionaries (e.g. {"edges": [...]})
+            # FALLBACK LOGIC: if the cloud returns nothing (policy violation / no data training), force local
+            if not raw_data and provider != "ollama" and self.settings.LLM_FALLBACK_ENABLED:
+                logger.warning(
+                    f"🛑 [Ingestion] Cloud-Provider '{provider}' lieferte keine Daten für {note_id}. "
+                    f"Mögliche Policy Violation. Erzwinge LOKALEN FALLBACK via Ollama..."
+                )
+                response_json = await self.llm.generate_raw_response(
+                    prompt=prompt, priority="background", force_json=True,
+                    provider="ollama"
+                )
+                raw_data = extract_json_from_response(response_json)
+
+            # Recovery: look for lists inside dictionaries (e.g. {"matches": [...]})
             if isinstance(raw_data, dict):
-                for k in ["edges", "links", "results", "kanten"]:
+                logger.info(f"ℹ️ [Ingestion] LLM returned dict, trying recovery for {note_id}")
+                found_list = False
+                for k in ["edges", "links", "results", "kanten", "matches", "edge_list"]:
                     if k in raw_data and isinstance(raw_data[k], list):
                         raw_data = raw_data[k]
+                        found_list = True
                         break
+                # Last-resort dict fallback: interpret key-value pairs as edges
+                if not found_list:
+                    new_list = []
+                    for k, v in raw_data.items():
+                        if isinstance(v, str): new_list.append(f"{k}:{v}")
+                        elif isinstance(v, list):
+                            for target in v:
+                                if isinstance(target, str): new_list.append(f"{k}:{target}")
+                    raw_data = new_list
 
-            if not isinstance(raw_data, list):
-                logger.warning(f"⚠️ [Ingestion] LLM lieferte keine Liste für {note_id}")
+            if not isinstance(raw_data, list) or not raw_data:
+                logger.warning(f"⚠️ [Ingestion] LLM lieferte keine extrahierbaren Kanten für {note_id}")
                 return []
 
             processed = []
             for item in raw_data:
-                # Fix for 'str' object assignment error: handles both dict and string ["kind:target"]
+                # Handles both dict and string ["kind:target"]
                 if isinstance(item, dict) and "to" in item:
                     item["provenance"] = "semantic_ai"
                     item["line"] = f"ai-{provider}"
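The effect of the extended recovery chain is easiest to see on concrete payloads. The following standalone helper is a hypothetical restatement of the dict-recovery logic above, shown only to document the shapes it normalises:

from typing import Any, List

def _normalise_dict_response(raw: dict) -> List[Any]:
    """Hypothetical standalone version of the dict-recovery logic in this hunk."""
    for k in ["edges", "links", "results", "kanten", "matches", "edge_list"]:
        if k in raw and isinstance(raw[k], list):
            return raw[k]
    # Key-value fallback: interpret {"kind": "target"} or {"kind": [targets]} as "kind:target" strings
    out: List[Any] = []
    for k, v in raw.items():
        if isinstance(v, str):
            out.append(f"{k}:{v}")
        elif isinstance(v, list):
            out.extend(f"{k}:{t}" for t in v if isinstance(t, str))
    return out

# {"matches": [{"to": "n1"}]}            -> the list is used as-is
# {"relates_to": ["note-a", "note-b"]}   -> ["relates_to:note-a", "relates_to:note-b"]
# {"supports": "note-c"}                 -> ["supports:note-c"]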
@@ -234,7 +261,7 @@ class IngestionService:
         except Exception as e:
             return {**result, "error": f"Payload failed: {str(e)}"}
 
-        # 3. Change Detection (strict DoD implementation: no shortcut)
+        # 3. Change Detection
         old_payload = None if force_replace else self._fetch_note_payload(note_id)
         check_key = f"{self.active_hash_mode}:{hash_source}:{hash_normalize}"
         old_hash = (old_payload or {}).get("hashes", {}).get(check_key)
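The multi-hash change check combines the active hash mode, the hash source and the normalisation flag into one lookup key. A minimal self-contained sketch of that comparison follows; the hash mode name, the normalisation rule and all example values are assumptions, only the key format and the hashes lookup mirror the hunk:

import hashlib

def compute_hash(text: str, normalize: bool = True) -> str:
    # Assumed: SHA-256 over the (optionally whitespace-normalised) source text.
    data = " ".join(text.split()) if normalize else text
    return hashlib.sha256(data.encode("utf-8")).hexdigest()

active_hash_mode, hash_source, hash_normalize = "sha256", "markdown", True   # example values
old_payload = {"hashes": {"sha256:markdown:True": compute_hash("# Old note")}}

check_key = f"{active_hash_mode}:{hash_source}:{hash_normalize}"
old_hash = (old_payload or {}).get("hashes", {}).get(check_key)
changed = old_hash != compute_hash("# New note", normalize=hash_normalize)   # -> True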
@@ -328,7 +355,7 @@ class IngestionService:
         except: return None
 
     def _artifacts_missing(self, note_id: str) -> Tuple[bool, bool]:
-        """Actively checks Qdrant for existing chunks and edges (no shortcut)."""
+        """Actively checks Qdrant for existing chunks and edges."""
         from qdrant_client.http import models as rest
         try:
             f = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
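The filter built in this hunk can drive a plain count query to decide whether chunks and edges already exist for a note. Below is a sketch under the assumption that chunks and edges live in two separate Qdrant collections; the collection names are placeholders, not taken from this commit:

from qdrant_client import QdrantClient
from qdrant_client.http import models as rest

def artifacts_missing(client: QdrantClient, note_id: str) -> tuple[bool, bool]:
    """Sketch: returns (chunks_missing, edges_missing) via exact point counts."""
    f = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
    chunks = client.count(collection_name="chunks", count_filter=f, exact=True).count   # placeholder collection name
    edges = client.count(collection_name="edges", count_filter=f, exact=True).count     # placeholder collection name
    return chunks == 0, edges == 0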