neue Wartelogik, neuer Prompt
This commit is contained in:
parent
d25d623b9c
commit
a403d8baf6
|
|
@ -1,37 +1,40 @@
|
||||||
"""
|
"""
|
||||||
app/services/llm_service.py — LLM Client (Ollama)
|
app/services/llm_service.py — LLM Client (Ollama)
|
||||||
Version: 0.3.0 (Fix: JSON Format Enforcement)
|
Version: 0.5.1 (Full: Retry Strategy + Chat Support + JSON Mode)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import httpx
|
import httpx
|
||||||
import yaml
|
import yaml
|
||||||
import logging
|
import logging
|
||||||
import os
|
import os
|
||||||
|
import asyncio
|
||||||
from pathlib import Path
|
from pathlib import Path
|
||||||
# ANNAHME: app.config ist verfügbar
|
from typing import Optional, Dict, Any
|
||||||
# from app.config import get_settings
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
# --- Mock get_settings für die Vollständigkeit ---
|
|
||||||
class Settings:
|
class Settings:
|
||||||
OLLAMA_URL = os.getenv("MINDNET_OLLAMA_URL", "http://127.0.0.1:11434")
|
OLLAMA_URL = os.getenv("MINDNET_OLLAMA_URL", "http://127.0.0.1:11434")
|
||||||
|
# Timeout für den einzelnen Request (nicht für den gesamten Retry-Zyklus)
|
||||||
LLM_TIMEOUT = float(os.getenv("MINDNET_LLM_TIMEOUT", 300.0))
|
LLM_TIMEOUT = float(os.getenv("MINDNET_LLM_TIMEOUT", 300.0))
|
||||||
LLM_MODEL = os.getenv("MINDNET_LLM_MODEL", "phi3:mini")
|
LLM_MODEL = os.getenv("MINDNET_LLM_MODEL", "phi3:mini")
|
||||||
PROMPTS_PATH = os.getenv("MINDNET_PROMPTS_PATH", "./config/prompts.yaml")
|
PROMPTS_PATH = os.getenv("MINDNET_PROMPTS_PATH", "./config/prompts.yaml")
|
||||||
|
|
||||||
def get_settings():
|
def get_settings():
|
||||||
return Settings()
|
return Settings()
|
||||||
# -----------------------------------------------
|
|
||||||
|
|
||||||
class LLMService:
|
class LLMService:
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
self.settings = get_settings()
|
self.settings = get_settings()
|
||||||
self.prompts = self._load_prompts()
|
self.prompts = self._load_prompts()
|
||||||
|
|
||||||
|
# Connection Limits erhöhen für Parallelität im Import
|
||||||
|
limits = httpx.Limits(max_keepalive_connections=5, max_connections=10)
|
||||||
|
|
||||||
self.client = httpx.AsyncClient(
|
self.client = httpx.AsyncClient(
|
||||||
base_url=self.settings.OLLAMA_URL,
|
base_url=self.settings.OLLAMA_URL,
|
||||||
timeout=self.settings.LLM_TIMEOUT
|
timeout=self.settings.LLM_TIMEOUT,
|
||||||
|
limits=limits
|
||||||
)
|
)
|
||||||
|
|
||||||
def _load_prompts(self) -> dict:
|
def _load_prompts(self) -> dict:
|
||||||
|
|
@ -45,53 +48,92 @@ class LLMService:
|
||||||
logger.error(f"Failed to load prompts: {e}")
|
logger.error(f"Failed to load prompts: {e}")
|
||||||
return {}
|
return {}
|
||||||
|
|
||||||
async def generate_raw_response(self, prompt: str, system: str = None, force_json: bool = False) -> str:
|
async def generate_raw_response(
|
||||||
|
self,
|
||||||
|
prompt: str,
|
||||||
|
system: str = None,
|
||||||
|
force_json: bool = False,
|
||||||
|
max_retries: int = 0, # Standard: 0 (Chat failt sofort, Import nutzt >0)
|
||||||
|
base_delay: float = 5.0 # Start-Wartezeit für Backoff
|
||||||
|
) -> str:
|
||||||
"""
|
"""
|
||||||
Führt einen LLM Call aus.
|
Führt einen LLM Call aus.
|
||||||
force_json: NEUER OPTIONALER PARAMETER zur Erzwingung des Ollama JSON-Modus.
|
Features:
|
||||||
|
- JSON Mode (für Semantic Analyzer)
|
||||||
|
- System Prompt (für Persona)
|
||||||
|
- Aggressive Retry (für robusten Import bei Überlast)
|
||||||
"""
|
"""
|
||||||
payload = {
|
payload: Dict[str, Any] = {
|
||||||
"model": self.settings.LLM_MODEL,
|
"model": self.settings.LLM_MODEL,
|
||||||
"prompt": prompt,
|
"prompt": prompt,
|
||||||
"stream": False,
|
"stream": False,
|
||||||
"options": {
|
"options": {
|
||||||
"temperature": 0.7,
|
# JSON braucht niedrige Temperature für valide Syntax
|
||||||
"num_ctx": 2048
|
"temperature": 0.1 if force_json else 0.7,
|
||||||
|
"num_ctx": 4096
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
# NEU: Ollama Format Erzwingung (wichtig für Semantic Chunking)
|
|
||||||
if force_json:
|
if force_json:
|
||||||
payload["format"] = "json"
|
payload["format"] = "json"
|
||||||
|
|
||||||
# WICHTIG: System-Prompt separat übergeben
|
|
||||||
if system:
|
if system:
|
||||||
payload["system"] = system
|
payload["system"] = system
|
||||||
|
|
||||||
try:
|
attempt = 0
|
||||||
response = await self.client.post("/api/generate", json=payload)
|
|
||||||
if response.status_code != 200:
|
# RETRY LOOP
|
||||||
logger.error(f"Ollama Error ({response.status_code}): {response.text}")
|
while True:
|
||||||
return "Fehler bei der Generierung."
|
try:
|
||||||
|
response = await self.client.post("/api/generate", json=payload)
|
||||||
data = response.json()
|
|
||||||
return data.get("response", "").strip()
|
if response.status_code == 200:
|
||||||
|
data = response.json()
|
||||||
except Exception as e:
|
return data.get("response", "").strip()
|
||||||
logger.error(f"LLM Raw Gen Error: {e}")
|
else:
|
||||||
return "Interner LLM Fehler."
|
# HTTP Fehler simulieren, um in den except-Block zu springen
|
||||||
|
response.raise_for_status()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
# CATCH-ALL: Wir fangen Timeouts, Connection Errors UND Protokollfehler
|
||||||
|
attempt += 1
|
||||||
|
|
||||||
|
# Check: Haben wir noch Versuche?
|
||||||
|
if attempt > max_retries:
|
||||||
|
# Finaler Fehler (wird im Chat oder Log angezeigt)
|
||||||
|
logger.error(f"LLM Final Error (Versuch {attempt}): {e}")
|
||||||
|
return "Interner LLM Fehler."
|
||||||
|
|
||||||
|
# Backoff berechnen (5s, 10s, 20s, 40s...)
|
||||||
|
wait_time = base_delay * (2 ** (attempt - 1))
|
||||||
|
error_msg = str(e) if str(e) else repr(e)
|
||||||
|
|
||||||
|
logger.warning(
|
||||||
|
f"⚠️ LLM Fehler ({attempt}/{max_retries}). "
|
||||||
|
f"Warte {wait_time}s zur Abkühlung... Grund: {error_msg}"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Warten und Loop wiederholen
|
||||||
|
await asyncio.sleep(wait_time)
|
||||||
|
|
||||||
async def generate_rag_response(self, query: str, context_str: str) -> str:
|
async def generate_rag_response(self, query: str, context_str: str) -> str:
|
||||||
"""
|
"""
|
||||||
Legacy Support: Wird vom Chat und Intent Router genutzt.
|
WICHTIG FÜR CHAT:
|
||||||
Ruft generate_raw_response OHNE force_json auf.
|
Generiert eine Antwort basierend auf RAG-Kontext.
|
||||||
|
Nutzt KEINE Retries (User will nicht warten), KEIN JSON.
|
||||||
"""
|
"""
|
||||||
system_prompt = self.prompts.get("system_prompt", "")
|
system_prompt = self.prompts.get("system_prompt", "")
|
||||||
rag_template = self.prompts.get("rag_template", "{context_str}\n\n{query}")
|
rag_template = self.prompts.get("rag_template", "{context_str}\n\n{query}")
|
||||||
|
|
||||||
final_prompt = rag_template.format(context_str=context_str, query=query)
|
final_prompt = rag_template.format(context_str=context_str, query=query)
|
||||||
|
|
||||||
# Aufruf bleibt im Standard-Modus (force_json=False Default)
|
# Chat-Call: force_json=False, max_retries=0
|
||||||
return await self.generate_raw_response(final_prompt, system=system_prompt)
|
return await self.generate_raw_response(
|
||||||
|
final_prompt,
|
||||||
|
system=system_prompt,
|
||||||
|
max_retries=0
|
||||||
|
)
|
||||||
|
|
||||||
async def close(self):
|
async def close(self):
|
||||||
await self.client.aclose()
|
if self.client:
|
||||||
|
await self.client.aclose()
|
||||||
|
|
@ -1,11 +1,11 @@
|
||||||
"""
|
"""
|
||||||
app/services/semantic_analyzer.py — Edge Validation & Filtering
|
app/services/semantic_analyzer.py — Edge Validation & Filtering
|
||||||
Version: 1.2 (Extended Observability & Debugging)
|
Version: 1.4 (Merged: Retry Strategy + Extended Observability)
|
||||||
"""
|
"""
|
||||||
|
|
||||||
import json
|
import json
|
||||||
import logging
|
import logging
|
||||||
from typing import List, Optional, Any
|
from typing import List, Optional
|
||||||
from dataclasses import dataclass
|
from dataclasses import dataclass
|
||||||
|
|
||||||
# Importe
|
# Importe
|
||||||
|
|
@ -21,7 +21,10 @@ class SemanticAnalyzer:
|
||||||
"""
|
"""
|
||||||
Sendet einen Chunk und eine Liste potenzieller Kanten an das LLM.
|
Sendet einen Chunk und eine Liste potenzieller Kanten an das LLM.
|
||||||
Das LLM filtert heraus, welche Kanten für diesen Chunk relevant sind.
|
Das LLM filtert heraus, welche Kanten für diesen Chunk relevant sind.
|
||||||
Enthält erweitertes Logging für Debugging.
|
|
||||||
|
Features:
|
||||||
|
- Retry Strategy: Wartet bei Überlastung (max_retries=5).
|
||||||
|
- Observability: Loggt Input-Größe, Raw-Response und Parsing-Details.
|
||||||
"""
|
"""
|
||||||
if not all_edges:
|
if not all_edges:
|
||||||
return []
|
return []
|
||||||
|
|
@ -30,7 +33,7 @@ class SemanticAnalyzer:
|
||||||
prompt_template = self.llm.prompts.get("edge_allocation_template")
|
prompt_template = self.llm.prompts.get("edge_allocation_template")
|
||||||
|
|
||||||
if not prompt_template:
|
if not prompt_template:
|
||||||
logger.warning("⚠️ Prompt 'edge_allocation_template' fehlt. Nutze Fallback-Prompt.")
|
logger.warning("⚠️ [SemanticAnalyzer] Prompt 'edge_allocation_template' fehlt. Nutze Fallback.")
|
||||||
prompt_template = (
|
prompt_template = (
|
||||||
"TASK: Wähle aus den Kandidaten die relevanten Kanten für den Text.\n"
|
"TASK: Wähle aus den Kandidaten die relevanten Kanten für den Text.\n"
|
||||||
"TEXT: {chunk_text}\n"
|
"TEXT: {chunk_text}\n"
|
||||||
|
|
@ -41,23 +44,27 @@ class SemanticAnalyzer:
|
||||||
# 2. Kandidaten-Liste formatieren
|
# 2. Kandidaten-Liste formatieren
|
||||||
edges_str = "\n".join([f"- {e}" for e in all_edges])
|
edges_str = "\n".join([f"- {e}" for e in all_edges])
|
||||||
|
|
||||||
# LOG: Request Info
|
# LOG: Request Info (Wichtig für Debugging)
|
||||||
logger.debug(f"🔍 [SemanticAnalyzer] Request: {len(chunk_text)} chars Text, {len(all_edges)} Candidates.")
|
logger.debug(f"🔍 [SemanticAnalyzer] Request: {len(chunk_text)} chars Text, {len(all_edges)} Candidates.")
|
||||||
|
|
||||||
# 3. Prompt füllen
|
# 3. Prompt füllen
|
||||||
final_prompt = prompt_template.format(
|
final_prompt = prompt_template.format(
|
||||||
chunk_text=chunk_text[:3000],
|
chunk_text=chunk_text[:3500], # Etwas mehr Kontext als früher (3000 -> 3500)
|
||||||
edge_list=edges_str
|
edge_list=edges_str
|
||||||
)
|
)
|
||||||
|
|
||||||
try:
|
try:
|
||||||
# 4. LLM Call mit JSON Erzwingung
|
# 4. LLM Call mit JSON Erzwingung UND Retry-Logik (Merged V1.3)
|
||||||
|
# max_retries=5 bedeutet: 5s -> 10s -> 20s -> 40s -> 80s Pause.
|
||||||
response_json = await self.llm.generate_raw_response(
|
response_json = await self.llm.generate_raw_response(
|
||||||
prompt=final_prompt,
|
prompt=final_prompt,
|
||||||
force_json=True
|
force_json=True,
|
||||||
|
max_retries=5,
|
||||||
|
base_delay=5.0
|
||||||
)
|
)
|
||||||
|
|
||||||
# LOG: Raw Response (nur die ersten 200 Zeichen, um Log nicht zu fluten, außer bei Fehler)
|
# LOG: Raw Response Preview (Wichtig um zu sehen, was das LLM liefert)
|
||||||
|
# Zeigt nur die ersten 200 Zeichen, um Log nicht zu fluten
|
||||||
logger.debug(f"📥 [SemanticAnalyzer] Raw Response (Preview): {response_json[:200]}...")
|
logger.debug(f"📥 [SemanticAnalyzer] Raw Response (Preview): {response_json[:200]}...")
|
||||||
|
|
||||||
# 5. Parsing & Cleaning
|
# 5. Parsing & Cleaning
|
||||||
|
|
@ -73,7 +80,7 @@ class SemanticAnalyzer:
|
||||||
# LOG: Detaillierter Fehlerbericht für den User
|
# LOG: Detaillierter Fehlerbericht für den User
|
||||||
logger.error(f"❌ [SemanticAnalyzer] JSON Decode Error.")
|
logger.error(f"❌ [SemanticAnalyzer] JSON Decode Error.")
|
||||||
logger.error(f" Grund: {json_err}")
|
logger.error(f" Grund: {json_err}")
|
||||||
logger.error(f" Empfangener String: {clean_json}")
|
logger.error(f" Empfangener String: {clean_json[:500]}") # Zeige max 500 chars des Fehlers
|
||||||
logger.info(" -> Workaround: Fallback auf 'Alle Kanten' (durch Chunker).")
|
logger.info(" -> Workaround: Fallback auf 'Alle Kanten' (durch Chunker).")
|
||||||
return []
|
return []
|
||||||
|
|
||||||
|
|
@ -85,7 +92,7 @@ class SemanticAnalyzer:
|
||||||
valid_edges = [str(e) for e in data if isinstance(e, str) and ":" in e]
|
valid_edges = [str(e) for e in data if isinstance(e, str) and ":" in e]
|
||||||
|
|
||||||
elif isinstance(data, dict):
|
elif isinstance(data, dict):
|
||||||
# Abweichende Formate behandeln
|
# Abweichende Formate behandeln (Extended Logging V1.2)
|
||||||
logger.info(f"ℹ️ [SemanticAnalyzer] LLM lieferte Dict statt Liste. Versuche Reparatur. Keys: {list(data.keys())}")
|
logger.info(f"ℹ️ [SemanticAnalyzer] LLM lieferte Dict statt Liste. Versuche Reparatur. Keys: {list(data.keys())}")
|
||||||
|
|
||||||
for key, val in data.items():
|
for key, val in data.items():
|
||||||
|
|
@ -108,6 +115,7 @@ class SemanticAnalyzer:
|
||||||
|
|
||||||
# LOG: Ergebnis
|
# LOG: Ergebnis
|
||||||
if final_result:
|
if final_result:
|
||||||
|
# Nur Info, wenn wirklich was gefunden wurde, sonst spammt es bei leeren Chunks
|
||||||
logger.info(f"✅ [SemanticAnalyzer] Success. {len(final_result)} Kanten zugewiesen.")
|
logger.info(f"✅ [SemanticAnalyzer] Success. {len(final_result)} Kanten zugewiesen.")
|
||||||
else:
|
else:
|
||||||
logger.debug(" [SemanticAnalyzer] Keine spezifischen Kanten erkannt (Empty Result).")
|
logger.debug(" [SemanticAnalyzer] Keine spezifischen Kanten erkannt (Empty Result).")
|
||||||
|
|
|
||||||
|
|
@ -143,25 +143,27 @@ interview_template: |
|
||||||
# ---------------------------------------------------------
|
# ---------------------------------------------------------
|
||||||
# 6. EDGE_ALLOCATION: Kantenfilter (Intent: OFFLINE_FILTER)
|
# 6. EDGE_ALLOCATION: Kantenfilter (Intent: OFFLINE_FILTER)
|
||||||
# ---------------------------------------------------------
|
# ---------------------------------------------------------
|
||||||
edge_allocation_template: |
|
edge_allocation_template: |
|
||||||
edge_allocation_template: |
|
|
||||||
TASK:
|
TASK:
|
||||||
Du bist ein semantischer Filter für einen Knowledge Graph.
|
Du bist ein JSON-Filter. Deine Aufgabe ist es, aus einer Liste von "Kandidaten" nur jene Strings auszuwählen, die inhaltlich zum "Textabschnitt" passen.
|
||||||
Ordne die unten stehenden "Kandidaten-Kanten" dem vorliegenden Textabschnitt zu.
|
|
||||||
|
|
||||||
TEXTABSCHNITT:
|
TEXTABSCHNITT:
|
||||||
"""
|
"""
|
||||||
{chunk_text}
|
{chunk_text}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
KANDIDATEN-KANTEN (Gefunden im gesamten Dokument):
|
KANDIDATEN (Liste):
|
||||||
{edge_list}
|
{edge_list}
|
||||||
|
|
||||||
ANWEISUNG:
|
REGELN:
|
||||||
1. Welche der Kandidaten-Kanten sind für das Verständnis DIESES spezifischen Textabschnitts relevant?
|
1. Wähle nur Kanten, die für den Textabschnitt relevant sind.
|
||||||
2. Gib NUR die relevanten Kanten als JSON-Liste von Strings zurück.
|
2. Gib das Ergebnis als flache JSON-Liste zurück.
|
||||||
3. Verändere den Wortlaut der Kanten nicht.
|
3. Verändere die Strings nicht.
|
||||||
4. Wenn keine Kante passt, gib eine leere Liste [] zurück.
|
4. KEINE Objekte, KEINE Keys wie "edges" oder "kanten". Nur die Liste.
|
||||||
|
|
||||||
OUTPUT FORMAT (JSON):
|
BEISPIEL:
|
||||||
["kind:Target", "kind:Target"]
|
Input Kandidaten: ["uses:ToolA", "references:DocB", "related_to:ThemaC"]
|
||||||
|
Text erwähnt ToolA aber nicht DocB.
|
||||||
|
Output: ["uses:ToolA"]
|
||||||
|
|
||||||
|
DEIN OUTPUT (JSON):
|
||||||
Loading…
Reference in New Issue
Block a user