"""
app/services/llm_service.py — LLM Client (Ollama)
Version:
0.1.2 (WP-05 Fix: Increased Timeout for CPU Inference)
"""
import logging
from pathlib import Path

import httpx
import yaml

from app.config import get_settings

logger = logging.getLogger(__name__)

class LLMService:
    def __init__(self):
        self.settings = get_settings()
        self.prompts = self._load_prompts()
        # FIX: timeout raised to 120 seconds for CPU-only servers
        self.client = httpx.AsyncClient(base_url=self.settings.OLLAMA_URL, timeout=120.0)
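        # Note: httpx resolves relative request paths (e.g. "/api/generate"
        # below) against base_url, so OLLAMA_URL should point at the server
        # root, typically "http://localhost:11434".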
    def _load_prompts(self) -> dict:
        """Load prompts from the configured YAML file."""
        path = Path(self.settings.PROMPTS_PATH)
        if not path.exists():
            logger.warning(f"Prompt config not found at {path}, using defaults.")
            return {
                "system_prompt": "You are a helpful AI assistant.",
                "rag_template": "Context: {context_str}\nQuestion: {query}",
            }
        try:
            with open(path, "r", encoding="utf-8") as f:
                # safe_load returns None for an empty file; fall back to {}.
                return yaml.safe_load(f) or {}
        except Exception as e:
            logger.error(f"Failed to load prompts: {e}")
            return {}
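
    # Expected YAML shape (illustrative; the keys mirror the .get() lookups
    # used below — adjust if your prompts file differs):
    #
    #   system_prompt: "You are a helpful AI assistant."
    #   rag_template: |
    #     Context: {context_str}
    #     Question: {query}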

    async def generate_rag_response(self, query: str, context_str: str) -> str:
        """Generate an answer based on the query and the retrieved context."""
        system_prompt = self.prompts.get("system_prompt", "")
        template = self.prompts.get("rag_template", "{context_str}\n\n{query}")
        # Fill the template with the retrieved context and the user query.
        final_prompt = template.format(context_str=context_str, query=query)
        payload = {
            "model": self.settings.LLM_MODEL,
            "system": system_prompt,
            "prompt": final_prompt,
            "stream": False,
            "options": {
                "temperature": 0.7,
                # A smaller context window saves compute if 4096 is too much.
                "num_ctx": 2048,
            },
        }
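        # With "stream": False, Ollama returns the full completion in a single
        # JSON body whose "response" field holds the generated text, which is
        # what the parsing below relies on.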
        try:
            response = await self.client.post("/api/generate", json=payload)
            if response.status_code != 200:
                error_msg = response.text
                logger.error(f"Ollama API error ({response.status_code}): {error_msg}")
                return f"LLM error (is model '{self.settings.LLM_MODEL}' available?): {error_msg}"
            data = response.json()
            return data.get("response", "")
        except httpx.ReadTimeout:
            return "Timeout: the model took too long to respond (>120 s). Hardware limit reached?"
        except httpx.ConnectError:
            return "Connection error: is Ollama running (port 11434)?"
        except Exception as e:
            logger.error(f"LLM service exception: {e}")
            return f"Internal error: {e}"

    async def close(self):
        await self.client.aclose()
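

# --- Usage sketch (illustrative, not part of the module's shipped behavior) --
# A minimal smoke test, assuming Ollama is reachable at settings.OLLAMA_URL
# and the configured LLM_MODEL has been pulled. Run with:
#   python -m app.services.llm_service
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        service = LLMService()
        try:
            answer = await service.generate_rag_response(
                query="What does this service do?",
                context_str="LLMService wraps Ollama's /api/generate endpoint.",
            )
            print(answer)
        finally:
            await service.close()

    asyncio.run(_demo())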