"""
app/services/llm_service.py — LLM Client (Ollama)
Version: 0.2.0 (WP-06 Hybrid Router Support)
"""
import logging
from pathlib import Path

import httpx
import yaml

from app.config import get_settings

logger = logging.getLogger(__name__)


class LLMService:
    def __init__(self):
        self.settings = get_settings()
        self.prompts = self._load_prompts()
        # Use the timeout from config (default 120s)
        self.client = httpx.AsyncClient(
            base_url=self.settings.OLLAMA_URL,
            timeout=self.settings.LLM_TIMEOUT,
        )
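
        # Settings consumed by this service (names from app.config). The
        # values below are an illustrative sketch, not the project's actual
        # configuration:
        #   OLLAMA_URL   = "http://localhost:11434"  # standard Ollama port
        #   LLM_TIMEOUT  = 120                       # seconds (default 120s)
        #   LLM_MODEL    = "llama3"                  # assumed model name
        #   PROMPTS_PATH = "prompts.yaml"            # assumed file location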

    def _load_prompts(self) -> dict:
        path = Path(self.settings.PROMPTS_PATH)
        if not path.exists():
            return {}
        try:
            with open(path, "r", encoding="utf-8") as f:
                # safe_load returns None for an empty file; normalize to {}
                return yaml.safe_load(f) or {}
        except Exception as e:
            logger.error(f"Failed to load prompts: {e}")
            return {}
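
    # Illustrative shape of the prompts file loaded above. Only the two keys
    # read in generate_rag_response below ("system_prompt", "rag_template")
    # are actually used; the example values are assumptions:
    #
    #   system_prompt: "You are a helpful assistant."
    #   rag_template: |
    #     Context:
    #     {context_str}
    #
    #     Question: {query}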

    async def generate_raw_response(self, prompt: str) -> str:
        """
        NEW: Performs a direct LLM call without the RAG template.
        Used by the router for answer generation.
        """
        payload = {
            "model": self.settings.LLM_MODEL,
            "prompt": prompt,
            "stream": False,
            "options": {
                "temperature": 0.0,
                "num_ctx": 512,
            },
        }
        try:
            response = await self.client.post("/api/generate", json=payload)
            if response.status_code != 200:
                logger.error(f"Ollama error ({response.status_code}): {response.text}")
                return "Error during generation."
            data = response.json()
            return data.get("response", "").strip()
        except Exception as e:
            logger.error(f"LLM raw generation error: {e}")
            return "Internal LLM error."

    async def generate_rag_response(self, query: str, context_str: str) -> str:
        """Legacy support / fallback."""
        system_prompt = self.prompts.get("system_prompt", "")
        rag_template = self.prompts.get("rag_template", "{context_str}\n\n{query}")
        final_prompt = rag_template.format(context_str=context_str, query=query)
        # Internally we now reuse generate_raw_response to avoid duplicated code
        full_prompt = f"{system_prompt}\n\n{final_prompt}"
        return await self.generate_raw_response(full_prompt)

    async def close(self):
        await self.client.aclose()
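

# Minimal usage sketch: assumes a reachable Ollama instance and valid settings;
# the prompt text is purely illustrative.
if __name__ == "__main__":
    import asyncio

    async def _demo():
        service = LLMService()
        try:
            answer = await service.generate_raw_response("Say hello in one word.")
            print(answer)
        finally:
            await service.close()

    asyncio.run(_demo())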