"""
|
|
app/services/llm_service.py — LLM Client (Ollama)
|
|
|
|
Zweck:
|
|
Kapselt die Kommunikation mit der Ollama API.
|
|
Lädt Prompts & Templates aus der YAML-Config.
|
|
|
|
Version:
|
|
0.1.0 (WP-05 Init)
|
|
"""

import logging
from pathlib import Path

import httpx
import yaml

from app.config import get_settings

logger = logging.getLogger(__name__)


class LLMService:
    def __init__(self):
        self.settings = get_settings()
        self.prompts = self._load_prompts()
        # Reused async client; must be closed via close() on shutdown.
        self.client = httpx.AsyncClient(base_url=self.settings.OLLAMA_URL, timeout=60.0)
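
    # Settings this service assumes (defined in app.config / get_settings(),
    # not shown in this file); values below are illustrative only:
    #   OLLAMA_URL:   base URL of the Ollama server, e.g. "http://localhost:11434"
    #   LLM_MODEL:    model name passed to /api/generate, e.g. "llama3"
    #   PROMPTS_PATH: path to the prompts YAML, e.g. "config/prompts.yaml"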

    def _load_prompts(self) -> dict:
        """Loads prompts from the configured YAML file."""
        path = Path(self.settings.PROMPTS_PATH)
        if not path.exists():
            # Fallback in case the file does not exist yet (dev safeguard)
            logger.warning(f"Prompt config not found at {path}, using defaults.")
            return {
                "system_prompt": "You are a helpful AI assistant.",
                "rag_template": "Context: {context_str}\nQuestion: {query}",
            }

        try:
            with open(path, "r", encoding="utf-8") as f:
                # safe_load returns None for an empty file; fall back to {}
                return yaml.safe_load(f) or {}
        except Exception as e:
            logger.error(f"Failed to load prompts: {e}")
            return {}

    async def generate_rag_response(self, query: str, context_str: str) -> str:
        """Generates an answer based on the query and the retrieved context."""
        system_prompt = self.prompts.get("system_prompt", "")
        template = self.prompts.get("rag_template", "{context_str}\n\n{query}")

        # Fill the template
        final_prompt = template.format(context_str=context_str, query=query)

        payload = {
            "model": self.settings.LLM_MODEL,
            "system": system_prompt,
            "prompt": final_prompt,
            "stream": False,  # No streaming for now, for simpler handling
            "options": {
                "temperature": 0.7,
                "num_ctx": 4096,
            },
        }
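
        # With "stream": False, /api/generate returns one JSON object whose
        # "response" field holds the generated text; roughly (per the Ollama
        # API, simplified):
        #   {"model": "...", "created_at": "...", "response": "<answer>", "done": true}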

        try:
            response = await self.client.post("/api/generate", json=payload)
            response.raise_for_status()
            data = response.json()
            return data.get("response", "")
        except httpx.HTTPError as e:
            logger.error(f"Ollama API Error: {e}")
            return f"Error while generating the response: {str(e)}"
        except Exception as e:
            logger.error(f"LLM Service Error: {e}")
            return "An unexpected error occurred."

    async def close(self):
        """Closes the underlying HTTP client."""
        await self.client.aclose()
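

# Minimal usage sketch (illustrative; the query/context strings are made-up
# examples, and running this requires a reachable Ollama instance plus valid
# app settings):
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        svc = LLMService()
        try:
            answer = await svc.generate_rag_response(
                query="What is WP-05 about?",
                context_str="WP-05 introduces the Ollama-based LLM service layer.",
            )
            print(answer)
        finally:
            await svc.close()

    asyncio.run(_demo())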