"""
|
|
app/services/llm_service.py — LLM Client (Ollama)
|
|
Version: 0.2.1 (Fix: System Prompt Handling for Phi-3)
|
|
"""
|
|
|
|

import logging
from pathlib import Path
from typing import Optional

import httpx
import yaml

from app.config import get_settings

logger = logging.getLogger(__name__)


class LLMService:
    def __init__(self):
        self.settings = get_settings()
        self.prompts = self._load_prompts()

        self.client = httpx.AsyncClient(
            base_url=self.settings.OLLAMA_URL,
            timeout=self.settings.LLM_TIMEOUT
        )

    def _load_prompts(self) -> dict:
        """Load the prompt templates from the YAML file at PROMPTS_PATH."""
        path = Path(self.settings.PROMPTS_PATH)
        if not path.exists():
            return {}
        try:
            with open(path, "r", encoding="utf-8") as f:
                # An empty file makes safe_load return None; fall back to {}.
                return yaml.safe_load(f) or {}
        except Exception as e:
            logger.error(f"Failed to load prompts: {e}")
            return {}
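
    # Illustrative only: the prompts file is assumed to be a flat YAML mapping.
    # The keys actually read in this module are "system_prompt" and
    # "rag_template" (see generate_rag_response below), e.g.:
    #
    #   system_prompt: "You are a helpful assistant."
    #   rag_template: |
    #     Context:
    #     {context_str}
    #
    #     Question: {query}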

    async def generate_raw_response(self, prompt: str, system: Optional[str] = None) -> str:
        """
        Performs an LLM call.
        Now supports explicit system prompts for clean templating.
        """
        payload = {
            "model": self.settings.LLM_MODEL,
            "prompt": prompt,
            "stream": False,
            "options": {
                # Temperature a bit higher for empathy, lower for code?
                # Leave it at this default for now, or drive it via config later.
                "temperature": 0.7,
                "num_ctx": 2048
            }
        }

        # IMPORTANT: pass the system prompt separately so that Ollama applies
        # the model's own prompt template.
        if system:
            payload["system"] = system

        try:
            response = await self.client.post("/api/generate", json=payload)
            if response.status_code != 200:
                logger.error(f"Ollama Error ({response.status_code}): {response.text}")
                return "Fehler bei der Generierung."

            data = response.json()
            return data.get("response", "").strip()

        except Exception as e:
            logger.error(f"LLM Raw Gen Error: {e}")
            return "Interner LLM Fehler."

    async def generate_rag_response(self, query: str, context_str: str) -> str:
        """Legacy support."""
        system_prompt = self.prompts.get("system_prompt", "")
        rag_template = self.prompts.get("rag_template", "{context_str}\n\n{query}")
        final_prompt = rag_template.format(context_str=context_str, query=query)

        # Forward to the new method
        return await self.generate_raw_response(final_prompt, system=system_prompt)

    async def close(self):
        await self.client.aclose()
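

# Minimal usage sketch, kept out of the import path by the __main__ guard. It
# assumes app.config.get_settings() provides OLLAMA_URL, LLM_TIMEOUT, LLM_MODEL
# and PROMPTS_PATH as used above, and that a local Ollama instance is serving
# the configured model; adjust or remove as needed.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        service = LLMService()
        try:
            answer = await service.generate_raw_response(
                "Explain retrieval-augmented generation in one sentence.",
                system="You answer briefly and precisely.",
            )
            print(answer)
        finally:
            await service.close()

    asyncio.run(_demo())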