Switch to a different LLM and increase the timeout
This commit is contained in:
parent
e512f768ff
commit
cf5e53f341
@@ -26,7 +26,7 @@ class Settings:
     # WP-05 LLM / Ollama
     OLLAMA_URL: str = os.getenv("MINDNET_OLLAMA_URL", "http://127.0.0.1:11434")
     # CHANGE: default set to 'mistral' since it is already available locally
-    LLM_MODEL: str = os.getenv("MINDNET_LLM_MODEL", "mistral")
+    LLM_MODEL: str = os.getenv("MINDNET_LLM_MODEL", "phi3:mini")
     PROMPTS_PATH: str = os.getenv("MINDNET_PROMPTS_PATH", "config/prompts.yaml")
 
     # API
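Note: the new default "phi3:mini" is only a fallback. Because the value is read through os.getenv, the previous model can still be selected per environment without touching the code. A minimal sketch, assuming the settings module is importable as app.core.config and that get_settings() reads the environment when it is first called (both assumptions, not shown in this diff):

# Sketch only: override the default model via the environment variable.
# The import path app.core.config is hypothetical; adjust to the real module.
import os

os.environ["MINDNET_LLM_MODEL"] = "mistral"   # falls back to "phi3:mini" if unset

from app.core.config import get_settings     # hypothetical module path

settings = get_settings()
print(settings.LLM_MODEL)                     # -> "mistral"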
@@ -1,12 +1,8 @@
 """
 app/services/llm_service.py — LLM Client (Ollama)
 
-Purpose:
-Encapsulates the communication with the Ollama API.
-Loads prompts & templates from the YAML config.
-
 Version:
-0.1.0 (WP-05 Init)
+0.1.2 (WP-05 Fix: Increased Timeout for CPU Inference)
 """
 
 import httpx
@@ -22,13 +18,13 @@ class LLMService:
     def __init__(self):
         self.settings = get_settings()
         self.prompts = self._load_prompts()
-        self.client = httpx.AsyncClient(base_url=self.settings.OLLAMA_URL, timeout=60.0)
+        # FIX: timeout raised to 120 seconds for CPU-only servers
+        self.client = httpx.AsyncClient(base_url=self.settings.OLLAMA_URL, timeout=120.0)
 
     def _load_prompts(self) -> dict:
         """Loads prompts from the configured YAML file."""
         path = Path(self.settings.PROMPTS_PATH)
         if not path.exists():
-            # Fallback in case the file does not exist yet (dev safeguard)
             logger.warning(f"Prompt config not found at {path}, using defaults.")
             return {
                 "system_prompt": "You are a helpful AI assistant.",
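Note: a single timeout=120.0 covers connect, read, and write alike. If only slow generation on CPU is the concern, httpx also accepts per-phase timeouts, so an unreachable host can still fail fast while long reads remain allowed. A sketch of that variant (the values are illustrative, not part of this commit):

# Sketch only: long read timeout for slow CPU inference, short connect timeout
# so an unreachable Ollama host fails fast instead of hanging for 120 seconds.
import httpx

timeout = httpx.Timeout(120.0, connect=5.0)   # read/write/pool 120 s, connect 5 s
client = httpx.AsyncClient(base_url="http://127.0.0.1:11434", timeout=timeout)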
@@ -56,24 +52,32 @@ class LLMService:
             "model": self.settings.LLM_MODEL,
             "system": system_prompt,
             "prompt": final_prompt,
-            "stream": False,  # no streaming for now, keeps handling simple
+            "stream": False,
             "options": {
                 "temperature": 0.7,
-                "num_ctx": 4096
+                # a smaller context saves compute time if 4096 is too much
+                "num_ctx": 2048
             }
         }
 
         try:
             response = await self.client.post("/api/generate", json=payload)
-            response.raise_for_status()
+
+            if response.status_code != 200:
+                error_msg = response.text
+                logger.error(f"Ollama API Error ({response.status_code}): {error_msg}")
+                return f"Error from the LLM (is model '{self.settings.LLM_MODEL}' available?): {error_msg}"
+
             data = response.json()
             return data.get("response", "")
-        except httpx.HTTPError as e:
-            logger.error(f"Ollama API Error: {e}")
-            return f"Error while generating the answer: {str(e)}"
+        except httpx.ReadTimeout:
+            return "Timeout: the model takes too long to answer (>120s). Hardware limit reached?"
+        except httpx.ConnectError:
+            return "Connection error: is Ollama running (port 11434)?"
         except Exception as e:
-            logger.error(f"LLM Service Error: {e}")
-            return "An unexpected error occurred."
+            logger.error(f"LLM Service Exception: {e}")
+            return f"Internal error: {str(e)}"
 
     async def close(self):
         await self.client.aclose()
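Note: the same request shape can be exercised outside the service to confirm that the configured model answers within the new limit. A self-contained sketch against Ollama's /api/generate endpoint (the model name and prompt are placeholders, not part of the commit):

# Standalone sketch: probe /api/generate with the payload shape and the
# 120 s timeout used above. Run it with a model that has been pulled locally.
import asyncio
import httpx

async def probe(model: str = "phi3:mini") -> str:
    payload = {
        "model": model,
        "prompt": "Say hello in one short sentence.",  # placeholder prompt
        "stream": False,
        "options": {"temperature": 0.7, "num_ctx": 2048},
    }
    async with httpx.AsyncClient(base_url="http://127.0.0.1:11434", timeout=120.0) as client:
        try:
            response = await client.post("/api/generate", json=payload)
            if response.status_code != 200:
                return f"HTTP {response.status_code}: {response.text}"
            return response.json().get("response", "")
        except httpx.ReadTimeout:
            return "Timeout: no answer within 120 s"
        except httpx.ConnectError:
            return "Connection error: is Ollama running on port 11434?"

if __name__ == "__main__":
    print(asyncio.run(probe()))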