"""
app/services/llm_service.py — LLM Client (Ollama)
Version: 0.3.0 (Fix: JSON Format Enforcement)
"""
import httpx
import yaml
import logging
import os
from pathlib import Path

# ASSUMPTION: app.config is available in the real deployment.
# from app.config import get_settings

logger = logging.getLogger(__name__)


# --- Mock get_settings, kept for completeness -------------------------------
class Settings:
    """Environment-driven configuration (evaluated once at class creation)."""
    OLLAMA_URL = os.getenv("MINDNET_OLLAMA_URL", "http://127.0.0.1:11434")
    # getenv defaults should be strings; float() converts after lookup.
    LLM_TIMEOUT = float(os.getenv("MINDNET_LLM_TIMEOUT", "300.0"))
    LLM_MODEL = os.getenv("MINDNET_LLM_MODEL", "phi3:mini")
    PROMPTS_PATH = os.getenv("MINDNET_PROMPTS_PATH", "./config/prompts.yaml")


def get_settings() -> Settings:
    """Return a fresh Settings instance (mock for app.config.get_settings)."""
    return Settings()
# ----------------------------------------------------------------------------


class LLMService:
    """Async client for the Ollama /api/generate endpoint.

    Holds a single httpx.AsyncClient for the service's lifetime; call
    close() on shutdown to release the connection pool.
    """

    def __init__(self) -> None:
        self.settings = get_settings()
        self.prompts = self._load_prompts()
        self.client = httpx.AsyncClient(
            base_url=self.settings.OLLAMA_URL,
            timeout=self.settings.LLM_TIMEOUT,
        )

    def _load_prompts(self) -> dict:
        """Load prompt templates from the configured YAML file.

        Always returns a dict: a missing file, a parse error, an empty
        file (safe_load -> None), or a non-mapping document all fall back
        to {} so callers can use .get() safely.
        """
        path = Path(self.settings.PROMPTS_PATH)
        if not path.exists():
            return {}
        try:
            with open(path, "r", encoding="utf-8") as f:
                data = yaml.safe_load(f)
            # FIX: safe_load may return None (empty file) or a non-dict
            # document; previously that propagated and broke .get() callers.
            return data if isinstance(data, dict) else {}
        except Exception as e:
            logger.error(f"Failed to load prompts: {e}")
            return {}

    async def generate_raw_response(
        self,
        prompt: str,
        system: str | None = None,
        force_json: bool = False,
    ) -> str:
        """Execute a single non-streaming LLM call.

        Args:
            prompt: The user prompt sent to the model.
            system: Optional system prompt, passed separately to Ollama.
            force_json: If True, sets Ollama's "format": "json" option to
                force valid-JSON output (important for semantic chunking).

        Returns:
            The model's response text (stripped), or a German error
            message string on HTTP/transport failure (legacy contract —
            callers display these strings directly).
        """
        payload = {
            "model": self.settings.LLM_MODEL,
            "prompt": prompt,
            "stream": False,
            "options": {
                "temperature": 0.7,
                "num_ctx": 2048,
            },
        }

        # Ollama JSON-mode enforcement (needed for semantic chunking).
        if force_json:
            payload["format"] = "json"

        # IMPORTANT: the system prompt must be passed as its own field,
        # not concatenated into the prompt.
        if system:
            payload["system"] = system

        try:
            response = await self.client.post("/api/generate", json=payload)
            if response.status_code != 200:
                logger.error(f"Ollama Error ({response.status_code}): {response.text}")
                return "Fehler bei der Generierung."
            data = response.json()
            return data.get("response", "").strip()
        except Exception as e:
            # Boundary handler: log and return a displayable error string
            # rather than propagating transport exceptions to the chat layer.
            logger.error(f"LLM Raw Gen Error: {e}")
            return "Interner LLM Fehler."

    async def generate_rag_response(self, query: str, context_str: str) -> str:
        """Legacy support: used by the chat and intent router.

        Formats the configured RAG template and delegates to
        generate_raw_response WITHOUT force_json (plain-text mode).
        """
        system_prompt = self.prompts.get("system_prompt", "")
        rag_template = self.prompts.get("rag_template", "{context_str}\n\n{query}")
        final_prompt = rag_template.format(context_str=context_str, query=query)
        # Stays in standard mode (force_json=False default).
        return await self.generate_raw_response(final_prompt, system=system_prompt)

    async def close(self) -> None:
        """Dispose of the underlying HTTP connection pool."""
        await self.client.aclose()