"""
|
|
FILE: app/services/llm_service.py
|
|
DESCRIPTION: Hybrid-Client für Ollama, Google GenAI und OpenRouter.
|
|
Verwaltet provider-spezifische Prompts und Background-Last.
|
|
VERSION: 3.3.0 (Full SDK Integration)
|
|
"""
|
|
import httpx
import yaml
import logging
import asyncio
import json
from google import genai
from google.genai import types
from openai import AsyncOpenAI  # For OpenRouter
from pathlib import Path
from typing import Optional, Dict, Any, Literal
from app.config import get_settings

logger = logging.getLogger(__name__)


class LLMService:
    """Hybrid LLM client that routes requests to Ollama, Google GenAI (Gemini) or OpenRouter."""

    _background_semaphore: Optional[asyncio.Semaphore] = None

    def __init__(self):
        self.settings = get_settings()
        self.prompts = self._load_prompts()

        # WP-06: initialize the class-wide semaphore that throttles background requests
        if LLMService._background_semaphore is None:
            limit = self.settings.BACKGROUND_LIMIT
            logger.info(f"🚦 LLMService: Background Semaphore initialized with limit: {limit}")
            LLMService._background_semaphore = asyncio.Semaphore(limit)

        # 1. Local Ollama client
        self.ollama_client = httpx.AsyncClient(
            base_url=self.settings.OLLAMA_URL,
            timeout=httpx.Timeout(self.settings.LLM_TIMEOUT)
        )

        # 2. Google GenAI client (modern SDK)
        self.google_client = None
        if self.settings.GOOGLE_API_KEY:
            self.google_client = genai.Client(api_key=self.settings.GOOGLE_API_KEY)
            logger.info("✨ LLMService: Google GenAI (Gemini) active.")

        # 3. OpenRouter client
        self.openrouter_client = None
        if self.settings.OPENROUTER_API_KEY:
            self.openrouter_client = AsyncOpenAI(
                base_url="https://openrouter.ai/api/v1",
                api_key=self.settings.OPENROUTER_API_KEY
            )
            logger.info("🛰️ LLMService: OpenRouter Integration active.")

    def _load_prompts(self) -> dict:
        path = Path(self.settings.PROMPTS_PATH)
        if not path.exists():
            return {}
        try:
            with open(path, "r", encoding="utf-8") as f:
                return yaml.safe_load(f) or {}
        except Exception as e:
            logger.error(f"Failed to load prompts: {e}")
            return {}

    def get_prompt(self, key: str, provider: Optional[str] = None) -> str:
        """Fetch the provider-specific template for `key` with a fallback cascade."""
        active_provider = provider or self.settings.MINDNET_LLM_PROVIDER
        data = self.prompts.get(key, "")
        if isinstance(data, dict):
            # Fallback cascade: requested provider -> "ollama" entry -> empty string
            # (see the sketch of the expected YAML shape below).
            return data.get(active_provider, data.get("ollama", ""))
        return str(data)

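    # Illustrative sketch of the prompt-file shape the fallback above expects. The real
    # file lives at settings.PROMPTS_PATH; the texts below are placeholders, not the
    # project's actual prompts. A plain string applies to every provider, while a mapping
    # is resolved per provider with "ollama" as the default key.
    #
    #   system_prompt:
    #     ollama: "You are a concise assistant."
    #     gemini: "You are a concise assistant. Answer in plain text."
    #     openrouter: "You are a concise assistant."
    #   rag_template: "Context:\n{context_str}\n\nQuestion: {query}"
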
    async def generate_raw_response(
        self, prompt: str, system: Optional[str] = None, force_json: bool = False,
        max_retries: int = 2, base_delay: float = 2.0,
        priority: Literal["realtime", "background"] = "realtime",
        provider: Optional[str] = None,
        model_override: Optional[str] = None
    ) -> str:
        """Entry point with priority handling; background requests are throttled by the semaphore."""
        target_provider = provider or self.settings.MINDNET_LLM_PROVIDER

        if priority == "background":
            async with LLMService._background_semaphore:
                return await self._dispatch(target_provider, prompt, system, force_json, max_retries, base_delay, model_override)
        return await self._dispatch(target_provider, prompt, system, force_json, max_retries, base_delay, model_override)

    async def _dispatch(self, provider, prompt, system, force_json, max_retries, base_delay, model_override):
        """Route the request to the selected provider; fall back to Ollama if enabled."""
        try:
            if provider == "openrouter" and self.openrouter_client:
                return await self._execute_openrouter(prompt, system, force_json, model_override)
            if provider == "gemini" and self.google_client:
                return await self._execute_google(prompt, system, force_json, model_override)
            return await self._execute_ollama(prompt, system, force_json, max_retries, base_delay)
        except Exception as e:
            if self.settings.LLM_FALLBACK_ENABLED and provider != "ollama":
                logger.warning(f"🔄 Provider {provider} failed: {e}. Falling back to Ollama.")
                return await self._execute_ollama(prompt, system, force_json, max_retries, base_delay)
            raise

    async def _execute_google(self, prompt, system, force_json, model_override):
        """Native Google GenAI SDK integration."""
        model = model_override or self.settings.GEMINI_MODEL
        config = types.GenerateContentConfig(
            system_instruction=system,
            response_mime_type="application/json" if force_json else "text/plain"
        )
        # The SDK call is synchronous, so run it in a worker thread to keep the event loop free
        response = await asyncio.to_thread(
            self.google_client.models.generate_content,
            model=model, contents=prompt, config=config
        )
        # response.text can be None (e.g. for blocked or empty candidates)
        return (response.text or "").strip()

    async def _execute_openrouter(self, prompt, system, force_json, model_override):
        """OpenRouter (OpenAI-compatible API)."""
        model = model_override or self.settings.OPENROUTER_MODEL
        messages = []
        if system:
            messages.append({"role": "system", "content": system})
        messages.append({"role": "user", "content": prompt})

        response = await self.openrouter_client.chat.completions.create(
            model=model,
            messages=messages,
            response_format={"type": "json_object"} if force_json else None
        )
        return (response.choices[0].message.content or "").strip()

    async def _execute_ollama(self, prompt, system, force_json, max_retries, base_delay):
        """Ollama with exponential backoff."""
        payload = {
            "model": self.settings.LLM_MODEL, "prompt": prompt, "stream": False,
            "options": {"temperature": 0.1 if force_json else 0.7, "num_ctx": 8192}
        }
        if force_json:
            payload["format"] = "json"
        if system:
            payload["system"] = system

        attempt = 0
        while True:
            try:
                res = await self.ollama_client.post("/api/generate", json=payload)
                res.raise_for_status()
                return res.json().get("response", "").strip()
            except Exception as e:
                attempt += 1
                if attempt > max_retries:
                    raise
                wait = base_delay * (2 ** (attempt - 1))
                logger.warning(f"⚠️ Ollama retry {attempt} in {wait}s... ({e})")
                await asyncio.sleep(wait)

    async def generate_rag_response(self, query: str, context_str: str) -> str:
        """Full RAG wrapper: render the provider-specific template and answer with realtime priority."""
        provider = self.settings.MINDNET_LLM_PROVIDER
        system = self.get_prompt("system_prompt", provider)
        template = self.get_prompt("rag_template", provider)
        final_prompt = template.format(context_str=context_str, query=query)
        return await self.generate_raw_response(final_prompt, system=system, priority="realtime")

    async def close(self):
        """Release HTTP resources (the Ollama httpx client)."""
        await self.ollama_client.aclose()


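# Minimal usage sketch (not part of the service API). It assumes app.config provides the
# settings referenced above (OLLAMA_URL, LLM_MODEL, LLM_TIMEOUT, BACKGROUND_LIMIT,
# PROMPTS_PATH, MINDNET_LLM_PROVIDER, ...); prompts and queries are placeholders.
if __name__ == "__main__":
    async def _demo() -> None:
        service = LLMService()
        try:
            # Realtime request on the configured default provider
            answer = await service.generate_raw_response("Say hello in one sentence.")
            print(answer)

            # Background request, throttled by the shared semaphore (BACKGROUND_LIMIT)
            summary = await service.generate_raw_response(
                "Summarize: Ollama serves local models over HTTP.",
                priority="background",
            )
            print(summary)
        finally:
            await service.close()

    asyncio.run(_demo())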