New version with a wait period on external LLM errors

This commit is contained in:
Lars 2025-12-25 13:03:42 +01:00
parent c9cf1b7e4c
commit ecfdc67485
2 changed files with 93 additions and 88 deletions

View File

@ -2,8 +2,8 @@
FILE: app/config.py
DESCRIPTION: Central Pydantic configuration.
WP-20: Hybrid-cloud mode support (OpenRouter/Gemini/Ollama).
FIX: Update to the Gemini 2.5 series & throughput optimization for Gemma 2.
VERSION: 0.6.6
FIX: Introduce parameters for intelligent rate-limit control (429 handling).
VERSION: 0.6.7
STATUS: Active
DEPENDENCIES: os, functools, pathlib, python-dotenv
"""
@ -27,32 +27,36 @@ class Settings:
VECTOR_SIZE: int = int(os.getenv("VECTOR_DIM", "768"))
DISTANCE: str = os.getenv("MINDNET_DISTANCE", "Cosine")
# --- Local Embeddings ---
# --- Local Embeddings (Ollama & Sentence-Transformers) ---
EMBEDDING_MODEL: str = os.getenv("MINDNET_EMBEDDING_MODEL", "nomic-embed-text")
MODEL_NAME: str = os.getenv("MINDNET_MODEL", "sentence-transformers/all-MiniLM-L6-v2")
# --- WP-20 Hybrid LLM Provider ---
# "openrouter" ist primär für den Ingest-Turbo mit Gemma 2 empfohlen.
# Erlaubt: "ollama" | "gemini" | "openrouter"
MINDNET_LLM_PROVIDER: str = os.getenv("MINDNET_LLM_PROVIDER", "openrouter").lower()
# Google AI Studio (Fallback auf 2.5-Serie)
# Google AI Studio (2025er Lite-Modell für höhere Kapazität)
GOOGLE_API_KEY: str | None = os.getenv("GOOGLE_API_KEY")
# "gemini-2.5-flash-lite" ist die skalierbare 2025-Alternative für hohe Last.
GEMINI_MODEL: str = os.getenv("MINDNET_GEMINI_MODEL", "gemini-2.5-flash-lite")
# OpenRouter Integration (openai/gpt-oss-20b:free oder gemma-2)
# OpenRouter Integration (Verfügbares Free-Modell 2025)
OPENROUTER_API_KEY: str | None = os.getenv("OPENROUTER_API_KEY")
# "google/gemma-2-9b-it:free" bietet hohe Kapazität bei Kostenfreiheit.
OPENROUTER_MODEL: str = os.getenv("OPENROUTER_MODEL", "google/gemma-2-9b-it:free")
OPENROUTER_MODEL: str = os.getenv("OPENROUTER_MODEL", "mistralai/mistral-7b-instruct:free")
LLM_FALLBACK_ENABLED: bool = os.getenv("MINDNET_LLM_FALLBACK", "true").lower() == "true"
# --- NEW: Intelligent rate-limit control ---
# Wait duration in seconds when an HTTP 429 (rate limit) occurs
LLM_RATE_LIMIT_WAIT: float = float(os.getenv("MINDNET_LLM_RATE_LIMIT_WAIT", "60.0"))
# Number of cloud retries on 429 before the Ollama fallback kicks in
LLM_RATE_LIMIT_RETRIES: int = int(os.getenv("MINDNET_LLM_RATE_LIMIT_RETRIES", "3"))
# --- WP-05 Local LLM (Ollama) ---
OLLAMA_URL: str = os.getenv("MINDNET_OLLAMA_URL", "http://127.0.0.1:11434")
LLM_MODEL: str = os.getenv("MINDNET_LLM_MODEL", "phi3:mini")
PROMPTS_PATH: str = os.getenv("MINDNET_PROMPTS_PATH", "config/prompts.yaml")
# --- Performance & Last-Steuerung ---
# --- WP-06 / WP-14 Performance & Last-Steuerung ---
LLM_TIMEOUT: float = float(os.getenv("MINDNET_LLM_TIMEOUT", "300.0"))
DECISION_CONFIG_PATH: str = os.getenv("MINDNET_DECISION_CONFIG", "config/decision_engine.yaml")
BACKGROUND_LIMIT: int = int(os.getenv("MINDNET_LLM_BACKGROUND_LIMIT", "2"))
@ -62,8 +66,6 @@ class Settings:
MINDNET_VAULT_ROOT: str = os.getenv("MINDNET_VAULT_ROOT", "./vault_master")
MINDNET_TYPES_FILE: str = os.getenv("MINDNET_TYPES_FILE", "config/types.yaml")
MINDNET_VOCAB_PATH: str = os.getenv("MINDNET_VOCAB_PATH", "/mindnet/vault/mindnet/_system/dictionary/edge_vocabulary.md")
# WP-22: 'full' for multi-hash change detection
CHANGE_DETECTION_MODE: str = os.getenv("MINDNET_CHANGE_DETECTION_MODE", "full")
# --- WP-04 Retriever weights ---
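The two new rate-limit settings above are plain environment-driven class attributes, so they can be tuned per deployment without touching code. With the defaults, a persistently rate-limited provider waits up to 3 × 60 s = 180 s before the Ollama fallback engages. A minimal sketch of overriding them, assuming app.config is imported only after the variables are set (the values are read via os.getenv() at class-definition time):

import os

# Hypothetical override: shorter wait, more cloud retries.
os.environ["MINDNET_LLM_RATE_LIMIT_WAIT"] = "30.0"
os.environ["MINDNET_LLM_RATE_LIMIT_RETRIES"] = "5"

from app.config import Settings  # import AFTER setting the env vars

assert Settings.LLM_RATE_LIMIT_WAIT == 30.0
assert Settings.LLM_RATE_LIMIT_RETRIES == 5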

View File

@ -4,10 +4,11 @@ DESCRIPTION: Hybrid client for Ollama, Google GenAI (Gemini), and OpenRouter.
Manages provider-specific prompts and background load.
WP-20: Optimized fallback management to protect cloud quotas.
WP-20 fix: Bulletproof prompt resolution for format() calls.
WP-22/JSON: Optional JSON schema + strict (for OpenRouter structured outputs),
WITHOUT breaking changes (new parameters only at the end).
VERSION: 3.3.3
WP-22/JSON: Optional JSON schema + strict (for OpenRouter structured outputs).
FIX: Intelligent rate-limit detection (429 handling), v1 API sync & timeouts.
VERSION: 3.3.6
STATUS: Active
DEPENDENCIES: httpx, yaml, logging, asyncio, json, google-genai, openai, app.config
"""
import httpx
import yaml
@ -47,7 +48,11 @@ class LLMService:
# 2. Google GenAI Client (Modern SDK)
self.google_client = None
if self.settings.GOOGLE_API_KEY:
self.google_client = genai.Client(api_key=self.settings.GOOGLE_API_KEY)
# FIX: We force api_version 'v1' for better stability with 2.5-series models.
self.google_client = genai.Client(
api_key=self.settings.GOOGLE_API_KEY,
http_options={'api_version': 'v1'}
)
logger.info("✨ LLMService: Google GenAI (Gemini) active.")
# 3. OpenRouter Client
@ -55,7 +60,9 @@ class LLMService:
if self.settings.OPENROUTER_API_KEY:
self.openrouter_client = AsyncOpenAI(
base_url="https://openrouter.ai/api/v1",
api_key=self.settings.OPENROUTER_API_KEY
api_key=self.settings.OPENROUTER_API_KEY,
# Strict timeout for the OpenRouter free tier to avoid hangs.
timeout=45.0
)
logger.info("🛰️ LLMService: OpenRouter Integration active.")
@ -84,7 +91,7 @@ class LLMService:
data = self.prompts.get(key, "")
if isinstance(data, dict):
# We try the active provider first, then Gemini (similarly capable), then Ollama
# We try the active provider first, then Gemini, then Ollama
val = data.get(active_provider, data.get("gemini", data.get("ollama", "")))
# If val is still a dict due to a YAML error, extract the first string
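The chained dict.get() above resolves a provider-specific prompt with two fallbacks. A standalone illustration of the resolution order (the prompt key and texts are invented for the example):

prompts = {
    "extract": {
        "gemini": "Extract entities as JSON: {text}",
        "ollama": "List the entities in: {text}",
    }
}
data = prompts.get("extract", "")
active_provider = "openrouter"  # no openrouter-specific prompt defined
# Resolution order: active provider -> "gemini" -> "ollama" -> ""
val = data.get(active_provider, data.get("gemini", data.get("ollama", "")))
print(val)  # -> "Extract entities as JSON: {text}"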
@ -105,51 +112,32 @@ class LLMService:
priority: Literal["realtime", "background"] = "realtime",
provider: Optional[str] = None,
model_override: Optional[str] = None,
# --- NEW (appended at the end => backwards compatible!) ---
json_schema: Optional[Dict[str, Any]] = None,
json_schema_name: str = "mindnet_json",
strict_json_schema: bool = True
) -> str:
"""
Main entry point for LLM requests with prioritization.
force_json:
- Ollama: uses payload["format"]="json"
- Gemini: uses response_mime_type="application/json"
- OpenRouter: uses response_format=json_object (fallback) or json_schema (structured outputs)
json_schema + strict_json_schema (relevant for OpenRouter only):
- If json_schema is set AND force_json=True -> response_format.type="json_schema"
- strict_json_schema is passed through to OpenRouter/the provider (best effort per provider)
- OpenRouter: uses response_format=json_object (fallback) or json_schema
"""
target_provider = provider or self.settings.MINDNET_LLM_PROVIDER
if priority == "background":
async with LLMService._background_semaphore:
return await self._dispatch(
target_provider,
prompt,
system,
force_json,
max_retries,
base_delay,
model_override,
json_schema,
json_schema_name,
strict_json_schema
target_provider, prompt, system, force_json,
max_retries, base_delay, model_override,
json_schema, json_schema_name, strict_json_schema
)
return await self._dispatch(
target_provider,
prompt,
system,
force_json,
max_retries,
base_delay,
model_override,
json_schema,
json_schema_name,
strict_json_schema
target_provider, prompt, system, force_json,
max_retries, base_delay, model_override,
json_schema, json_schema_name, strict_json_schema
)
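For orientation, a hypothetical call into this entry point with the structured-output parameters. The names llm and generate are assumptions for the example; the actual method name sits above this hunk and is not shown in the diff.

# Assumed: `llm` is an instantiated LLMService and `generate` is the
# entry-point method whose tail signature is shown above.
schema = {
    "type": "object",
    "properties": {"summary": {"type": "string"}},
    "required": ["summary"],
}
result = await llm.generate(
    prompt="Summarize the document.",
    force_json=True,
    priority="background",
    provider="openrouter",
    json_schema=schema,
    json_schema_name="doc_summary",
    strict_json_schema=True,
)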
async def _dispatch(
@ -165,47 +153,73 @@ class LLMService:
json_schema_name: str,
strict_json_schema: bool
) -> str:
"""Routet die Anfrage an den physikalischen API-Provider."""
try:
if provider == "openrouter" and self.openrouter_client:
return await self._execute_openrouter(
prompt=prompt,
system=system,
force_json=force_json,
model_override=model_override,
json_schema=json_schema,
json_schema_name=json_schema_name,
strict_json_schema=strict_json_schema
)
"""
Routet die Anfrage mit intelligenter Rate-Limit Erkennung (WP-20 + WP-76).
Schleife läuft über MINDNET_LLM_RATE_LIMIT_RETRIES.
"""
rate_limit_attempts = 0
max_rate_retries = getattr(self.settings, "LLM_RATE_LIMIT_RETRIES", 3)
wait_time = getattr(self.settings, "LLM_RATE_LIMIT_WAIT", 60.0)
if provider == "gemini" and self.google_client:
return await self._execute_google(prompt, system, force_json, model_override)
while rate_limit_attempts <= max_rate_retries:
try:
if provider == "openrouter" and self.openrouter_client:
return await self._execute_openrouter(
prompt=prompt,
system=system,
force_json=force_json,
model_override=model_override,
json_schema=json_schema,
json_schema_name=json_schema_name,
strict_json_schema=strict_json_schema
)
# Default/fallback to Ollama
return await self._execute_ollama(prompt, system, force_json, max_retries, base_delay)
if provider == "gemini" and self.google_client:
return await self._execute_google(prompt, system, force_json, model_override)
except Exception as e:
# QUOTA PROTECTION: If a cloud provider (OpenRouter/Gemini) fails,
# we ALWAYS fall back to Ollama, never from OpenRouter to Gemini.
if self.settings.LLM_FALLBACK_ENABLED and provider != "ollama":
logger.warning(
f"🔄 Provider {provider} failed: {e}. Falling back to LOCAL OLLAMA to protect cloud quotas."
)
# Default/fallback to Ollama
return await self._execute_ollama(prompt, system, force_json, max_retries, base_delay)
raise e
except Exception as e:
err_str = str(e)
# Intelligent 429 detection for all cloud providers
is_rate_limit = any(x in err_str for x in ["429", "RESOURCE_EXHAUSTED", "rate_limited", "Too Many Requests"])
if is_rate_limit and rate_limit_attempts < max_rate_retries:
rate_limit_attempts += 1
logger.warning(
f"⏳ [LLMService] Rate Limit (429) detected from {provider}. "
f"Attempt {rate_limit_attempts}/{max_rate_retries}. "
f"Waiting {wait_time}s before cloud retry..."
)
await asyncio.sleep(wait_time)
continue # Next attempt in the cloud retry loop
# No rate limit, or retries exhausted -> fall back to Ollama (if enabled)
if self.settings.LLM_FALLBACK_ENABLED and provider != "ollama":
logger.warning(
f"🔄 Provider {provider} failed ({err_str}). Falling back to LOCAL OLLAMA."
)
return await self._execute_ollama(prompt, system, force_json, max_retries, base_delay)
raise e
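The substring check above is deliberately broad so it catches 429-style errors from both SDKs, which raise different exception types but embed recognizable markers in their messages. A quick standalone check of that detection logic (the sample strings are assumptions modeled on typical provider messages):

markers = ["429", "RESOURCE_EXHAUSTED", "rate_limited", "Too Many Requests"]
samples = [
    "Error code: 429 - rate limit exceeded",    # OpenAI-style
    "ClientError: RESOURCE_EXHAUSTED (quota)",  # Google-style
    "Connection reset by peer",                 # not a rate limit
]
for s in samples:
    print(any(m in s for m in markers), "-", s)

# True / True / False: the last error skips the wait loop and goes
# straight to the Ollama fallback (if enabled).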
async def _execute_google(self, prompt, system, force_json, model_override):
"""Native Google SDK Integration (Gemini)."""
# Nutzt GEMINI_MODEL aus config.py falls kein override übergeben wurde
"""Native Google SDK Integration (Gemini) mit v1 Fix."""
model = model_override or self.settings.GEMINI_MODEL
# Fix: Bereinige Modellnamen (Entfernung von 'models/' Präfix)
clean_model = model.replace("models/", "")
config = types.GenerateContentConfig(
system_instruction=system,
response_mime_type="application/json" if force_json else "text/plain"
)
# Offload the SDK call to a thread, since the Google API can be blocking
response = await asyncio.to_thread(
self.google_client.models.generate_content,
model=model, contents=prompt, config=config
# Thread offloading with a strict timeout to guard against hangs
response = await asyncio.wait_for(
asyncio.to_thread(
self.google_client.models.generate_content,
model=clean_model, contents=prompt, config=config
),
timeout=45.0
)
return response.text.strip()
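The wait_for wrapper above converts a hung blocking SDK call into an asyncio.TimeoutError after 45 s, which then surfaces to _dispatch and triggers the retry or fallback path. A self-contained demonstration of the pattern (slow_call is a stand-in for the blocking API call):

import asyncio
import time

def slow_call() -> str:
    time.sleep(2)  # simulates a hanging blocking API call
    return "done"

async def main():
    try:
        print(await asyncio.wait_for(asyncio.to_thread(slow_call), timeout=1.0))
    except asyncio.TimeoutError:
        print("timed out -> raised into the caller, as in _dispatch here")

asyncio.run(main())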
@ -215,21 +229,11 @@ class LLMService:
system: Optional[str],
force_json: bool,
model_override: Optional[str],
# --- NEW (optional) ---
json_schema: Optional[Dict[str, Any]] = None,
json_schema_name: str = "mindnet_json",
strict_json_schema: bool = True
) -> str:
"""
OpenRouter API integration (OpenAI-compatible).
force_json=True:
- Without json_schema -> response_format={"type":"json_object"}
- With json_schema -> response_format={"type":"json_schema", "json_schema": {..., "strict": True}}
Important: do NOT send response_format as None (more robust across SDKs/providers).
"""
# Uses OPENROUTER_MODEL from config.py
"""OpenRouter API integration (OpenAI-compatible) with schema support."""
model = model_override or self.settings.OPENROUTER_MODEL
messages = []
if system:
@ -237,7 +241,6 @@ class LLMService:
messages.append({"role": "user", "content": prompt})
kwargs: Dict[str, Any] = {}
if force_json:
if json_schema:
kwargs["response_format"] = {
@ -306,4 +309,4 @@ class LLMService:
async def close(self):
"""Schließt die HTTP-Verbindungen."""
if self.ollama_client:
await self.ollama_client.aclose()
await self.ollama_client.aclose()