diff --git a/app/services/llm_service.py b/app/services/llm_service.py
index eae30ae..01a0b9d 100644
--- a/app/services/llm_service.py
+++ b/app/services/llm_service.py
@@ -101,23 +101,26 @@ class LLMService:
     def get_prompt(self, key: str, model_id: str = None, provider: str = None) -> str:
         """
-        WP-25b: High-precision prompt lookup.
-        Hierarchy: exact model ID -> provider name -> global default.
+        WP-25b: High-precision prompt lookup with detailed trace logging.
         """
         data = self.prompts.get(key, "")
         if not isinstance(data, dict):
             return str(data)
 
-        # 1. Most specific match: exact model ID (e.g. 'google/gemini-2.0-flash-exp:free')
+        # 1. Most specific match: exact model ID
         if model_id and model_id in data:
+            logger.info(f"🎯 [PROMPT-TRACE] Level 1 Match: Model-specific ('{model_id}') for key '{key}'")
             return str(data[model_id])
 
-        # 2. Middle tier: provider (e.g. 'ollama' or 'openrouter')
+        # 2. Middle tier: provider
         if provider and provider in data:
+            logger.info(f"📡 [PROMPT-TRACE] Level 2 Match: Provider-fallback ('{provider}') for key '{key}'")
             return str(data[provider])
 
-        # 3. Fallback: known keys or the default from prompts.yaml
-        return str(data.get("default", data.get("gemini", data.get("ollama", ""))))
+        # 3. Global fallback
+        default_val = data.get("default", data.get("gemini", data.get("ollama", "")))
+        logger.info(f"⚓ [PROMPT-TRACE] Level 3 Match: Global Default for key '{key}'")
+        return str(default_val)
 
     async def generate_raw_response(
         self,
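
Note for reviewers: a minimal, runnable sketch of the three-level resolution order this hunk implements, against an assumed prompts.yaml shape. The `summarize` key, the prompt strings, and the standalone `resolve` helper are hypothetical illustrations, not taken from the actual config or class:

    # Assumed prompts.yaml shape after loading into self.prompts (hypothetical):
    prompts = {
        "summarize": {
            "google/gemini-2.0-flash-exp:free": "model-tuned prompt",  # Level 1
            "ollama": "provider-level prompt",                         # Level 2
            "default": "generic prompt",                               # Level 3
        }
    }

    def resolve(data: dict, model_id: str = None, provider: str = None) -> str:
        # Same precedence as get_prompt: exact model ID beats provider beats default.
        if model_id and model_id in data:
            return data[model_id]
        if provider and provider in data:
            return data[provider]
        return data.get("default", data.get("gemini", data.get("ollama", "")))

    assert resolve(prompts["summarize"], model_id="google/gemini-2.0-flash-exp:free") == "model-tuned prompt"
    assert resolve(prompts["summarize"], provider="ollama") == "provider-level prompt"
    assert resolve(prompts["summarize"]) == "generic prompt"

One observation on the design: because the trace log lines sit immediately before each return, every call to get_prompt emits exactly one [PROMPT-TRACE] line identifying which level matched, which is what makes the lookup auditable in production logs.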