Enhance prompt retrieval in LLMService: Implement detailed trace-logging for prompt lookup hierarchy, improving traceability of model-specific, provider, and global fallback matches. This update refines the logging mechanism to provide clearer insights during prompt resolution.
This commit is contained in:
parent
1563ebbdf9
commit
a9d0874fe9
|
|
@ -101,23 +101,26 @@ class LLMService:
|
|||
|
||||
def get_prompt(self, key: str, model_id: str = None, provider: str = None) -> str:
    """
    WP-25b: High-precision prompt lookup with detailed trace logging.

    Resolution hierarchy: exact model ID -> provider name -> global default.

    Args:
        key: Name of the prompt entry in ``self.prompts``.
        model_id: Optional exact model identifier
            (e.g. 'google/gemini-2.0-flash-exp:free') for the most specific match.
        provider: Optional provider name (e.g. 'ollama' or 'openrouter')
            used as a mid-level fallback.

    Returns:
        The resolved prompt as a string. If the entry for ``key`` is not a
        dict, it is returned as-is (stringified); a missing key yields "".
    """
    data = self.prompts.get(key, "")
    # Flat entries (plain strings) have no per-model/per-provider variants.
    if not isinstance(data, dict):
        return str(data)

    # 1. Most specific match: exact model ID
    if model_id and model_id in data:
        # Lazy %-style args: formatting only happens if INFO is enabled.
        logger.info("🎯 [PROMPT-TRACE] Level 1 Match: Model-specific ('%s') for key '%s'", model_id, key)
        return str(data[model_id])

    # 2. Middle tier: provider
    if provider and provider in data:
        logger.info("📡 [PROMPT-TRACE] Level 2 Match: Provider-fallback ('%s') for key '%s'", provider, key)
        return str(data[provider])

    # 3. Global fallback: 'default' preferred, then legacy 'gemini'/'ollama' keys
    #    kept for backward compatibility with older prompts.yaml layouts.
    default_val = data.get("default", data.get("gemini", data.get("ollama", "")))
    logger.info("⚓ [PROMPT-TRACE] Level 3 Match: Global Default for key '%s'", key)
    return str(default_val)
|
||||
|
||||
async def generate_raw_response(
|
||||
self,
|
||||
|
|
|
|||
Loading…
Reference in New Issue
Block a user