debug für chat
All checks were successful
Deploy mindnet to llm-node / deploy (push) Successful in 4s

This commit is contained in:
Lars 2025-12-26 05:21:15 +01:00
parent 83c0c9944d
commit 43d3d8f7f3

View File

@@ -1,12 +1,9 @@
"""
FILE: app/routers/chat.py
DESCRIPTION: Haupt-Chat-Interface (RAG & Interview). Enthält Intent-Router (Keywords/LLM) und Prompt-Construction.
VERSION: 2.7.2 (Deep Fallback Edition)
VERSION: 2.7.3 (Debug & Deep Fallback Edition)
STATUS: Active
FIX: Respektiert preferred_provider aus decision_engine.yaml und implementiert
Deep Fallback Logik zur Vermeidung leerer Cloud-Antworten (Silent Refusal).
DEPENDENCIES: app.config, app.models.dto, app.services.llm_service, app.core.retriever, app.services.feedback_service
EXTERNAL_CONFIG: config/decision_engine.yaml, config/types.yaml
FIX: Integriert erweiterte Debug-Logs zur Analyse von Context-Overflow und Silent Refusals.
"""
from fastapi import APIRouter, HTTPException, Depends
@@ -313,9 +310,17 @@ async def chat_endpoint(
final_prompt = template.replace("{context_str}", context_str).replace("{query}", request.message)
sources_hits = hits
# --- DEBUG SPOT 1: PROMPT CONSTRUCTION ---
logger.info(f"[{query_id}] PROMPT CONSTRUCTION COMPLETE. Length: {len(final_prompt)} chars.")
if not final_prompt.strip():
logger.error(f"[{query_id}] CRITICAL: Final prompt is empty before sending to LLM!")
# --- GENERATION MIT DEEP FALLBACK ---
system_prompt = llm.get_prompt("system_prompt")
# --- DEBUG SPOT 2: PRIMARY CALL ---
logger.info(f"[{query_id}] PRIMARY CALL: Sending request to provider '{preferred_provider}'...")
# 1. Versuch mit konfiguriertem Provider (z.B. Ollama für EMPATHY)
answer_text = await llm.generate_raw_response(
prompt=final_prompt,
@@ -326,7 +331,9 @@ async def chat_endpoint(
# DEEP FALLBACK: Wenn die Antwort leer ist (Silent Refusal in der Cloud)
if not answer_text.strip() and preferred_provider != "ollama":
logger.warning(f"🛑 [{query_id}] Leere Antwort von '{preferred_provider}'. Starte LOKALEN FALLBACK via Ollama...")
# --- DEBUG SPOT 3: FALLBACK TRIGGER ---
logger.warning(f"🛑 [{query_id}] PRIMARY '{preferred_provider}' returned EMPTY. Triggering Deep Fallback to Ollama...")
answer_text = await llm.generate_raw_response(
prompt=final_prompt,
system=system_prompt,