debug raus. aktualisierte chats

This commit is contained in:
Lars 2025-12-08 11:15:16 +01:00
parent b39663408c
commit 7745b71832

View File

@ -2,7 +2,7 @@
app/routers/chat.py RAG Endpunkt (WP-05)
Version:
0.1.1 (Debug-Logging enabled)
0.2.0 (Final Clean Version)
"""
from fastapi import APIRouter, HTTPException, Depends
@ -13,7 +13,6 @@ import logging
from app.models.dto import ChatRequest, ChatResponse, QueryRequest, QueryHit
from app.services.llm_service import LLMService
# Annahme: Der Retriever aus WP-04 liegt hier.
from app.core.retriever import Retriever
router = APIRouter()
@ -32,20 +31,16 @@ def _build_context_from_hits(hits: List[QueryHit]) -> str:
context_parts = []
for i, hit in enumerate(hits, 1):
source = hit.source or {}
# Wir probieren alle möglichen Felder, in denen Text stecken könnte
# Robuster Zugriff auf Content
content = (
source.get("text") or
source.get("content") or
source.get("page_content") or
source.get("chunk_text") or # Oft verwendet
"[[LEERER INHALT - PAYLOAD PRÜFEN]]"
source.get("chunk_text") or
"[Kein Textinhalt verfügbar]"
)
title = hit.note_id or "Unknown Note"
# Debug Log für jeden Hit
logger.info(f"Building Context [{i}]: ID={hit.node_id} Content-Length={len(str(content))}")
entry = (
f"SOURCE [{i}]: {title} (Score: {hit.total_score:.2f})\n"
f"CONTENT: {content}\n"
@ -63,7 +58,8 @@ async def chat_endpoint(
start_time = time.time()
query_id = str(uuid.uuid4())
logger.info(f"Chat request [{query_id}]: {request.message}")
# Minimales Logging für Traceability
logger.info(f"Chat request [{query_id}]: {request.message[:50]}...")
try:
# 1. Retrieval
@ -79,15 +75,13 @@ async def chat_endpoint(
# 2. Kontext bauen
if not hits:
logger.info(f"[{query_id}] No hits found for context.")
logger.info(f"[{query_id}] No hits found.")
context_str = "Keine relevanten Notizen gefunden."
else:
context_str = _build_context_from_hits(hits)
# WICHTIG: Wir loggen den ersten Teil des Kontextes, um zu sehen, was das LLM bekommt
logger.info(f"--- LLM CONTEXT PREVIEW ---\n{context_str[:500]}\n--- END PREVIEW ---")
# 3. LLM Generation
logger.info(f"[{query_id}] Sending to LLM ({len(hits)} context chunks)...")
answer_text = await llm.generate_rag_response(
query=request.message,
context_str=context_str
@ -95,6 +89,7 @@ async def chat_endpoint(
# 4. Response
duration_ms = int((time.time() - start_time) * 1000)
logger.info(f"[{query_id}] Completed in {duration_ms}ms")
return ChatResponse(
query_id=retrieve_result.query_id,