diff --git a/app/frontend/ui.py b/app/frontend/ui.py
index dc80376..d166661 100644
--- a/app/frontend/ui.py
+++ b/app/frontend/ui.py
@@ -2,22 +2,17 @@ import streamlit as st
import requests
import uuid
import os
-import json
-from datetime import datetime
+import time
from dotenv import load_dotenv
# --- CONFIGURATION ---
-# Load .env file explicitly to get timeouts and URLs
load_dotenv()
API_BASE_URL = os.getenv("MINDNET_API_URL", "http://localhost:8002")
CHAT_ENDPOINT = f"{API_BASE_URL}/chat"
FEEDBACK_ENDPOINT = f"{API_BASE_URL}/feedback"
-# Timeout strategy:
-# 1. Try MINDNET_API_TIMEOUT (specific for frontend)
-# 2. Try MINDNET_LLM_TIMEOUT (backend setting)
-# 3. Default to 300 seconds (5 minutes) for local inference safety
+# Timeout strategy: prefer MINDNET_API_TIMEOUT (frontend-specific), then fall back
+# to MINDNET_LLM_TIMEOUT (backend setting), else default to 300 s for local inference.
timeout_setting = os.getenv("MINDNET_API_TIMEOUT") or os.getenv("MINDNET_LLM_TIMEOUT")
API_TIMEOUT = float(timeout_setting) if timeout_setting else 300.0
@@ -28,16 +23,10 @@ st.set_page_config(
layout="centered"
)
-# Custom CSS for cleaner look
st.markdown("""
""", unsafe_allow_html=True)
-# --- SESSION STATE INITIALIZATION ---
+# --- SESSION STATE ---
if "messages" not in st.session_state:
st.session_state.messages = []
if "user_id" not in st.session_state:
st.session_state.user_id = str(uuid.uuid4())
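+# NOTE: user_id is ephemeral; every new browser session gets a fresh UUID.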
-# --- API CLIENT FUNCTIONS ---
+# --- API FUNCTIONS ---
def send_chat_message(message: str, top_k: int, explain: bool):
- """Sends the user message to the FastAPI backend."""
- payload = {
- "message": message,
- "top_k": top_k,
- "explain": explain
- }
+ payload = {"message": message, "top_k": top_k, "explain": explain}
try:
- # Use the configured timeout from .env
response = requests.post(CHAT_ENDPOINT, json=payload, timeout=API_TIMEOUT)
response.raise_for_status()
return response.json()
except requests.exceptions.ReadTimeout:
- return {"error": f"Timeout: Das Backend hat nicht innerhalb von {int(API_TIMEOUT)} Sekunden geantwortet. (Local LLM is busy)."}
- except requests.exceptions.ConnectionError:
- return {"error": f"Backend nicht erreichbar unter {API_BASE_URL}. Läuft der Server?"}
+ return {"error": f"Timeout ({int(API_TIMEOUT)}s). Das lokale LLM rechnet noch."}
except Exception as e:
return {"error": str(e)}
-def send_feedback(query_id: str, score: int):
- """Sends feedback to the backend."""
- # Note: We rate the overall answer. API expects node_id.
- # We use 'generated_answer' as a convention for the full response.
+def submit_feedback(query_id: str, node_id: str, score: int, comment: str = None):
+ """Sendet Feedback asynchron."""
payload = {
"query_id": query_id,
- "node_id": "generated_answer",
+ "node_id": node_id,
"score": score,
- "comment": "User feedback via Streamlit UI"
+ "comment": comment
}
try:
- requests.post(FEEDBACK_ENDPOINT, json=payload, timeout=5)
- return True
- except:
- return False
+ resp = requests.post(FEEDBACK_ENDPOINT, json=payload, timeout=5)
+ resp.raise_for_status() # surface HTTP errors instead of toasting success below
+ # st.toast gives unobtrusive confirmation without triggering a rerun
+ target = "Antwort" if node_id == "generated_answer" else "Quelle"
+ st.toast(f"Feedback für {target} gespeichert! (Score: {score})")
+ except Exception as e:
+ st.error(f"Feedback-Fehler: {e}")
# --- UI COMPONENTS ---
def render_sidebar():
with st.sidebar:
st.header("⚙️ Konfiguration")
- st.markdown(f"**Backend:** `{API_BASE_URL}`")
- st.caption(f"⏱️ Timeout: {int(API_TIMEOUT)}s")
+ st.caption(f"Backend: `{API_BASE_URL}`")
- st.markdown("---")
- st.subheader("Retrieval Settings")
- top_k = st.slider("Quellen (Top-K)", min_value=1, max_value=10, value=5)
- explain_mode = st.checkbox("Explanation Layer", value=True, help="Zeigt an, warum Quellen gewählt wurden.")
+ st.subheader("Retrieval")
+ top_k = st.slider("Anzahl Quellen", 1, 10, 5)
+ explain_mode = st.toggle("Explanation Layer", value=True)
- st.markdown("---")
- st.markdown("### 🧠 System Status")
- st.info(f"**Version:** v2.3.1\n\n**Modules:**\n- Decision Engine: ✅\n- Hybrid Router: ✅\n- Feedback Loop: ✅")
-
- if st.button("Clear Chat History"):
+ st.divider()
+ st.info("WP-10: Advanced Feedback Loop Active")
+ if st.button("Reset Chat"):
st.session_state.messages = []
st.rerun()
-
return top_k, explain_mode
def render_intent_badge(intent, source):
- """Visualizes the Decision Engine state."""
icon = "🧠"
if intent == "EMPATHY": icon = "❤️"
elif intent == "DECISION": icon = "⚖️"
elif intent == "CODING": icon = "💻"
elif intent == "FACT": icon = "📚"
-
- return f"""
-
- {icon} Intent: {intent} ({source})
-
- """
+ return f"""{icon} Intent: {intent} ({source})
"""
-def render_sources(sources):
- """Renders the retrieved sources in expandable cards."""
+def render_sources(sources, query_id):
+ """
+ Rendert Quellen inklusive granularem Feedback-Mechanismus.
+ """
if not sources:
return
st.markdown("#### 📚 Verwendete Quellen")
+
for idx, hit in enumerate(sources):
score = hit.get('total_score', 0)
+ node_id = hit.get('node_id')
+ title = hit.get('note_id', 'Unbekannt')
payload = hit.get('payload', {})
note_type = payload.get('type', 'unknown')
- title = hit.get('note_id', 'Unbekannt')
- # Determine Header Color/Icon based on score
+ # Icon based on score
score_icon = "🟢" if score > 0.8 else "🟡" if score > 0.5 else "⚪"
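+ # NOTE: 0.8 / 0.5 are heuristic display thresholds for the traffic-light icon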
+ expander_title = f"{score_icon} {title} (Typ: {note_type}, Score: {score:.2f})"
- with st.expander(f"{score_icon} {title} (Typ: {note_type}, Score: {score:.2f})"):
- # Content
- content = hit.get('source', {}).get('text', 'Kein Text verfügbar.')
- st.markdown(f"_{content[:300]}..._")
+ with st.expander(expander_title):
+ # 1. Content
+ text = hit.get('source', {}).get('text', 'Kein Text')
+ st.markdown(f"_{text[:300]}..._")
- # Explanation (WP-04b)
- explanation = hit.get('explanation')
- if explanation:
- st.markdown("---")
- st.caption("**Warum wurde das gefunden?**")
- reasons = explanation.get('reasons', [])
- for r in reasons:
+ # 2. Explanation (Why-Layer)
+ if 'explanation' in hit and hit['explanation']:
+ st.caption("**Warum gefunden?**")
+ for r in hit['explanation'].get('reasons', []):
st.caption(f"- {r.get('message')}")
-# --- MAIN APP LOGIC ---
+ # 3. Granular feedback (source level)
+ st.markdown("---")
+ c1, c2 = st.columns([3, 1])
+ with c1:
+ st.caption("War diese Quelle hilfreich für die Antwort?")
+ with c2:
+ # Callback wrapper for per-source feedback (defaults are overridden by kwargs below)
+ def on_source_fb(qid=query_id, nid=node_id, k=f"fb_src_{query_id}_{node_id}"):
+ val = st.session_state.get(k)
+ if val is None: # rating was deselected, nothing to submit
+ return
+ # Mapping: thumbs up (1) -> score 5, thumbs down (0) -> score 1
+ mapped_score = 5 if val == 1 else 1
+ submit_feedback(qid, nid, mapped_score, comment="Source Feedback via UI")
-top_k_setting, explain_setting = render_sidebar()
+ st.feedback(
+ "thumbs",
+ key=f"fb_src_{query_id}_{node_id}", # Unique Key pro Query/Node
+ on_change=on_source_fb,
+ kwargs={"qid": query_id, "nid": node_id, "k": f"fb_src_{query_id}_{node_id}"}
+ )
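+ # Streamlit runs on_change (with the kwargs above) before the next rerun, so the
+ # callback sees this iteration's query/node instead of the loop's final values.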
+# --- MAIN APP ---
+
+top_k, show_explain = render_sidebar()
st.title("mindnet v2.3.1")
-st.caption("Lead Frontend Architect Edition | WP-10 Chat Interface")
-# 1. Render History
+# 1. Render chat history
for msg in st.session_state.messages:
with st.chat_message(msg["role"]):
if msg["role"] == "assistant":
- # Render Meta-Data first
+ # Metadata
if "intent" in msg:
st.markdown(render_intent_badge(msg["intent"], msg.get("intent_source", "?")), unsafe_allow_html=True)
+ # Answer text
st.markdown(msg["content"])
- # Render Sources
+ # Sources incl. feedback controls. Making the controls read-only for older
+ # messages is awkward in Streamlit, so we render them for every message and
+ # let the widget keys in session state carry each rating across reruns.
if "sources" in msg:
- render_sources(msg["sources"])
+ render_sources(msg["sources"], msg["query_id"])
- # Render Latency info
- if "latency_ms" in msg:
- st.caption(f"⏱️ Antwortzeit: {msg['latency_ms']}ms | Query-ID: `{msg.get('query_id')}`")
+ # Global feedback (star rating for the whole answer)
+ qid = msg["query_id"]
+
+ def on_global_fb(q=qid, k=f"fb_glob_{qid}"): # defaults bind per message
+ val = st.session_state.get(k) # "stars" yields 0-4, or None when deselected
+ if val is not None:
+ submit_feedback(q, "generated_answer", val + 1, comment="Global Star Rating")
+
+ st.caption("Wie gut war diese Antwort?")
+ st.feedback(
+ "stars",
+ key=f"fb_glob_{qid}",
+ on_change=on_global_fb
+ )
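+ # The key fb_glob_{qid} persists the rating in session_state across reruns,
+ # so re-rendering the history does not fire on_change again.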
else:
st.markdown(msg["content"])
-# 2. Handle User Input
-if prompt := st.chat_input("Was beschäftigt dich?"):
- # Add User Message
+# 2. User Input
+if prompt := st.chat_input("Deine Frage an das System..."):
+ # Show the user message
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
- # Generate Response
+ # API Call
with st.chat_message("assistant"):
- message_placeholder = st.empty()
- status_placeholder = st.empty()
+ with st.spinner("Thinking..."):
+ resp = send_chat_message(prompt, top_k, show_explain)
- with st.spinner("Thinking... (Decision Engine Active)"):
- api_response = send_chat_message(prompt, top_k_setting, explain_setting)
-
- if "error" in api_response:
- st.error(api_response["error"])
+ if "error" in resp:
+ st.error(resp["error"])
else:
- # Extract data
- answer = api_response.get("answer", "")
- intent = api_response.get("intent", "FACT")
- source = api_response.get("intent_source", "Unknown")
- query_id = api_response.get("query_id")
- hits = api_response.get("sources", [])
- latency = api_response.get("latency_ms", 0)
+ # Extract response data
+ answer = resp.get("answer", "")
+ intent = resp.get("intent", "FACT")
+ source = resp.get("intent_source", "Unknown")
+ query_id = resp.get("query_id")
+ hits = resp.get("sources", [])
+
+ # Render immediately so the user does not wait for the next rerun
+ st.markdown(render_intent_badge(intent, source), unsafe_allow_html=True)
+ st.markdown(answer)
+ render_sources(hits, query_id)
+
+ def on_new_fb(q=query_id, k=f"fb_glob_{query_id}"): # feedback slot for the NEW message
+ val = st.session_state.get(k) # 0-4, or None when deselected
+ if val is not None:
+ submit_feedback(q, "generated_answer", val + 1, comment="Global Star Rating")
+ st.caption("Wie gut war diese Antwort?")
+ st.feedback("stars", key=f"fb_glob_{query_id}", on_change=on_new_fb)
- # 1. Show Intent
- status_placeholder.markdown(render_intent_badge(intent, source), unsafe_allow_html=True)
-
- # 2. Show Answer
- message_placeholder.markdown(answer)
-
- # 3. Show Sources
- render_sources(hits)
-
- # 4. Show Latency & Feedback UI
- st.caption(f"⏱️ {latency}ms | ID: `{query_id}`")
-
- # Feedback Buttons
- col1, col2, col3, col4 = st.columns([1,1,1,4])
- with col1:
- if st.button("👍", key=f"up_{query_id}"):
- send_feedback(query_id, 5)
- st.toast("Feedback gesendet: Positiv!")
- with col2:
- if st.button("👎", key=f"down_{query_id}"):
- send_feedback(query_id, 1)
- st.toast("Feedback gesendet: Negativ.")
-
- # Save to history
+ # Save to history
st.session_state.messages.append({
"role": "assistant",
"content": answer,
"intent": intent,
"intent_source": source,
"sources": hits,
- "query_id": query_id,
- "latency_ms": latency
+ "query_id": query_id
})
\ No newline at end of file
diff --git a/app/models/dto.py b/app/models/dto.py
index 85767f8..22e4cff 100644
--- a/app/models/dto.py
+++ b/app/models/dto.py
@@ -6,7 +6,7 @@ Zweck:
WP-06 Update: Intent & Intent-Source in ChatResponse.
Version:
- 0.6.1 (WP-06: Decision Engine Transparency)
+ 0.6.2 (WP-06: Decision Engine Transparency; FeedbackRequest extended)
Stand:
2025-12-09
"""
@@ -64,11 +64,14 @@ class QueryRequest(BaseModel):
class FeedbackRequest(BaseModel):
"""
- User-Feedback zu einem spezifischen Treffer.
+ User feedback on a specific hit or on the overall answer.
"""
query_id: str = Field(..., description="ID der ursprünglichen Suche")
- node_id: str = Field(..., description="ID des bewerteten Treffers")
- score: int = Field(..., ge=0, le=1, description="1 (Positiv) oder 0 (Negativ/Irrelevant)")
+ # node_id is required but doubles as a selector: the sentinel "generated_answer"
+ # rates the overall answer, while a real chunk ID rates that single source.
+ node_id: str = Field(..., description="ID of the rated hit, or 'generated_answer'")
+ # Range widened to 1-5 for more differentiated tuning signals
+ score: int = Field(..., ge=1, le=5, description="1 (irrelevant/wrong) to 5 (perfect)")
comment: Optional[str] = None
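+ # Illustrative payloads (the chunk ID is hypothetical):
+ #   {"query_id": "q-1", "node_id": "generated_answer", "score": 4}  -> rates the whole answer
+ #   {"query_id": "q-1", "node_id": "chunk-0815", "score": 1}        -> flags one source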
@@ -152,4 +155,4 @@ class ChatResponse(BaseModel):
sources: List[QueryHit] = Field(..., description="Die für die Antwort genutzten Quellen")
latency_ms: int
intent: Optional[str] = Field("FACT", description="WP-06: Erkannter Intent (FACT/DECISION)")
- intent_source: Optional[str] = Field("Unknown", description="WP-06: Quelle der Intent-Erkennung (Keyword vs. LLM)")
\ No newline at end of file
+ intent_source: Optional[str] = Field("Unknown", description="WP-06: Quelle der Intent-Erkennung (Keyword vs. LLM)")