diff --git a/app/frontend/ui.py b/app/frontend/ui.py
new file mode 100644
index 0000000..84043a5
--- /dev/null
+++ b/app/frontend/ui.py
@@ -0,0 +1,239 @@
+import streamlit as st
+import requests
+import uuid
+import os
+import json
+from datetime import datetime
+
+# --- CONFIGURATION ---
+# Configuration comes from the environment, with a localhost fallback
+API_BASE_URL = os.getenv("MINDNET_API_URL", "http://localhost:8002")
+CHAT_ENDPOINT = f"{API_BASE_URL}/chat"
+FEEDBACK_ENDPOINT = f"{API_BASE_URL}/feedback"
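+
+# Launch (local dev); MINDNET_API_URL can point the UI at another backend.
+# The URL below is only an example:
+#   MINDNET_API_URL=http://127.0.0.1:8002 streamlit run app/frontend/ui.py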
+
+# --- PAGE SETUP ---
+st.set_page_config(
+    page_title="mindnet v2.3.1",
+    page_icon="🧠",
+    layout="centered"
+)
+
+# Custom CSS for cleaner look
+st.markdown("""
+
+""", unsafe_allow_html=True)
+
+# --- SESSION STATE INITIALIZATION ---
+if "messages" not in st.session_state:
+ st.session_state.messages = []
+if "user_id" not in st.session_state:
+ st.session_state.user_id = str(uuid.uuid4())
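+# st.session_state survives Streamlit's top-to-bottom reruns, so chat history
+# and the per-session user id persist across interactions.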
+
+# --- API CLIENT FUNCTIONS ---
+
+def send_chat_message(message: str, top_k: int, explain: bool):
+    """Sends the user message to the FastAPI backend."""
+    payload = {
+        "message": message,
+        "top_k": top_k,
+        "explain": explain
+    }
+    try:
+        response = requests.post(CHAT_ENDPOINT, json=payload, timeout=60)
+        response.raise_for_status()
+        return response.json()
+    except requests.exceptions.ConnectionError:
+        return {"error": "Backend unreachable. Is the server running on port 8002?"}
+    except Exception as e:
+        return {"error": str(e)}
+
+def send_feedback(query_id: str, score: int):
+    """Sends feedback to the backend."""
+    # Note: We rate the overall answer. The API expects a node_id;
+    # 'generated_answer' is our convention for the full response.
+    payload = {
+        "query_id": query_id,
+        "node_id": "generated_answer",
+        "score": score,
+        "comment": "User feedback via Streamlit UI"
+    }
+    try:
+        requests.post(FEEDBACK_ENDPOINT, json=payload, timeout=5)
+        return True
+    except requests.exceptions.RequestException:
+        return False
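+
+# Feedback scores use a 1-5 scale here: the chat UI below sends 5 for 👍
+# and 1 for 👎.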
+
+# --- UI COMPONENTS ---
+
+def render_sidebar():
+    with st.sidebar:
+        st.header("⚙️ Configuration")
+        st.markdown(f"**Backend:** `{API_BASE_URL}`")
+
+        st.markdown("---")
+        st.subheader("Retrieval Settings")
+        top_k = st.slider("Sources (Top-K)", min_value=1, max_value=10, value=5)
+        explain_mode = st.checkbox("Explanation Layer", value=True, help="Shows why sources were selected.")
+
+        st.markdown("---")
+        st.markdown("### 🧠 System Status")
+        st.info("**Version:** v2.3.1\n\n**Modules:**\n- Decision Engine: ✅\n- Hybrid Router: ✅\n- Feedback Loop: ✅")
+
+        if st.button("Clear Chat History"):
+            st.session_state.messages = []
+            st.rerun()
+
+    return top_k, explain_mode
+
+def render_intent_badge(intent, source):
+    """Visualizes the Decision Engine state."""
+    icon = "🧠"
+    if intent == "EMPATHY": icon = "❤️"
+    elif intent == "DECISION": icon = "⚖️"
+    elif intent == "CODING": icon = "💻"
+    elif intent == "FACT": icon = "📚"
+
+ return f"""
+
+ {icon} Intent: {intent} ({source})
+
+ """
+
+def render_sources(sources):
+    """Renders the retrieved sources in expandable cards."""
+    if not sources:
+        return
+
+    st.markdown("#### 📚 Sources Used")
+    for hit in sources:
+        score = hit.get('total_score', 0)
+        payload = hit.get('payload', {})
+        note_type = payload.get('type', 'unknown')
+        title = hit.get('note_id', 'Unknown')
+
+        # Determine header icon based on score
+        score_icon = "🟢" if score > 0.8 else "🟡" if score > 0.5 else "⚪"
+
+        with st.expander(f"{score_icon} {title} (Type: {note_type}, Score: {score:.2f})"):
+            # Content preview
+            content = hit.get('source', {}).get('text', 'No text available.')
+            st.markdown(f"_{content[:300]}..._")
+
+            # Explanation (WP-04b)
+            explanation = hit.get('explanation')
+            if explanation:
+                st.markdown("---")
+                st.caption("**Why was this retrieved?**")
+                reasons = explanation.get('reasons', [])
+                for r in reasons:
+                    st.caption(f"- {r.get('message')}")
+
+# --- MAIN APP LOGIC ---
+
+top_k_setting, explain_setting = render_sidebar()
+
+st.title("mindnet v2.3.1")
+st.caption("Lead Frontend Architect Edition | WP-10 Chat Interface")
+
+# 1. Render History
+for msg in st.session_state.messages:
+    with st.chat_message(msg["role"]):
+        if msg["role"] == "assistant":
+            # Render meta-data first
+            if "intent" in msg:
+                st.markdown(render_intent_badge(msg["intent"], msg.get("intent_source", "?")), unsafe_allow_html=True)
+
+            st.markdown(msg["content"])
+
+            # Render sources
+            if "sources" in msg:
+                render_sources(msg["sources"])
+
+            # Render latency info
+            if "latency_ms" in msg:
+                st.caption(f"⏱️ Response time: {msg['latency_ms']}ms | Query-ID: `{msg.get('query_id')}`")
+
+            # Feedback controls are omitted for history items to prevent
+            # re-run issues. (In a production app, we would also check
+            # whether feedback was already given.)
+
+        else:
+            st.markdown(msg["content"])
+
+# 2. Handle User Input
+if prompt := st.chat_input("What's on your mind?"):
+    # Add user message
+    st.session_state.messages.append({"role": "user", "content": prompt})
+    with st.chat_message("user"):
+        st.markdown(prompt)
+
+    # Generate response
+    with st.chat_message("assistant"):
+        # Placeholder for the intent badge (created first so the badge
+        # appears above the answer, matching the history rendering order)
+        status_placeholder = st.empty()
+        message_placeholder = st.empty()
+
+        with st.spinner("Thinking... (Decision Engine Active)"):
+            api_response = send_chat_message(prompt, top_k_setting, explain_setting)
+
+ if "error" in api_response:
+ st.error(api_response["error"])
+ else:
+ # Extract data
+ answer = api_response.get("answer", "")
+ intent = api_response.get("intent", "FACT")
+ source = api_response.get("intent_source", "Unknown")
+ query_id = api_response.get("query_id")
+ hits = api_response.get("sources", [])
+ latency = api_response.get("latency_ms", 0)
+
+ # 1. Show Intent
+ status_placeholder.markdown(render_intent_badge(intent, source), unsafe_allow_html=True)
+
+ # 2. Show Answer
+ message_placeholder.markdown(answer)
+
+ # 3. Show Sources
+ render_sources(hits)
+
+ # 4. Show Latency & Feedback UI
+ st.caption(f"⏱️ {latency}ms | ID: `{query_id}`")
+
+            # Feedback buttons for the *new* message.
+            # on_click callbacks run before the next script rerun, so the
+            # click is handled even though this branch only executes when
+            # fresh chat input arrives.
+            def _submit_feedback(qid: str, score: int, note: str):
+                send_feedback(qid, score)
+                st.toast(note)
+
+            col1, col2, col3, col4 = st.columns([1, 1, 1, 4])
+            with col1:
+                st.button("👍", key=f"up_{query_id}",
+                          on_click=_submit_feedback,
+                          args=(query_id, 5, "Feedback sent: positive!"))
+            with col2:
+                st.button("👎", key=f"down_{query_id}",
+                          on_click=_submit_feedback,
+                          args=(query_id, 1, "Feedback sent: negative."))
+
+            # Save to history
+            st.session_state.messages.append({
+                "role": "assistant",
+                "content": answer,
+                "intent": intent,
+                "intent_source": source,
+                "sources": hits,
+                "query_id": query_id,
+                "latency_ms": latency
+            })
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index d896285..1828b3d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,25 +11,23 @@ qdrant-client>=1.15.1
pydantic>=2.11.7
numpy>=2.3.2
-# --- Markdown & Parsing (Hier fehlten Pakete!) ---
+# --- Markdown & Parsing ---
python-frontmatter>=1.1.0
-# WICHTIG: Das fehlte und verursachte den Fehler
markdown-it-py>=3.0.0
-# WICHTIG: Für types.yaml und retriever.yaml
PyYAML>=6.0.2
python-slugify>=8.0.4
# --- KI & Embeddings ---
sentence-transformers>=5.1.0
-# Torch wird meist durch sentence-transformers geholt,
-# aber wir listen es explizit für Stabilität
torch>=2.0.0
# --- Utilities ---
-# WICHTIG: Damit .env Dateien gelesen werden
python-dotenv>=1.1.1
requests>=2.32.5
tqdm>=4.67.1
# --- Testing ---
pytest>=8.4.2
+
+# --- Frontend (WP-10) ---
+streamlit>=1.39.0
\ No newline at end of file