First version of WP-05

This commit is contained in:
Lars 2025-12-08 10:32:57 +01:00
parent fbaa07ba88
commit 90f0be6baf
7 changed files with 320 additions and 40 deletions

app/config.py
@@ -2,16 +2,15 @@
app/config.py - central configuration (ENV settings)

Version:
    0.3.0 (WP-05: LLM & chat config)
As of:
    2025-12-08
"""
from __future__ import annotations
import os
from functools import lru_cache

class Settings:
    # Qdrant
@@ -20,11 +19,19 @@ class Settings:
    COLLECTION_PREFIX: str = os.getenv("MINDNET_PREFIX", "mindnet")
    VECTOR_SIZE: int = int(os.getenv("MINDNET_VECTOR_SIZE", "384"))
    DISTANCE: str = os.getenv("MINDNET_DISTANCE", "Cosine")

    # Embeddings
    MODEL_NAME: str = os.getenv("MINDNET_MODEL", "sentence-transformers/all-MiniLM-L6-v2")

    # WP-05 LLM / Ollama
    OLLAMA_URL: str = os.getenv("MINDNET_OLLAMA_URL", "http://127.0.0.1:11434")
    LLM_MODEL: str = os.getenv("MINDNET_LLM_MODEL", "llama3")  # or 'mistral', 'gemma'
    PROMPTS_PATH: str = os.getenv("MINDNET_PROMPTS_PATH", "config/prompts.yaml")

    # API
    DEBUG: bool = os.getenv("DEBUG", "false").lower() == "true"

    # WP-04 retriever defaults
    RETRIEVER_W_SEM: float = float(os.getenv("MINDNET_WP04_W_SEM", "0.70"))
    RETRIEVER_W_EDGE: float = float(os.getenv("MINDNET_WP04_W_EDGE", "0.25"))
    RETRIEVER_W_CENT: float = float(os.getenv("MINDNET_WP04_W_CENT", "0.05"))
@@ -34,4 +41,4 @@ class Settings:
@lru_cache
def get_settings() -> Settings:
    return Settings()
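For reference, a minimal sketch (assuming the Settings class above) of overriding the new WP-05 values via environment variables. Note that the attributes are read at import time:

# Sketch: overriding the WP-05 LLM settings (illustrative values).
# The Settings attributes are evaluated when app/config.py is imported,
# so set the env vars first (shell, .env loader, or as below).
import os
os.environ["MINDNET_LLM_MODEL"] = "mistral"  # instead of the default "llama3"

from app.config import get_settings  # import only after the env vars are set
print(get_settings().LLM_MODEL)  # -> "mistral"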

app/main.py
@@ -10,8 +10,9 @@ from .routers.qdrant_router import router as qdrant_router
from .routers.query import router as query_router
from .routers.graph import router as graph_router
from .routers.tools import router as tools_router
from .routers.feedback import router as feedback_router
# NEW: chat router (WP-05)
from .routers.chat import router as chat_router

try:
    from .routers.admin import router as admin_router
@@ -19,7 +20,7 @@ except Exception:
    admin_router = None

def create_app() -> FastAPI:
    app = FastAPI(title="mindnet API", version="0.5.0")  # version bump for WP-05
    s = get_settings()

    @app.get("/healthz")
@@ -32,8 +33,10 @@ def create_app() -> FastAPI:
    app.include_router(query_router, prefix="/query", tags=["query"])
    app.include_router(graph_router, prefix="/graph", tags=["graph"])
    app.include_router(tools_router, prefix="/tools", tags=["tools"])
    app.include_router(feedback_router, prefix="/feedback", tags=["feedback"])
    # NEW: chat endpoint (WP-05)
    app.include_router(chat_router, prefix="/chat", tags=["chat"])

    if admin_router:
        app.include_router(admin_router, prefix="/admin", tags=["admin"])

app/models/dto.py
@@ -1,17 +1,14 @@
"""
app/models/dto.py - Pydantic models (DTOs) for the WP-04/WP-05 endpoints

Purpose:
    Runtime models for FastAPI (requests/responses).
    WP-05 update: chat models.

Version:
    0.4.0 (update for WP-05 chat)
As of:
    2025-12-08
"""
from __future__ import annotations
@@ -42,7 +39,7 @@ class NodeDTO(BaseModel):

class EdgeDTO(BaseModel):
    """Representation of an edge in the API graph."""
    id: str
    kind: str
    source: str
    target: str
    weight: float
@@ -53,11 +50,7 @@ class EdgeDTO(BaseModel):

class QueryRequest(BaseModel):
    """
    Request for /query.
    """
    mode: Literal["semantic", "edge", "hybrid"] = "hybrid"
    query: Optional[str] = None
@@ -71,7 +64,7 @@ class QueryRequest(BaseModel):

class FeedbackRequest(BaseModel):
    """
    User feedback on a specific hit.
    """
    query_id: str = Field(..., description="ID of the original search")
    node_id: str = Field(..., description="ID of the rated hit")
@@ -79,19 +72,28 @@
    comment: Optional[str] = None

class ChatRequest(BaseModel):
    """
    WP-05: request for /chat.
    """
    message: str = Field(..., description="The user's message")
    conversation_id: Optional[str] = Field(None, description="Optional: ID for the chat history (not yet implemented)")
    # RAG parameters (override the defaults)
    top_k: int = 5
    explain: bool = False

# --- WP-04b explanation models ---

class ScoreBreakdown(BaseModel):
    """Breakdown of the score components."""
    semantic_contribution: float
    edge_contribution: float
    centrality_contribution: float
    raw_semantic: float
    raw_edge_bonus: float
    raw_centrality: float
    node_weight: float

class Reason(BaseModel):
@@ -115,25 +117,17 @@ class QueryHit(BaseModel):
    """A single hit object for /query."""
    node_id: str
    note_id: Optional[str]
    semantic_score: float
    edge_bonus: float
    centrality_bonus: float
    total_score: float
    paths: Optional[List[List[Dict]]] = None
    source: Optional[Dict] = None
    explanation: Optional[Explanation] = None

class QueryResponse(BaseModel):
    """Response structure for /query."""
    query_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    results: List[QueryHit]
    used_mode: str
@@ -145,4 +139,14 @@ class GraphResponse(BaseModel):
    center_note_id: str
    nodes: List[NodeDTO]
    edges: List[EdgeDTO]
    stats: Dict[str, int]

class ChatResponse(BaseModel):
    """
    WP-05: response structure for /chat.
    """
    query_id: str = Field(..., description="Traceability ID (the same one as for the search)")
    answer: str = Field(..., description="Answer generated by the LLM")
    sources: List[QueryHit] = Field(..., description="The sources used for the answer")
    latency_ms: int
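A quick sketch of the new chat DTOs in use, assuming the models above (values are illustrative):

# Sketch: a ChatRequest as the /chat endpoint receives it.
from app.models.dto import ChatRequest

req = ChatRequest(message="What is the goal of mindnet?", top_k=3, explain=True)
print(req.model_dump())  # Pydantic 2.x serialization
# {'message': 'What is the goal of mindnet?', 'conversation_id': None,
#  'top_k': 3, 'explain': True}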

app/routers/chat.py Normal file
@@ -0,0 +1,112 @@
"""
app/routers/chat.py - RAG endpoint (WP-05)

Purpose:
    Connects retrieval (WP-04) with LLM generation (WP-05).
    1. Receives the user's question.
    2. Fetches relevant chunks (retriever).
    3. Builds a context string.
    4. Generates an answer via Ollama.

Version:
    0.1.0
"""
from fastapi import APIRouter, HTTPException, Depends
from typing import List
import time
import uuid
import logging

from app.models.dto import ChatRequest, ChatResponse, QueryRequest, QueryHit
from app.services.llm_service import LLMService
# Assumption: the WP-04 retriever lives here.
# If the import fails, check the path (e.g. app.services.retriever or app.core.retriever).
from app.core.retriever import Retriever

router = APIRouter()
logger = logging.getLogger(__name__)

# Dependencies for the services (singletons or a factory would be cleaner; instantiated pragmatically here)
def get_llm_service():
    return LLMService()

def get_retriever():
    return Retriever()
def _build_context_from_hits(hits: List[QueryHit]) -> str:
    """
    Formats the search hits into a single string for the prompt.
    Extracts the text from hit.source (where the chunk content lives).
    """
    context_parts = []
    for i, hit in enumerate(hits, 1):
        # Try a few common field names for the chunk text
        source = hit.source or {}
        content = source.get("text") or source.get("content") or "No text content available."
        title = hit.note_id or "Unknown Note"
        # Resulting format:
        # SOURCE [1]: note title (Score: 0.85)
        # CONTENT: ...
        entry = (
            f"SOURCE [{i}]: {title} (Score: {hit.total_score:.2f})\n"
            f"CONTENT: {content}\n"
        )
        context_parts.append(entry)
    return "\n---\n".join(context_parts)
@router.post("/", response_model=ChatResponse)
async def chat_endpoint(
    request: ChatRequest,
    llm: LLMService = Depends(get_llm_service),
    retriever: Retriever = Depends(get_retriever)
):
    start_time = time.time()
    query_id = str(uuid.uuid4())
    logger.info(f"Chat request [{query_id}]: {request.message}")

    try:
        # 1. Retrieval: reuse the existing WP-04 retriever by
        #    mapping the ChatRequest onto a QueryRequest.
        query_req = QueryRequest(
            query=request.message,
            mode="hybrid",  # hybrid is the most robust mode for RAG
            top_k=request.top_k,
            explain=request.explain  # pass traceability through
        )
        # Run the retrieval (retriever.search expects a QueryRequest).
        # Note: retrieve_result is a QueryResponse (from the DTOs).
        retrieve_result = await retriever.search(query_req)
        hits = retrieve_result.results

        # 2. Build the context
        if not hits:
            logger.info(f"[{query_id}] No hits found for context.")
            context_str = "No relevant notes found."
        else:
            context_str = _build_context_from_hits(hits)

        # 3. LLM generation
        logger.info(f"[{query_id}] Generating answer with {len(hits)} context chunks...")
        answer_text = await llm.generate_rag_response(
            query=request.message,
            context_str=context_str
        )

        # 4. Build the response
        duration_ms = int((time.time() - start_time) * 1000)
        return ChatResponse(
            query_id=retrieve_result.query_id,  # use the retriever's query_id for consistency
            answer=answer_text,
            sources=hits,
            latency_ms=duration_ms
        )
    except Exception as e:
        logger.error(f"Error in chat endpoint: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
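To illustrate the context format the router builds, a minimal sketch that feeds one hand-built QueryHit into _build_context_from_hits (all values are made up):

# Sketch: what _build_context_from_hits produces for a single hit.
from app.models.dto import QueryHit
from app.routers.chat import _build_context_from_hits

hit = QueryHit(
    node_id="n-123",
    note_id="project-ideas.md",  # hypothetical note
    semantic_score=0.82,
    edge_bonus=0.10,
    centrality_bonus=0.01,
    total_score=0.85,
    source={"text": "mindnet should make notes searchable as a knowledge graph."},
)
print(_build_context_from_hits([hit]))
# SOURCE [1]: project-ideas.md (Score: 0.85)
# CONTENT: mindnet should make notes searchable as a knowledge graph.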

app/services/llm_service.py
@@ -0,0 +1,79 @@
"""
app/services/llm_service.py - LLM client (Ollama)

Purpose:
    Encapsulates the communication with the Ollama API.
    Loads prompts & templates from the YAML config.

Version:
    0.1.0 (WP-05 init)
"""
import httpx
import yaml
import logging
from pathlib import Path

from app.config import get_settings

logger = logging.getLogger(__name__)

class LLMService:
    def __init__(self):
        self.settings = get_settings()
        self.prompts = self._load_prompts()
        self.client = httpx.AsyncClient(base_url=self.settings.OLLAMA_URL, timeout=60.0)

    def _load_prompts(self) -> dict:
        """Loads the prompts from the configured YAML file."""
        path = Path(self.settings.PROMPTS_PATH)
        if not path.exists():
            # Fallback in case the file does not exist yet (dev safeguard)
            logger.warning(f"Prompt config not found at {path}, using defaults.")
            return {
                "system_prompt": "You are a helpful AI assistant.",
                "rag_template": "Context: {context_str}\nQuestion: {query}"
            }
        try:
            with open(path, "r", encoding="utf-8") as f:
                return yaml.safe_load(f)
        except Exception as e:
            logger.error(f"Failed to load prompts: {e}")
            return {}
    async def generate_rag_response(self, query: str, context_str: str) -> str:
        """
        Generates an answer based on the query and the retrieved context.
        """
        system_prompt = self.prompts.get("system_prompt", "")
        template = self.prompts.get("rag_template", "{context_str}\n\n{query}")
        # Fill the template
        final_prompt = template.format(context_str=context_str, query=query)

        payload = {
            "model": self.settings.LLM_MODEL,
            "system": system_prompt,
            "prompt": final_prompt,
            "stream": False,  # no streaming for now; keeps the handling simple
            "options": {
                "temperature": 0.7,
                "num_ctx": 4096
            }
        }
        try:
            response = await self.client.post("/api/generate", json=payload)
            response.raise_for_status()
            data = response.json()
            return data.get("response", "")
        except httpx.HTTPError as e:
            logger.error(f"Ollama API Error: {e}")
            return f"Error while generating the answer: {str(e)}"
        except Exception as e:
            logger.error(f"LLM Service Error: {e}")
            return "An unexpected error occurred."

    async def close(self):
        await self.client.aclose()
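A minimal sketch of driving LLMService outside FastAPI, assuming an Ollama instance is reachable under the configured MINDNET_OLLAMA_URL:

# Sketch: calling the service directly (requires a running Ollama).
import asyncio
from app.services.llm_service import LLMService

async def main():
    llm = LLMService()
    try:
        answer = await llm.generate_rag_response(
            query="What is mindnet?",
            context_str="SOURCE [1]: readme.md (Score: 0.90)\nCONTENT: mindnet is a personal knowledge graph.",
        )
        print(answer)
    finally:
        await llm.close()  # release the shared httpx.AsyncClient

asyncio.run(main())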

config/prompts.yaml Normal file
@@ -0,0 +1,31 @@
# config/prompts.yaml — personality & templates for mindnet
# Version: 1.0

system_prompt: |
  You are 'mindnet', a personal AI memory and a digital twin.

  Your identity:
  - You are helpful, precise, and reflective.
  - You do not invent facts (avoid hallucinations).
  - You base your answers strictly on the provided CONTEXT (excerpts from notes).
  - If the context does not contain the answer, say so openly.
  - You cite sources implicitly by naming note titles where that is useful.

  Your style:
  - You answer in the user's language (usually German).
  - You are 'I' (the memory) and address the user as 'you' (the owner).
  - Format answers with Markdown (bold, lists) to improve readability.

rag_template: |
  BACKGROUND KNOWLEDGE (CONTEXT):
  ---------------------
  {context_str}
  ---------------------

  USER QUESTION:
  {query}

  INSTRUCTION:
  Answer the question based on the context above.
  If you refer to a specific note, mention its title.
  Explain connections where the context makes them visible.
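Since generate_rag_response fills rag_template with str.format, the template may only contain the placeholders {context_str} and {query}; any other literal braces in the YAML would break the format call. A minimal sketch of that fill step (illustrative values):

# Sketch: how llm_service fills rag_template.
import yaml

with open("config/prompts.yaml", encoding="utf-8") as f:
    prompts = yaml.safe_load(f)

final_prompt = prompts["rag_template"].format(
    context_str="SOURCE [1]: goals.md (Score: 0.91)\nCONTENT: ...",
    query="What is the goal of mindnet?",
)
print(final_prompt)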

tests/test_chat_wp05.py Normal file
@@ -0,0 +1,44 @@
import requests

# Configuration
API_URL = "http://localhost:8002/chat/"  # adjust the port if necessary
QUESTION = "What is the goal of mindnet?"  # a question your notes can answer

def test_chat():
    payload = {
        "message": QUESTION,
        "top_k": 3,
        "explain": True
    }
    print(f"Sending Question: '{QUESTION}'...")
    try:
        response = requests.post(API_URL, json=payload)
        response.raise_for_status()
        data = response.json()

        print("\n=== RESPONSE ===")
        print(data["answer"])
        print("================\n")
        print(f"Query ID: {data['query_id']}")
        print(f"Latency: {data['latency_ms']}ms")
        print("\nUsed Sources:")
        for source in data["sources"]:
            score = source.get("total_score", 0)
            note = source.get("note_id") or "unknown"  # note_id may be null
            print(f"- {note} (Score: {score:.3f})")
    except requests.exceptions.ConnectionError:
        print("Error: Could not connect to API. Is it running on port 8002?")
    except Exception as e:
        print(f"Error: {e}")
        if 'response' in locals():
            print(response.text)

if __name__ == "__main__":
    test_chat()