# mindnet/app/models/dto.py — 164 lines, 5.3 KiB, Python (pasted file metadata, kept as a comment)
"""
FILE: app/models/dto.py
DESCRIPTION: Pydantic-Modelle (DTOs) für Request/Response Bodies. Definiert das API-Schema.
VERSION: 0.7.0 (WP-25: Multi-Stream & Agentic RAG Support)
STATUS: Active
DEPENDENCIES: pydantic, typing, uuid
"""
from __future__ import annotations
from pydantic import BaseModel, Field
from typing import List, Literal, Optional, Dict, Any
import uuid
# WP-25: extended edge kinds per the new decision_engine.yaml.
# EdgeKind is the closed vocabulary of relation types the graph API accepts;
# keep this list in sync with decision_engine.yaml when adding kinds.
EdgeKind = Literal[
"references", "references_at", "backlink", "next", "prev",
"belongs_to", "depends_on", "related_to", "similar_to",
"caused_by", "derived_from", "based_on", "solves", "blocks",
"uses", "guides", "enforced_by", "implemented_in", "part_of",
"experienced_in", "impacts", "risk_of"
]
# --- Base DTOs ---
class NodeDTO(BaseModel):
    """API-graph representation of a single node (a note or a chunk).

    All fields except ``id`` and ``type`` are optional and are only
    populated when the producing endpoint has the data available.
    """

    id: str
    type: Literal["note", "chunk"]  # discriminates note-level vs chunk-level nodes
    title: Optional[str] = None
    note_id: Optional[str] = None  # presumably the owning note for chunk nodes — verify against producer
    tags: Optional[List[str]] = None
    in_degree: Optional[int] = None   # incoming edge count, when graph stats were computed
    out_degree: Optional[int] = None  # outgoing edge count, when graph stats were computed
    score: Optional[float] = None     # retrieval score, when the node came from a query
    section_title: Optional[str] = None
    section_path: Optional[str] = None
    path: Optional[str] = None
class EdgeDTO(BaseModel):
    """API-graph representation of a single edge between two nodes."""

    id: str
    kind: str  # edge type; see EdgeKind for the known values
    source: str  # id of the source node
    target: str  # id of the target node
    weight: float
    # Orientation of the edge relative to the queried node.
    direction: Literal["out", "in", "undirected"] = "out"
    # How the edge came into existence (explicitly authored vs derived).
    provenance: Optional[Literal["explicit", "rule", "smart", "structure"]] = "explicit"
    confidence: float = 1.0  # certainty of derived edges; explicit edges default to 1.0
    target_section: Optional[str] = None
# --- Request Models ---
class QueryRequest(BaseModel):
    """
    Request body for /query. Supports multi-stream isolation via ``filters``.

    Either ``query`` (raw text) or ``query_vector`` (pre-computed embedding)
    may be supplied, depending on ``mode``.
    """

    mode: Literal["semantic", "edge", "hybrid"] = "hybrid"
    query: Optional[str] = None
    query_vector: Optional[List[float]] = None  # pre-computed embedding, alternative to `query`
    top_k: int = 10
    # NOTE: mutable defaults are built via default_factory so no dict instance
    # is ever shared between requests (Pydantic best practice).
    expand: Dict[str, Any] = Field(
        default_factory=lambda: {
            "depth": 1,
            "edge_types": ["references", "belongs_to", "prev", "next", "depends_on", "related_to"],
        }
    )
    filters: Optional[Dict[str, Any]] = None  # multi-stream isolation filters
    ret: Dict[str, Any] = Field(
        default_factory=lambda: {"with_paths": True, "with_notes": True, "with_chunks": True}
    )
    explain: bool = False  # when True, responses carry per-hit Explanation data
    # WP-22/25: dynamic weighting of the graph "highways" (edge kind -> boost factor)
    boost_edges: Optional[Dict[str, float]] = None
class FeedbackRequest(BaseModel):
    """User feedback on one specific hit or on the overall generated answer."""

    # Links the feedback back to the originating /query call.
    query_id: str = Field(..., description="ID der ursprünglichen Suche")
    node_id: str = Field(..., description="ID des bewerteten Treffers oder 'generated_answer'")
    # Rating on a 1..5 scale, enforced by the ge/le constraints.
    score: int = Field(..., ge=1, le=5, description="1 (Irrelevant) bis 5 (Perfekt)")
    comment: Optional[str] = None
class ChatRequest(BaseModel):
    """Request body for /chat (WP-25 entry point)."""

    message: str = Field(..., description="Die Nachricht des Users")
    # Optional: ties the message to an ongoing conversation history.
    conversation_id: Optional[str] = Field(None, description="ID für Chat-Verlauf")
    top_k: int = 5  # number of sources to retrieve for the answer
    explain: bool = False  # when True, hits carry Explanation data
# --- Explanation Models ---
class ScoreBreakdown(BaseModel):
    """Per-component breakdown of a hit's score, following the WP-22 formula."""

    # Weighted contributions that sum into the final score.
    semantic_contribution: float
    edge_contribution: float
    centrality_contribution: float
    # Raw (pre-weighting) component values, kept for transparency.
    raw_semantic: float
    raw_edge_bonus: float
    raw_centrality: float
    # Multiplicative factors applied on top of the components.
    node_weight: float
    status_multiplier: float = 1.0
    graph_boost_factor: float = 1.0
class Reason(BaseModel):
    """One human-readable reason explaining why a hit was ranked where it was."""

    # WP-25: 'status' was added to stay in sync with retriever.py.
    kind: Literal["semantic", "edge", "type", "centrality", "lifecycle", "status"]
    message: str
    score_impact: Optional[float] = None  # signed effect of this reason on the score, if known
    details: Optional[Dict[str, Any]] = None  # free-form extra data for debugging/UI
class Explanation(BaseModel):
    """Container bundling all explanation data for a single hit."""

    breakdown: ScoreBreakdown  # numeric score decomposition
    reasons: List[Reason]      # human-readable ranking reasons
    related_edges: Optional[List[EdgeDTO]] = None  # edges that influenced the ranking
    applied_intent: Optional[str] = None           # WP-25 strategy used for this query
    applied_boosts: Optional[Dict[str, float]] = None  # effective edge-kind boost factors
# --- Response Models ---
class QueryHit(BaseModel):
    """A single result item returned by retrieval."""

    node_id: str
    note_id: str
    # Score components plus the final combined value.
    semantic_score: float
    edge_bonus: float
    centrality_bonus: float
    total_score: float
    # Optional extras, filled depending on the request's `ret` options.
    paths: Optional[List[List[Dict]]] = None  # graph paths leading to this hit
    source: Optional[Dict] = None
    payload: Optional[Dict] = None
    explanation: Optional[Explanation] = None  # present when `explain` was requested
class QueryResponse(BaseModel):
    """Response shape for /query (also consumed by the DecisionEngine streams)."""

    # Fresh UUID per response; used for feedback traceability.
    query_id: str = Field(default_factory=lambda: str(uuid.uuid4()))
    results: List[QueryHit]
    used_mode: str  # the retrieval mode that was actually applied
    latency_ms: int
class GraphResponse(BaseModel):
    """Response shape for /graph/{note_id}: the neighborhood around one note."""

    center_note_id: str  # the note the graph is centered on
    nodes: List[NodeDTO]
    edges: List[EdgeDTO]
    stats: Dict[str, int]  # summary counters (e.g. node/edge totals) — confirm keys at producer
class ChatResponse(BaseModel):
    """
    Response shape for /chat.

    WP-25: ``intent`` now reflects the strategy chosen for the request.
    """

    query_id: str = Field(..., description="Traceability ID")
    answer: str = Field(..., description="Generierte Antwort vom LLM")
    sources: List[QueryHit] = Field(..., description="Die genutzten Quellen (alle Streams)")
    latency_ms: int  # end-to-end processing time in milliseconds
    intent: Optional[str] = Field("FACT", description="Die gewählte WP-25 Strategie")
    intent_source: Optional[str] = Field("LLM_Router", description="Quelle der Intent-Erkennung")