# mindnet/tests/test_WP22_intelligence.py
# (Gitea listing metadata, preserved as a comment so the file stays valid Python:
#  commit e93bab6ea7 by Lars — "Fassadenauflösung unter app/core",
#  2025-12-28 11:04:40 +01:00; deploy to llm-node successful in 4s;
#  145 lines, 6.4 KiB, Python.)
"""
FILE: tests/test_WP22_intelligence.py
DESCRIPTION: Integrationstest für WP-22.
FIX: Erzwingt Pfad-Synchronisation für Registry & Router. Behebt Pydantic Validation Errors.
"""
import unittest
import os
import shutil
import yaml
import asyncio
from unittest.mock import MagicMock, patch, AsyncMock
# --- Modul-Caching Fix: Wir müssen Caches leeren ---
import app.routers.chat
from app.models.dto import ChatRequest, QueryHit, QueryRequest
from app.services.edge_registry import EdgeRegistry
from app.core.retrieval.retriever_scoring import compute_wp22_score, get_status_multiplier
from app.routers.chat import _classify_intent, get_decision_strategy, chat_endpoint
class TestWP22Integration(unittest.IsolatedAsyncioTestCase):
    """Integration tests for WP-22: registry/alias resolution, lifecycle
    scoring, intent classification, and the end-to-end chat pipeline.

    All filesystem state lives under a throwaway ``tests/temp_wp22`` tree,
    and the global ``get_settings`` accessors are patched so registry and
    router resolve paths inside that tree.
    """

    async def asyncSetUp(self):
        """Prepare an isolated test environment (temp vault, config, mocks)."""
        # We simulate 'vault_master' (or venv_master) here as the vault directory.
        self.test_root = os.path.abspath("tests/temp_wp22")
        self.test_vault = os.path.join(self.test_root, "vault_master")
        self.test_config_dir = os.path.join(self.test_root, "config")

        # 1. Create the directory layout.
        os.makedirs(os.path.join(self.test_vault, "01_User_Manual"), exist_ok=True)
        os.makedirs(self.test_config_dir, exist_ok=True)
        os.makedirs(os.path.join(self.test_root, "data/logs"), exist_ok=True)

        # 2. Write config files (mock decision-engine config).
        self.decision_path = os.path.join(self.test_config_dir, "decision_engine.yaml")
        self.decision_config = {
            "strategies": {
                "FACT": {
                    "trigger_keywords": ["was ist"],
                    "edge_boosts": {"part_of": 2.0},
                },
                "CAUSAL": {
                    "trigger_keywords": ["warum"],
                    "edge_boosts": {"caused_by": 3.0},
                },
            }
        }
        with open(self.decision_path, "w", encoding="utf-8") as f:
            yaml.dump(self.decision_config, f)

        # 3. Vocabulary file at the CORRECT location relative to test_vault.
        self.vocab_path = os.path.join(self.test_vault, "01_User_Manual/01_edge_vocabulary.md")
        with open(self.vocab_path, "w", encoding="utf-8") as f:
            f.write("| System-Typ | Aliases |\n| :--- | :--- |\n| **caused_by** | ursache_ist |\n| **part_of** | teil_von |")

        # 4. MOCKING / RESETTING GLOBAL STATE
        # Force get_settings to return our test paths.
        self.mock_settings = MagicMock()
        self.mock_settings.DECISION_CONFIG_PATH = self.decision_path
        self.mock_settings.MINDNET_VAULT_ROOT = self.test_vault
        self.mock_settings.RETRIEVER_TOP_K = 5
        self.mock_settings.MODEL_NAME = "test-model"

        # Patch get_settings in all relevant modules. Each stop() is registered
        # via addCleanup right after start(), so the global patches are undone
        # even if a later step of asyncSetUp raises (otherwise the patched
        # get_settings would leak into every subsequent test).
        self.patch_settings_chat = patch('app.routers.chat.get_settings', return_value=self.mock_settings)
        self.patch_settings_registry = patch('app.services.edge_registry.get_settings', return_value=self.mock_settings)
        self.patch_settings_chat.start()
        self.addCleanup(self.patch_settings_chat.stop)
        self.patch_settings_registry.start()
        self.addCleanup(self.patch_settings_registry.stop)

        # Module-level caches must be cleared.
        app.routers.chat._DECISION_CONFIG_CACHE = None
        # Registry singleton reset & forced init with the test path.
        EdgeRegistry._instance = None
        self.registry = EdgeRegistry(vault_root=self.test_vault)
        self.registry.unknown_log_path = os.path.join(self.test_root, "data/logs/unknown.jsonl")

    async def asyncTearDown(self):
        """Remove the temp tree and reset global singletons/caches.

        The get_settings patches are stopped by the addCleanup hooks
        registered in asyncSetUp, so they are not stopped here again.
        """
        if os.path.exists(self.test_root):
            shutil.rmtree(self.test_root)
        EdgeRegistry._instance = None
        app.routers.chat._DECISION_CONFIG_CACHE = None

    def test_registry_resolution(self):
        """Registry must load the vocabulary file and resolve aliases."""
        print("\n🔵 TEST 1: Registry Pfad & Alias Resolution")
        # Check that the vocabulary file was actually found.
        self.assertTrue(len(self.registry.valid_types) > 0, f"Registry leer! Root: {self.registry.vault_root}")
        self.assertEqual(self.registry.resolve("ursache_ist"), "caused_by")
        print("✅ Registry OK.")

    def test_scoring_math(self):
        """Lifecycle multipliers and the WP-22 scoring formula."""
        print("\n🔵 TEST 2: Scoring Math (Lifecycle)")
        with patch("app.core.retrieval.retriever_scoring.get_weights", return_value=(1.0, 1.0, 0.0)):
            # Stable (1.2)
            self.assertEqual(get_status_multiplier({"status": "stable"}), 1.2)
            # Draft (0.5)
            self.assertEqual(get_status_multiplier({"status": "draft"}), 0.5)
            # Scoring formula test: BaseScore * (1 + ConfigWeight + DynamicBoost)
            # BaseScore = 0.5 (sem) * 1.2 (stable) = 0.6
            # ConfigWeight = 1.0 (neutral) - 1.0 = 0.0
            # DynamicBoost = (1.0 * 0.5) = 0.5
            # Total = 0.6 * (1 + 0 + 0.5) = 0.9
            result = compute_wp22_score(0.5, {"status": "stable", "retriever_weight": 1.0}, edge_bonus_raw=0.5)
            self.assertAlmostEqual(result["total"], 0.9)
        print("✅ Scoring OK.")

    async def test_router_intent(self):
        """Keyword-based intent classification should pick CAUSAL for 'warum'."""
        print("\n🔵 TEST 3: Intent Classification")
        mock_llm = MagicMock()
        intent, _ = await _classify_intent("Warum ist das so?", mock_llm)
        self.assertEqual(intent, "CAUSAL")
        print("✅ Routing OK.")

    async def test_full_flow(self):
        """End-to-end pipeline: intent routing must push edge boosts into the retriever."""
        print("\n🔵 TEST 4: End-to-End Pipeline & Dynamic Boosting")
        mock_llm = AsyncMock()
        mock_llm.prompts = {}
        mock_llm.generate_raw_response.return_value = "Test Antwort"
        mock_retriever = AsyncMock()
        # note_id is required for Pydantic validation.
        mock_hit = QueryHit(
            node_id="c1", note_id="test_note_n1", semantic_score=0.8, edge_bonus=0.0,
            centrality_bonus=0.0, total_score=0.8, source={"text": "t"},
            payload={"status": "active", "type": "concept"}
        )
        mock_retriever.search.return_value.results = [mock_hit]
        req = ChatRequest(message="Warum ist das passiert?", top_k=1)
        resp = await chat_endpoint(req, llm=mock_llm, retriever=mock_retriever)
        # Verify intent classification.
        self.assertEqual(resp.intent, "CAUSAL")
        # Verify the boosts reached the retriever.
        called_req = mock_retriever.search.call_args[0][0]
        self.assertEqual(called_req.boost_edges.get("caused_by"), 3.0)
        print("✅ Full Flow & Boosting OK.")
# Allow running this test module directly (e.g. `python tests/test_WP22_intelligence.py`);
# indentation restored so the guard body is syntactically valid.
if __name__ == '__main__':
    unittest.main()