mindnet/tests/test_retriever_weight.py
Lars e93bab6ea7
All checks were successful
Deploy mindnet to llm-node / deploy (push) Successful in 4s
Fassadenauflösung unter app/core
2025-12-28 11:04:40 +01:00

49 lines
1.6 KiB
Python

from app.core.retrieval import retriever as r
from app.models.dto import QueryRequest
import app.core.database.qdrant as qdr
import app.core.database.qdrant_points as qp
import app.services.embeddings_client as ec
class _DummyClient:
pass
def _fake_get_client(cfg):
    """Stub for qdrant.get_client: ignores *cfg* and hands back a dummy client."""
    client = _DummyClient()
    return client
def _fake_embed_text(text: str):
# Dimension egal, da wir search_chunks_by_vector faken
return [0.0] * 384
def _fake_search_chunks_by_vector_weights(client, prefix, vector, top=10, filters=None):
# Zwei Treffer mit unterschiedlichem retriever_weight
return [
("chunk:low", 0.9, {"note_id": "note:low", "path": "low.md", "section_title": "S1", "retriever_weight": 0.2}),
("chunk:high", 0.8, {"note_id": "note:high", "path": "high.md", "section_title": "S2", "retriever_weight": 1.5}),
]
def test_retriever_weight_influences_ranking(monkeypatch):
    """retriever_weight must re-rank semantic hits.

    Raw semantic scores are 0.9 (low) vs. 0.8 (high); after weighting
    (0.9 * 0.2 = 0.18 vs. 0.8 * 1.5 = 1.2) the "high" note must rank first.
    """
    # Replace Qdrant client, embedding, and vector search with stubs.
    monkeypatch.setattr(qdr, "get_client", _fake_get_client)
    monkeypatch.setattr(ec, "embed_text", _fake_embed_text)
    monkeypatch.setattr(qp, "search_chunks_by_vector", _fake_search_chunks_by_vector_weights)

    resp = r.semantic_retrieve(QueryRequest(mode="semantic", query="karate", top_k=2))

    assert len(resp.results) == 2
    first, second = resp.results
    assert (first.note_id, second.note_id) == ("note:high", "note:low")
    assert first.total_score > second.total_score