"""
app/services/discovery.py

Service für Link-Vorschläge und Knowledge-Discovery (WP-11).
"""
import logging
from typing import Any, Dict, List, Optional, Set

from qdrant_client.http import models as rest

from app.core.qdrant import QdrantConfig, get_client
from app.core.retriever import hybrid_retrieve
from app.models.dto import QueryRequest

logger = logging.getLogger(__name__)
class DiscoveryService:
    """Suggests wiki-links for draft notes (WP-11 knowledge discovery).

    Combines exact title/alias matching against the Qdrant notes collection
    with semantic (vector) similarity from the hybrid retriever.
    """

    # Minimum retriever score for a semantic suggestion to be surfaced.
    # total_score already includes type weights applied by the retriever.
    _SEMANTIC_SCORE_THRESHOLD = 0.65

    def __init__(self, collection_prefix: Optional[str] = None):
        """Initialize Qdrant configuration and client.

        Args:
            collection_prefix: Optional override for the collection prefix.
                Priority: argument > environment config > "mindnet" default.
        """
        self.cfg = QdrantConfig.from_env()
        # Prefix priority: argument > env > default
        self.prefix = collection_prefix or self.cfg.prefix or "mindnet"
        self.client = get_client(self.cfg)

    async def analyze_draft(self, text: str, current_type: str) -> Dict[str, Any]:
        """Analyze a draft text and propose link targets.

        Combines exact matches (note titles/aliases occurring in the text)
        with semantic matches (vector similarity via the hybrid retriever).

        Args:
            text: The draft body to analyze.
            current_type: Note type of the draft. Currently unused; kept for
                API compatibility with callers.

        Returns:
            Dict with ``draft_length``, ``suggestions_count`` and a list of
            ``suggestions`` (each carrying type, text_found, target_title,
            target_id, confidence and reason).
        """
        suggestions: List[Dict[str, Any]] = []

        # 1. Exact match: find terms in the text that exist as note titles.
        # (Fetches all titles from Qdrant — cache this for very large vaults.)
        known_entities = self._fetch_all_titles_and_aliases()
        found_entities = self._find_entities_in_text(text, known_entities)

        existing_target_ids: Set[Any] = set()
        for entity in found_entities:
            existing_target_ids.add(entity["id"])
            suggestions.append({
                "type": "exact_match",
                "text_found": entity["match"],
                "target_title": entity["title"],
                "target_id": entity["id"],
                "confidence": 1.0,
                "reason": "Existierender Notiz-Titel/Alias"
            })

        # 2. Semantic match: find related notes via vector search.
        for hit in self._get_semantic_suggestions(text):
            # Avoid duplicates already suggested via exact title/alias match.
            if hit.node_id in existing_target_ids:
                continue

            # Threshold keeps only relevant suggestions.
            if hit.total_score > self._SEMANTIC_SCORE_THRESHOLD:
                score = round(hit.total_score, 2)
                suggestions.append({
                    "type": "semantic_match",
                    "text_found": (hit.source.get("text") or "")[:50] + "...",
                    "target_title": hit.payload.get("title", "Unbekannt"),
                    "target_id": hit.node_id,
                    "confidence": score,
                    "reason": f"Inhaltliche Ähnlichkeit (Score: {score})"
                })

        return {
            "draft_length": len(text),
            "suggestions_count": len(suggestions),
            "suggestions": suggestions
        }

    def _fetch_all_titles_and_aliases(self) -> List[Dict[str, Any]]:
        """Load all titles and aliases from the notes collection.

        Pages through the collection with Qdrant's scroll API.

        Returns:
            List of dicts with ``id``, ``title`` and ``aliases``. Returns an
            empty list on any Qdrant error (best-effort, error is logged).
        """
        notes: List[Dict[str, Any]] = []
        next_page = None
        col_name = f"{self.prefix}_notes"

        try:
            while True:
                res, next_page = self.client.scroll(
                    collection_name=col_name,
                    limit=1000,
                    offset=next_page,
                    with_payload=True,
                    with_vectors=False,
                )
                for point in res:
                    pl = point.payload or {}

                    # Aliases may be stored as a bare string; normalize to list.
                    aliases = pl.get("aliases") or []
                    if isinstance(aliases, str):
                        aliases = [aliases]

                    notes.append({
                        "id": pl.get("note_id"),
                        "title": pl.get("title"),
                        "aliases": aliases,
                    })

                if next_page is None:
                    break
        except Exception as e:
            # Best-effort: an unreachable collection just yields no matches.
            logger.error("Error fetching titles: %s", e)
            return []

        return notes

    def _find_entities_in_text(self, text: str, entities: List[Dict]) -> List[Dict]:
        """Find occurrences of titles/aliases in the text (case-insensitive).

        NOTE(review): plain substring matching — this also matches inside
        longer words; consider word-boundary matching if that proves noisy.
        """
        found: List[Dict[str, Any]] = []
        text_lower = text.lower()

        for entity in entities:
            # 1. Check the title; if it matches, skip the aliases.
            title = entity.get("title")
            if title and title.lower() in text_lower:
                found.append({
                    "match": title,
                    "title": title,
                    "id": entity["id"],
                })
                continue

            # 2. Check aliases; first hit wins per entity.
            aliases = entity.get("aliases")
            if aliases and isinstance(aliases, list):
                for alias in aliases:
                    if alias and str(alias).lower() in text_lower:
                        found.append({
                            "match": alias,
                            "title": title,  # target is always the main title
                            "id": entity["id"],
                        })
                        break
        return found

    def _get_semantic_suggestions(self, text: str):
        """Run the hybrid retriever over the draft text (sync wrapper)."""
        req = QueryRequest(
            query=text,
            top_k=5,
            explain=False,
        )
        try:
            res = hybrid_retrieve(req)
            return res.results
        except Exception as e:
            # Best-effort: fall back to exact matches only.
            logger.error("Semantic suggestion failed: %s", e)
            return []