""" app/services/discovery.py Updated for WP-11: Sliding Window Analysis. """ import logging import asyncio from typing import List, Dict, Any import yaml from app.core.qdrant import QdrantConfig, get_client from app.models.dto import QueryRequest from app.core.retriever import hybrid_retrieve logger = logging.getLogger(__name__) class DiscoveryService: def __init__(self, collection_prefix: str = None): self.cfg = QdrantConfig.from_env() self.prefix = collection_prefix or self.cfg.prefix or "mindnet" self.client = get_client(self.cfg) self.registry = self._load_type_registry() async def analyze_draft(self, text: str, current_type: str) -> Dict[str, Any]: """ Analysiert den Draft mit Sliding Window Strategie. """ suggestions = [] default_edge_type = self._get_default_edge_type(current_type) # 1. Exact Match (Läuft über gesamten Text, schnell genug) known_entities = self._fetch_all_titles_and_aliases() found_entities = self._find_entities_in_text(text, known_entities) existing_target_ids = set() for entity in found_entities: existing_target_ids.add(entity["id"]) suggestions.append({ "type": "exact_match", "text_found": entity["match"], "target_title": entity["title"], "target_id": entity["id"], "suggested_edge_type": default_edge_type, "suggested_markdown": f"[[rel:{default_edge_type} {entity['title']}]]", "confidence": 1.0, "reason": f"Exakter Treffer ({entity['match']})" }) # 2. Semantic Match (Sliding Window) # Wir zerlegen den Text in relevante Chunks, um Token-Limits zu umgehen # und Fokus zu streuen. search_queries = self._generate_search_queries(text) # Parallel Execution für alle Queries tasks = [self._get_semantic_suggestions_async(q) for q in search_queries] results_list = await asyncio.gather(*tasks) # Ergebnisse mergen und deduplizieren seen_semantic_ids = set() for hits in results_list: for hit in hits: if hit.node_id in existing_target_ids or hit.node_id in seen_semantic_ids: continue # Threshold Tuning: Bei 'nomic' sind Scores oft niedriger (0.4-0.6 ist schon gut) # Wir setzen ihn moderat auf 0.50 if hit.total_score > 0.50: seen_semantic_ids.add(hit.node_id) target_title = hit.payload.get("title") or hit.node_id suggestions.append({ "type": "semantic_match", "text_found": (hit.source.get("text") or "")[:60] + "...", "target_title": target_title, "target_id": hit.node_id, "suggested_edge_type": default_edge_type, "suggested_markdown": f"[[rel:{default_edge_type} {target_title}]]", "confidence": round(hit.total_score, 2), "reason": f"Semantisch ähnlich ({hit.total_score:.2f})" }) # Sortieren nach Confidence suggestions.sort(key=lambda x: x["confidence"], reverse=True) return { "draft_length": len(text), "suggestions_count": len(suggestions), "suggestions": suggestions[:10] # Limit auf Top 10 } # --- Helpers --- def _generate_search_queries(self, text: str) -> List[str]: """ Zerlegt den Text in bis zu 3 Such-Queries: 1. Der Anfang (Kontext/Einleitung) 2. Die Mitte (Details) 3. Das Ende (Fazit/Zusammenfassung) """ if len(text) < 600: return [text] queries = [] # Query 1: Die ersten 400 Zeichen queries.append(text[:400]) # Query 2: Ein Fenster aus der Mitte mid = len(text) // 2 queries.append(text[mid-200 : mid+200]) # Query 3: Die letzten 400 Zeichen if len(text) > 800: queries.append(text[-400:]) return queries async def _get_semantic_suggestions_async(self, text: str): # Nutzt hybrid_retrieve (sync), aber hier in Async Context okay req = QueryRequest(query=text, top_k=5, explain=False) try: res = hybrid_retrieve(req) return res.results except Exception: return [] # ... 
    def _load_type_registry(self) -> dict:
        """Load the type registry from MINDNET_TYPES_FILE (default: config/types.yaml)."""
        path = os.getenv("MINDNET_TYPES_FILE", "config/types.yaml")
        if not os.path.exists(path):
            if os.path.exists("types.yaml"):
                path = "types.yaml"
            else:
                return {}
        try:
            with open(path, "r", encoding="utf-8") as f:
                return yaml.safe_load(f) or {}
        except Exception:
            return {}

    def _get_default_edge_type(self, note_type: str) -> str:
        """Return the first configured edge default for the note type, else 'related_to'."""
        types_cfg = self.registry.get("types", {})
        type_def = types_cfg.get(note_type, {})
        defaults = type_def.get("edge_defaults")
        return defaults[0] if defaults else "related_to"

    def _fetch_all_titles_and_aliases(self) -> List[Dict]:
        """Scroll the notes collection and collect id, title and aliases for every note."""
        notes = []
        next_page = None
        col = f"{self.prefix}_notes"
        try:
            while True:
                res, next_page = self.client.scroll(
                    collection_name=col,
                    limit=1000,
                    offset=next_page,
                    with_payload=True,
                    with_vectors=False,
                )
                for point in res:
                    pl = point.payload or {}
                    aliases = pl.get("aliases") or []
                    if isinstance(aliases, str):
                        aliases = [aliases]
                    notes.append({
                        "id": pl.get("note_id"),
                        "title": pl.get("title"),
                        "aliases": aliases,
                    })
                if next_page is None:
                    break
        except Exception as exc:
            logger.warning("Could not scroll collection %s: %s", col, exc)
        return notes

    def _find_entities_in_text(self, text: str, entities: List[Dict]) -> List[Dict]:
        """Case-insensitive substring match of known titles and aliases against the draft."""
        found = []
        text_lower = text.lower()
        for entity in entities:
            title = entity.get("title")
            if title and title.lower() in text_lower:
                found.append({"match": title, "title": title, "id": entity["id"]})
                continue
            for alias in entity.get("aliases", []):
                if str(alias).lower() in text_lower:
                    found.append({"match": alias, "title": title, "id": entity["id"]})
                    break
        return found
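

# --- Illustrative sketches (not part of the original module) -----------------

# A minimal sketch of the sliding-window splitting itself. The 1,000-character
# input is an assumed example; the splitting is pure string slicing, so no
# Qdrant connection is needed and __init__ is bypassed on purpose.
def _sliding_window_example() -> List[str]:
    svc: DiscoveryService = object.__new__(DiscoveryService)  # skip __init__: no client/registry needed
    windows = svc._generate_search_queries("x" * 1000)
    # -> three 400-character windows: text[:400], text[300:700] and text[-400:];
    #    a draft under 600 characters would come back unchanged as a single query.
    return windows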
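

# Usage sketch: drive analyze_draft from an async entry point. This assumes a
# reachable Qdrant instance with a populated "<prefix>_notes" collection and a
# working hybrid_retrieve backend; the draft text and the "concept" note type
# below are made-up examples.
if __name__ == "__main__":
    async def _demo() -> None:
        service = DiscoveryService()
        draft = (
            "Working notes on retrieval-augmented drafting. The sliding window "
            "splits long drafts into beginning, middle and end so that each "
            "chunk stays within the embedding token limit."
        )
        result = await service.analyze_draft(draft, current_type="concept")
        print(f'{result["suggestions_count"]} suggestions for a {result["draft_length"]}-char draft')
        for s in result["suggestions"]:
            print(f'{s["confidence"]:.2f}  {s["type"]:>14}  {s["suggested_markdown"]}')

    asyncio.run(_demo())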