Further WP20 adjustments

This commit is contained in:
Lars 2025-12-23 21:57:50 +01:00
parent 867a7a8b44
commit a908853c30
4 changed files with 118 additions and 192 deletions

app/core/ingestion.py (View File)

@ -1,18 +1,15 @@
"""
FILE: app/core/ingestion.py
DESCRIPTION: Main ingestion logic. Transforms Markdown into the graph (notes, chunks, edges).
WP-20: Integration of Smart Edge Allocation via hybrid LLM (Gemini/Gemma/OpenRouter).
WP-22: Integration of content lifecycle (status gate) and edge registry validation.
WP-22: Context-sensitive edge validation with location reporting (line numbers).
WP-22: Multi-hash refresh for consistent change detection.
FIX: Robust handling of LLM responses (dict vs. string) to avoid item-assignment errors.
VERSION: 2.11.5
DESCRIPTION: Main ingestion logic. Transforms Markdown into the graph.
WP-20: Smart Edge Allocation via hybrid LLM (OpenRouter/Gemini).
WP-22: Content lifecycle, edge registry validation & multi-hash.
FIX: Bulletproof JSON Extraction & Prompt Formatting Safety.
VERSION: 2.11.6
STATUS: Active
DEPENDENCIES: app.core.parser, app.core.note_payload, app.core.chunker, app.services.llm_service, app.services.edge_registry
EXTERNAL_CONFIG: config/types.yaml, config/prompts.yaml
"""
import os
import json
import re
import logging
import asyncio
import time
@ -50,8 +47,24 @@ from app.services.llm_service import LLMService
logger = logging.getLogger(__name__)
# --- Helper ---
def extract_json_from_response(text: str) -> Any:
"""Extrahiert JSON-Daten, selbst wenn sie in Markdown-Blöcken stehen."""
if not text: return []
# Suche nach ```json ... ``` oder ``` ... ```
match = re.search(r"```(?:json)?\s*(.*?)\s*```", text, re.DOTALL)
clean_text = match.group(1) if match else text
try:
return json.loads(clean_text.strip())
except json.JSONDecodeError:
# Letzter Versuch: Alles vor der ersten [ und nach der letzten ] entfernen
start = clean_text.find('[')
end = clean_text.rfind(']') + 1
if start != -1 and end != 0:
try: return json.loads(clean_text[start:end])
except: pass
raise
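# A minimal usage sketch (inputs illustrative), assuming typical LLM wrappings:
#   >>> extract_json_from_response('Sure! ```json\n[{"to": "X", "kind": "uses"}]\n```')
#   [{'to': 'X', 'kind': 'uses'}]
#   >>> extract_json_from_response('noise before [1, 2] noise after')
#   [1, 2]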
def load_type_registry(custom_path: Optional[str] = None) -> dict:
"""Lädt die types.yaml zur Steuerung der typ-spezifischen Ingestion."""
import yaml
from app.config import get_settings
settings = get_settings()
@ -62,13 +75,11 @@ def load_type_registry(custom_path: Optional[str] = None) -> dict:
except Exception: return {}
def resolve_note_type(requested: Optional[str], reg: dict) -> str:
"""Bestimmt den finalen Notiz-Typ (Fallback auf 'concept')."""
types = reg.get("types", {})
if requested and requested in types: return requested
return "concept"
def effective_chunk_profile_name(fm: dict, note_type: str, reg: dict) -> str:
"""Ermittelt den Namen des zu nutzenden Chunk-Profils."""
override = fm.get("chunking_profile") or fm.get("chunk_profile")
if override and isinstance(override, str): return override
t_cfg = reg.get("types", {}).get(note_type, {})
@ -78,7 +89,6 @@ def effective_chunk_profile_name(fm: dict, note_type: str, reg: dict) -> str:
return reg.get("defaults", {}).get("chunking_profile", "sliding_standard")
def effective_retriever_weight(fm: dict, note_type: str, reg: dict) -> float:
"""Ermittelt das effektive retriever_weight für das Scoring."""
override = fm.get("retriever_weight")
if override is not None:
try: return float(override)
@ -110,203 +120,137 @@ class IngestionService:
except Exception as e:
logger.warning(f"DB init warning: {e}")
def _get_chunk_config_by_profile(self, profile_name: str, note_type: str) -> Dict[str, Any]:
"""Holt die Chunker-Parameter für ein spezifisches Profil."""
profiles = self.registry.get("chunking_profiles", {})
if profile_name in profiles:
cfg = profiles[profile_name].copy()
if "overlap" in cfg and isinstance(cfg["overlap"], list):
cfg["overlap"] = tuple(cfg["overlap"])
return cfg
return get_chunk_config(note_type)
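# Sketch (profile values hypothetical): with
#   registry["chunking_profiles"]["sliding_dense"] = {"size": 800, "overlap": [64, 128]}
# _get_chunk_config_by_profile("sliding_dense", "concept") returns
#   {"size": 800, "overlap": (64, 128)}   # overlap list coerced to a tuple for the chunker
# while an unknown profile name falls back to get_chunk_config("concept").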
async def _perform_smart_edge_allocation(self, text: str, note_id: str) -> List[Dict]:
"""
WP-20: Uses the hybrid LLM service for semantic edge extraction.
QUOTA PROTECTION: Prefers OpenRouter (Gemma 2) to spare Gemini daily limits.
"""
"""Uses the hybrid LLM for semantic edge extraction."""
provider = "openrouter" if self.settings.OPENROUTER_API_KEY else self.settings.MINDNET_LLM_PROVIDER
model = self.settings.GEMMA_MODEL
logger.info(f"🚀 [Ingestion] Turbo-Mode: Extracting edges for '{note_id}' using {model} on {provider}")
# WP-22: Fetch valid types for the prompt template
edge_registry.ensure_latest()
valid_types_str = ", ".join(sorted(list(edge_registry.valid_types)))
template = self.llm.get_prompt("edge_extraction", provider)
try:
# Fill the template (v2.5.0 expects valid_types)
# FIX: Format-Safety Block
try:
prompt = template.format(
text=text[:6000],
note_id=note_id,
valid_types=valid_types_str
)
response_json = await self.llm.generate_raw_response(
prompt=prompt,
priority="background",
force_json=True,
provider=provider,
model_override=model
)
# Robust parsing (WP-20 fix for 'str' object assignment error)
raw_data = json.loads(response_json)
processed_edges = []
# The LLM sometimes returns a dict with a single key instead of a list
if isinstance(raw_data, dict):
logger.debug(f" [Ingestion] LLM returned dict for {note_id}, attempting recovery.")
for key in ["edges", "links", "results", "kanten"]:
if key in raw_data and isinstance(raw_data[key], list):
raw_data = raw_data[key]
break
if not isinstance(raw_data, list):
logger.warning(f"⚠️ [Ingestion] LLM output for {note_id} is not a list: {type(raw_data)}")
except KeyError as ke:
logger.error(f"❌ [Ingestion] Prompt-Template Fehler (Fehlende Maskierung in YAML?): {ke}")
return []
response_json = await self.llm.generate_raw_response(
prompt=prompt, priority="background", force_json=True,
provider=provider, model_override=model
)
# FIX: Robust JSON parsing
raw_data = extract_json_from_response(response_json)
if isinstance(raw_data, dict):
for k in ["edges", "links", "results", "kanten"]:
if k in raw_data and isinstance(raw_data[k], list):
raw_data = raw_data[k]
break
if not isinstance(raw_data, list): return []
processed = []
for item in raw_data:
# Case 1: item is already a dict (ideal case)
# FIX: type check to avoid 'str' object assignment errors
if isinstance(item, dict) and "to" in item:
item["provenance"] = "semantic_ai"
item["line"] = f"ai-{provider}"
processed_edges.append(item)
# Case 2: item is a string (e.g. "kind:target") -> convert it
processed.append(item)
elif isinstance(item, str) and ":" in item:
parts = item.split(":", 1)
processed_edges.append({
processed.append({
"to": parts[1].strip(),
"kind": parts[0].strip(),
"provenance": "semantic_ai",
"line": f"ai-{provider}"
})
else:
logger.debug(f"⏩ [Ingestion] Skipping unparseable AI edge: {item}")
return processed_edges
return processed
except Exception as e:
logger.warning(f"⚠️ [Ingestion] Smart Edge Allocation failed for {note_id} on {provider}: {e}")
logger.warning(f"⚠️ [Ingestion] Smart Edge Allocation failed for {note_id}: {e}")
return []
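# Sketch of the normalization above (targets hypothetical): both response
# shapes reduce to the same edge dicts, e.g. with provider = "openrouter":
#   {"edges": [{"to": "GraphDB", "kind": "uses"}]} ->
#     [{"to": "GraphDB", "kind": "uses", "provenance": "semantic_ai", "line": "ai-openrouter"}]
#   ["uses:GraphDB"] ->
#     [{"to": "GraphDB", "kind": "uses", "provenance": "semantic_ai", "line": "ai-openrouter"}]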
async def process_file(
self,
file_path: str,
vault_root: str,
force_replace: bool = False,
apply: bool = False,
purge_before: bool = False,
note_scope_refs: bool = False,
hash_source: str = "parsed",
hash_normalize: str = "canonical"
self, file_path: str, vault_root: str,
force_replace: bool = False, apply: bool = False, purge_before: bool = False,
note_scope_refs: bool = False, hash_source: str = "parsed", hash_normalize: str = "canonical"
) -> Dict[str, Any]:
"""Verarbeitet eine Markdown-Datei und schreibt sie in den Graphen."""
result = {"path": file_path, "status": "skipped", "changed": False, "error": None}
# 1. Parse & Frontmatter Validation
try:
parsed = read_markdown(file_path)
if not parsed: return {**result, "error": "Empty or unreadable file"}
if not parsed: return {**result, "error": "Empty file"}
fm = normalize_frontmatter(parsed.frontmatter)
validate_required_frontmatter(fm)
except Exception as e:
logger.error(f"Validation failed for {file_path}: {e}")
return {**result, "error": f"Validation failed: {str(e)}"}
# --- WP-22: Content Lifecycle Gate ---
status = fm.get("status", "draft").lower().strip()
if status in ["system", "template", "archive", "hidden"]:
return {**result, "status": "skipped", "reason": f"lifecycle_status_{status}"}
return {**result, "status": "skipped", "reason": f"lifecycle_{status}"}
# 2. Type & Config Resolution
note_type = resolve_note_type(fm.get("type"), self.registry)
fm["type"] = note_type
effective_profile = effective_chunk_profile_name(fm, note_type, self.registry)
effective_weight = effective_retriever_weight(fm, note_type, self.registry)
fm["chunk_profile"] = effective_profile
fm["retriever_weight"] = effective_weight
# 3. Build Note Payload
try:
note_pl = make_note_payload(parsed, vault_root=vault_root, hash_normalize=hash_normalize, hash_source=hash_source, file_path=file_path)
if not note_pl.get("fulltext"): note_pl["fulltext"] = getattr(parsed, "body", "") or ""
note_pl["retriever_weight"] = effective_weight
note_pl["chunk_profile"] = effective_profile
note_pl["status"] = status
note_id = note_pl["note_id"]
except Exception as e:
return {**result, "error": f"Payload build failed: {str(e)}"}
return {**result, "error": f"Payload failed: {str(e)}"}
# 4. Change Detection (WP-22 Multi-Hash)
old_payload = None
if not force_replace:
old_payload = self._fetch_note_payload(note_id)
has_old = old_payload is not None
old_payload = None if force_replace else self._fetch_note_payload(note_id)
check_key = f"{self.active_hash_mode}:{hash_source}:{hash_normalize}"
old_hashes = (old_payload or {}).get("hashes", {})
old_hash = old_hashes.get(check_key) if isinstance(old_hashes, dict) else None
old_hash = (old_payload or {}).get("hashes", {}).get(check_key)
new_hash = note_pl.get("hashes", {}).get(check_key)
hash_changed = (old_hash != new_hash)
chunks_missing, edges_missing = self._artifacts_missing(note_id)
should_write = force_replace or (not old_payload) or (old_hash != new_hash) or any(self._artifacts_missing(note_id))
should_write = force_replace or (not has_old) or hash_changed or chunks_missing or edges_missing
if not should_write: return {**result, "status": "unchanged", "note_id": note_id}
if not apply: return {**result, "status": "dry-run", "changed": True, "note_id": note_id}
if not should_write:
return {**result, "status": "unchanged", "note_id": note_id}
if not apply:
return {**result, "status": "dry-run", "changed": True, "note_id": note_id}
# 5. Processing (Chunking, Embedding, Edge Generation)
try:
body_text = getattr(parsed, "body", "") or ""
# WP-22 STABILITY PATCH: check whether ensure_latest exists
if hasattr(edge_registry, "ensure_latest"):
edge_registry.ensure_latest()
if hasattr(edge_registry, "ensure_latest"): edge_registry.ensure_latest()
chunk_config = self._get_chunk_config_by_profile(effective_profile, note_type)
chunks = await assemble_chunks(fm["id"], body_text, fm["type"], config=chunk_config)
chunk_pls = make_chunk_payloads(fm, note_pl["path"], chunks, note_text=body_text)
vecs = []
if chunk_pls:
texts = [c.get("window") or c.get("text") or "" for c in chunk_pls]
vecs = await self.embedder.embed_documents(texts)
vecs = await self.embedder.embed_documents([c.get("window") or c.get("text") or "" for c in chunk_pls]) if chunk_pls else []
# --- WP-22/WP-20: Edge extraction & validation ---
edges = []
context = {"file": file_path, "note_id": note_id}
# A. Explicit user edges
explicit_edges = extract_edges_with_context(parsed)
for e in explicit_edges:
for e in extract_edges_with_context(parsed):
e["kind"] = edge_registry.resolve(edge_type=e["kind"], provenance="explicit", context={**context, "line": e.get("line")})
edges.append(e)
# B. WP-20: Smart AI Edges (Hybrid Turbo Acceleration)
ai_edges = await self._perform_smart_edge_allocation(body_text, note_id)
for e in ai_edges:
# Validation against the EdgeRegistry (avoids 'Transition' etc.)
valid_kind = edge_registry.resolve(edge_type=e.get("kind"), provenance="semantic_ai", context={**context, "line": e.get("line")})
e["kind"] = valid_kind
e["kind"] = edge_registry.resolve(edge_type=e.get("kind"), provenance="semantic_ai", context={**context, "line": e.get("line")})
edges.append(e)
# C. System edges (structure)
try:
raw_system_edges = build_edges_for_note(note_id, chunk_pls, note_level_references=note_pl.get("references", []), include_note_scope_refs=note_scope_refs)
except TypeError:
raw_system_edges = build_edges_for_note(note_id, chunk_pls)
sys_edges = build_edges_for_note(note_id, chunk_pls, note_level_references=note_pl.get("references", []), include_note_scope_refs=note_scope_refs)
except TypeError: sys_edges = build_edges_for_note(note_id, chunk_pls)
for e in raw_system_edges:
for e in sys_edges:
valid_kind = edge_registry.resolve(edge_type=e.get("kind", "belongs_to"), provenance="structure", context={**context, "line": "system"})
if valid_kind:
e["kind"] = valid_kind
@ -316,10 +260,8 @@ class IngestionService:
logger.error(f"Processing failed for {file_path}: {e}", exc_info=True)
return {**result, "error": f"Processing failed: {str(e)}"}
# 6. Upsert
try:
if purge_before and has_old: self._purge_artifacts(note_id)
if purge_before and old_payload: self._purge_artifacts(note_id)
n_name, n_pts = points_for_note(self.prefix, note_pl, None, self.dim)
upsert_batch(self.client, n_name, n_pts)
@ -333,15 +275,13 @@ class IngestionService:
return {"path": file_path, "status": "success", "changed": True, "note_id": note_id, "chunks_count": len(chunk_pls), "edges_count": len(edges)}
except Exception as e:
logger.error(f"Upsert failed for {note_id}: {e}", exc_info=True)
return {**result, "error": f"DB Upsert failed: {e}"}
def _fetch_note_payload(self, note_id: str) -> Optional[dict]:
from qdrant_client.http import models as rest
col = f"{self.prefix}_notes"
try:
f = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
pts, _ = self.client.scroll(collection_name=col, scroll_filter=f, limit=1, with_payload=True)
pts, _ = self.client.scroll(collection_name=f"{self.prefix}_notes", scroll_filter=f, limit=1, with_payload=True)
return pts[0].payload if pts else None
except Exception: return None
@ -357,23 +297,15 @@ class IngestionService:
def _purge_artifacts(self, note_id: str):
from qdrant_client.http import models as rest
f = rest.Filter(must=[rest.FieldCondition(key="note_id", match=rest.MatchValue(value=note_id))])
selector = rest.FilterSelector(filter=f)
for suffix in ["chunks", "edges"]:
try: self.client.delete(collection_name=f"{self.prefix}_{suffix}", points_selector=selector)
except Exception: pass
try: self.client.delete(collection_name=f"{self.prefix}_{suffix}", points_selector=rest.FilterSelector(filter=f))
except Exception: pass
async def create_from_text(self, markdown_content: str, filename: str, vault_root: str, folder: str = "00_Inbox") -> Dict[str, Any]:
"""Hilfsmethode zur Erstellung einer Note aus einem Textstream."""
target_dir = os.path.join(vault_root, folder)
os.makedirs(target_dir, exist_ok=True)
file_path = os.path.join(target_dir, filename)
try:
with open(file_path, "w", encoding="utf-8") as f:
f.write(markdown_content)
f.flush()
os.fsync(f.fileno())
await asyncio.sleep(0.1)
logger.info(f"Written file to {file_path}")
except Exception as e:
return {"status": "error", "error": f"Disk write failed: {str(e)}"}
return await self.process_file(file_path=file_path, vault_root=vault_root, apply=True, force_replace=True, purge_before=True)
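For reference, the WP-22 change detection in process_file compares one composite hash key per mode; a minimal sketch with hypothetical values:

# Minimal sketch of the multi-hash comparison (mode and hash values hypothetical).
check_key = "sha256:parsed:canonical"  # f"{active_hash_mode}:{hash_source}:{hash_normalize}"
old_hashes = {"sha256:parsed:canonical": "abc123"}  # from the stored note payload
new_hashes = {"sha256:parsed:canonical": "def456"}  # from the freshly built payload
hash_changed = old_hashes.get(check_key) != new_hashes.get(check_key)  # True -> rewrite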

app/services/semantic_analyzer.py (View File)

@ -2,10 +2,10 @@
FILE: app/services/semantic_analyzer.py
DESCRIPTION: AI-assisted edge validation. Uses an LLM (background priority) to assign edges precisely to a chunk.
WP-20 fix: Full compatibility with the hardened LLMService (v3.3.2) cascade.
WP-20: Support for provider-specific routing logic on import.
VERSION: 2.2.1
WP-22: Integration of valid_types to prevent hallucinations.
VERSION: 2.2.2
STATUS: Active
DEPENDENCIES: app.services.llm_service, json, logging
DEPENDENCIES: app.services.llm_service, app.services.edge_registry, json, logging
LAST_ANALYSIS: 2025-12-23
"""
@ -16,6 +16,8 @@ from dataclasses import dataclass
# Imports
from app.services.llm_service import LLMService
# WP-22: Registry for vocabulary enforcement
from app.services.edge_registry import registry as edge_registry
logger = logging.getLogger(__name__)
@ -53,8 +55,6 @@ class SemanticAnalyzer:
"""
Sends a chunk and a list of candidate edges to the LLM.
The LLM filters which of these edges are relevant to this chunk.
WP-20 fix: Uses get_prompt() to safely avoid the 'AttributeError: dict object'.
"""
if not all_edges:
return []
@ -72,25 +72,28 @@ class SemanticAnalyzer:
"OUTPUT: JSON Liste von Strings [\"kind:target\"]."
)
# 2. Format the candidate list
# 2. Prepare the template data (WP-22 integration)
# Load the valid types so the LLM gets them as guardrails
edge_registry.ensure_latest()
valid_types_str = ", ".join(sorted(list(edge_registry.valid_types)))
edges_str = "\n".join([f"- {e}" for e in all_edges])
# LOG: Request Info
logger.debug(f"🔍 [SemanticAnalyzer] Request: {len(chunk_text)} chars Text, {len(all_edges)} Candidates.")
# 3. Fill the prompt (now safely a string thanks to LLMService v3.3.2)
# 3. Fill the prompt (FIX: valid_types added to resolve the format error)
try:
final_prompt = prompt_template.format(
chunk_text=chunk_text[:3500],
edge_list=edges_str
edge_list=edges_str,
valid_types=valid_types_str
)
except Exception as format_err:
logger.error(f"❌ [SemanticAnalyzer] Format Error im Prompt-Template: {format_err}")
logger.error(f"❌ [SemanticAnalyzer] Format Error im Prompt-Template (Fehlender Parameter): {format_err}")
return []
try:
# 4. LLM call with traffic control (background priority)
# The chosen provider is controlled via .env (MINDNET_LLM_PROVIDER)
response_json = await self.llm.generate_raw_response(
prompt=final_prompt,
force_json=True,
@ -135,7 +138,7 @@ class SemanticAnalyzer:
if isinstance(target, str):
raw_candidates.append(f"{key}:{target}")
# 7. Strict validation loop (carried over from your v2.2.0)
# 7. Strict Validation Loop
for e in raw_candidates:
e_str = str(e)
if self._is_valid_edge_string(e_str):
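The KeyError guarded against above is inherent to str.format, which raises for every placeholder it is not handed; a minimal reproduction (illustrative, not taken from the codebase):

template = "ERLAUBTE TYPEN: {valid_types}\nTEXT: {chunk_text}"
try:
    template.format(chunk_text="...")  # valid_types missing -> KeyError
except KeyError as err:
    print(f"Format error in prompt template: {err}")  # -> 'valid_types'
template.format(chunk_text="...", valid_types="uses, solves")  # succeeds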

config/decision_engine.yaml (View File)

@ -1,6 +1,6 @@
# config/decision_engine.yaml
# Controls the Decision Engine (intent recognition & graph routing)
# VERSION: 2.6.0 (WP-20: Hybrid LLM & WP-22: Semantic Graph Routing)
# VERSION: 2.6.1 (WP-20: Hybrid LLM & WP-22: Semantic Graph Routing)
# STATUS: Active
version: 2.6
@ -9,11 +9,10 @@ settings:
llm_fallback_enabled: true
# Strategy for the router itself (which model detects the intent?)
# "auto" uses the default set in MINDNET_LLM_PROVIDER.
# "auto" uses the default set in MINDNET_LLM_PROVIDER (e.g. openrouter).
router_provider: "auto"
# Few-shot prompting for the LLM router (slow path)
# Gemini 1.5 uses this context for high-precision intent detection.
# Few-shot prompting for the LLM router
llm_router_prompt: |
Du bist der zentrale Intent-Klassifikator für Mindnet, einen digitalen Zwilling.
Analysiere die Nachricht und wähle die passende Strategie.
@ -38,10 +37,10 @@ settings:
STRATEGIE:
strategies:
# 1. Fact query (fallback & default)
# 1. Fact query (turbo mode via OpenRouter)
FACT:
description: "Reine Wissensabfrage."
preferred_provider: "openrouter" # Schnell und lokal ausreichend
preferred_provider: "openrouter"
trigger_keywords: []
inject_types: []
# WP-22: Prefer definitions & hierarchies
@ -53,10 +52,10 @@ strategies:
prompt_template: "rag_template"
prepend_instruction: null
# 2. Decision question (power strategy)
# 2. Decision question (power strategy via Gemini)
DECISION:
description: "Der User sucht Rat, Strategie oder Abwägung."
preferred_provider: "gemini" # Nutzt Gemini's Reasoning-Power für WP-20
preferred_provider: "gemini"
trigger_keywords:
- "soll ich"
- "meinung"
@ -67,22 +66,22 @@ strategies:
- "abwägung"
- "vergleich"
inject_types: ["value", "principle", "goal", "risk"]
# WP-22: Highlight risks and consequences
# WP-22: Prioritize risks and consequences in the graph
edge_boosts:
blocks: 2.5
solves: 2.0
depends_on: 1.5
risk_of: 2.5
impacts: 2.0 # NEW: show me everything affected by this decision!
impacts: 2.0
prompt_template: "decision_template"
prepend_instruction: |
!!! ENTSCHEIDUNGS-MODUS (HYBRID AI) !!!
BITTE WÄGE FAKTEN GEGEN FOLGENDE WERTE, PRINZIPIEN UND ZIELE AB:
# 3. Empathy / "me" mode (privacy focus)
# 3. Empathy / "me" mode (local & private via Ollama)
EMPATHY:
description: "Reaktion auf emotionale Zustände."
preferred_provider: "ollama" # Private Emotionen bleiben lokal!
preferred_provider: "ollama"
trigger_keywords:
- "ich fühle"
- "traurig"
@ -93,7 +92,6 @@ strategies:
- "überfordert"
- "müde"
inject_types: ["experience", "belief", "profile"]
# WP-22: Strengthen soft associations & experiences
edge_boosts:
based_on: 2.0
related_to: 2.0
@ -102,10 +100,10 @@ strategies:
prompt_template: "empathy_template"
prepend_instruction: null
# 4. Coding / Technical
# 4. Coding / Technical (Gemini Power)
CODING:
description: "Technische Anfragen und Programmierung."
preferred_provider: "gemini" # Höheres Weltwissen für moderne Libraries
preferred_provider: "gemini"
trigger_keywords:
- "code"
- "python"
@ -117,7 +115,6 @@ strategies:
- "yaml"
- "bash"
inject_types: ["snippet", "reference", "source"]
# WP-22: Technical dependencies
edge_boosts:
uses: 2.5
depends_on: 2.0
@ -125,10 +122,10 @@ strategies:
prompt_template: "technical_template"
prepend_instruction: null
# 5. Interview / data capture
# 5. Interview / data capture (local)
INTERVIEW:
description: "Der User möchte Wissen erfassen."
preferred_provider: "ollama" # Lokale Erfassung ist performant genug
preferred_provider: "ollama"
trigger_keywords:
- "neue notiz"
- "etwas notieren"
@ -144,11 +141,3 @@ strategies:
edge_boosts: {}
prompt_template: "interview_template"
prepend_instruction: null
# Schemas come from types.yaml (WP-22)
schemas:
default:
fields:
- "Titel"
- "Thema/Inhalt"
- "Tags"
hint: "Halte es einfach und übersichtlich."

config/prompts.yaml (View File)

@ -1,6 +1,7 @@
# config/prompts.yaml — Final V2.4.1 (Hybrid & Multi-Provider Support)
# config/prompts.yaml — Final V2.5.1 (Hybrid & Multi-Provider Support)
# WP-20: Optimized cloud templates.
# FIX: Technical masking (double braces) for JSON stability with identical content.
# FIX: Technical masking (double braces) in the cloud sections to avoid KeyError.
# OLLAMA: Unchanged per user instruction.
system_prompt: |
Du bist 'mindnet', mein digitaler Zwilling und strategischer Partner.
@ -113,7 +114,7 @@ technical_template:
- Kurze Erklärung des Ansatzes.
- Markdown Code-Block (Copy-Paste fertig).
- Wichtige Edge-Cases.
gemini: "Generiere Code für {query}. Kontext: {context_str}"
gemini: "Generiere Code für {query} unter Berücksichtigung von {context_str}."
openrouter: "Technischer Support: {query}. Kontext: {context_str}"
# ---------------------------------------------------------
@ -189,7 +190,8 @@ edge_allocation_template:
ERLAUBTE TYPEN: {valid_types}
TEXT: {chunk_text}
KANDIDATEN: {edge_list}
OUTPUT: STRIKT JSON-Liste von Strings ["typ:ziel"].
Output: JSON-Liste ["typ:ziel"].
# ---------------------------------------------------------
# 7. SMART EDGE ALLOCATION: Extraction (Intent: INGEST)
# ---------------------------------------------------------
@ -219,9 +221,9 @@ edge_extraction:
Analysiere '{note_id}'. Extrahiere semantische Beziehungen.
ERLAUBTE TYPEN: {valid_types}
TEXT: {text}
OUTPUT: STRIKT JSON-Liste von Objekten: [{"to": "Ziel", "kind": "typ"}]. Keine Erklärungen!
OUTPUT: STRIKT JSON-Liste von Objekten: [{{"to": "Ziel", "kind": "typ"}}]. Keine Erklärungen!
openrouter: |
Wissensgraph-Extraktion für '{note_id}'.
ERLAUBTE TYPEN: {valid_types}
TEXT: {text}
OUTPUT: STRIKT JSON-Liste von Objekten: [{"to": "Ziel", "kind": "typ"}]. Keine Dictionaries mit Schlüsseln wie 'edges'!
OUTPUT: STRIKT JSON-Liste von Objekten: [[{{"to": "X", "kind": "Y"}}]]. Keine Dictionaries mit Schlüsseln wie 'edges'!