diff --git a/app/frontend/ui.py b/app/frontend/ui.py index ac23908..954519e 100644 --- a/app/frontend/ui.py +++ b/app/frontend/ui.py @@ -1,42 +1,15 @@ import streamlit as st -import requests import uuid -import os -import json -import re -import yaml -import unicodedata -from datetime import datetime -from pathlib import Path -from dotenv import load_dotenv -# --- WP-19 GRAPH IMPORTS --- +# --- MODULE IMPORTS --- try: - from streamlit_agraph import agraph, Node, Edge, Config - from qdrant_client import QdrantClient, models -except ImportError: - st.error("Fehlende Bibliotheken! Bitte installiere: pip install streamlit-agraph qdrant-client") + from ui_config import QDRANT_URL, QDRANT_KEY, COLLECTION_PREFIX + from ui_graph_service import GraphExplorerService + from ui_components import render_sidebar, render_chat_interface, render_manual_editor, render_graph_explorer +except ImportError as e: + st.error(f"Import Error: {e}. Bitte stelle sicher, dass alle UI-Dateien im selben Ordner liegen.") st.stop() -# --- CONFIGURATION --- -load_dotenv() -API_BASE_URL = os.getenv("MINDNET_API_URL", "http://localhost:8002") -CHAT_ENDPOINT = f"{API_BASE_URL}/chat" -FEEDBACK_ENDPOINT = f"{API_BASE_URL}/feedback" -INGEST_ANALYZE_ENDPOINT = f"{API_BASE_URL}/ingest/analyze" -INGEST_SAVE_ENDPOINT = f"{API_BASE_URL}/ingest/save" -HISTORY_FILE = Path("data/logs/search_history.jsonl") - -# Qdrant Config (Direct Access for Graph) -QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6333") -QDRANT_KEY = os.getenv("QDRANT_API_KEY", None) -if QDRANT_KEY == "": QDRANT_KEY = None -COLLECTION_PREFIX = os.getenv("COLLECTION_PREFIX", "mindnet") - -# Timeout Strategy -timeout_setting = os.getenv("MINDNET_API_TIMEOUT") or os.getenv("MINDNET_LLM_TIMEOUT") -API_TIMEOUT = float(timeout_setting) if timeout_setting else 300.0 - # --- PAGE SETUP --- st.set_page_config(page_title="mindnet v2.6", page_icon="🧠", layout="wide") @@ -75,676 +48,11 @@ st.markdown(""" if "messages" not in st.session_state: 
st.session_state.messages = [] if "user_id" not in st.session_state: st.session_state.user_id = str(uuid.uuid4()) -# --- GRAPH STYLING CONFIG (WP-19) --- -# Colors based on types.yaml and standard conventions -GRAPH_COLORS = { - "project": "#ff9f43", # Orange - "concept": "#54a0ff", # Blue - "decision": "#5f27cd", # Purple - "risk": "#ff6b6b", # Red - "person": "#1dd1a1", # Green - "experience": "#feca57",# Yellow - "value": "#00d2d3", # Cyan - "goal": "#ff9ff3", # Pink - "default": "#8395a7" # Grey -} - -# Colors based on edge 'kind' -EDGE_COLORS = { - "depends_on": "#ff6b6b", # Red (Blocker) - "blocks": "#ee5253", # Dark Red - "caused_by": "#ff9ff3", # Pink - "related_to": "#c8d6e5", # Light Grey - "similar_to": "#c8d6e5", # Light Grey - "next": "#54a0ff", # Blue - "derived_from": "#ff9ff3",# Pink - "references": "#bdc3c7", # Grey - "belongs_to": "#2e86de" # Dark Blue - "contributes_to": "#1dd1a1" -} - -# --- HELPER FUNCTIONS --- - -def slugify(value): - if not value: return "" - value = str(value).lower() - replacements = {'ä': 'ae', 'ö': 'oe', 'ü': 'ue', 'ß': 'ss', '&': 'und', '+': 'und'} - for k, v in replacements.items(): - value = value.replace(k, v) - - value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii') - value = re.sub(r'[^\w\s-]', '', value).strip() - return re.sub(r'[-\s]+', '-', value) - -def normalize_meta_and_body(meta, body): - ALLOWED_KEYS = {"title", "type", "status", "tags", "id", "created", "updated", "aliases", "lang"} - clean_meta = {} - extra_content = [] - - if "titel" in meta and "title" not in meta: - meta["title"] = meta.pop("titel") - - tag_candidates = ["tags", "emotionale_keywords", "keywords", "schluesselwoerter"] - all_tags = [] - for key in tag_candidates: - if key in meta: - val = meta[key] - if isinstance(val, list): all_tags.extend(val) - elif isinstance(val, str): all_tags.extend([t.strip() for t in val.split(",")]) - - for key, val in meta.items(): - if key in ALLOWED_KEYS: - clean_meta[key] = 
val - elif key in tag_candidates: - pass - else: - if val and isinstance(val, str): - header = key.replace("_", " ").title() - extra_content.append(f"## {header}\n{val}\n") - - if all_tags: - clean_tags = [] - for t in all_tags: - t_clean = str(t).replace("#", "").strip() - if t_clean: clean_tags.append(t_clean) - clean_meta["tags"] = list(set(clean_tags)) - - if extra_content: - new_section = "\n".join(extra_content) - final_body = f"{new_section}\n{body}" - else: - final_body = body - - return clean_meta, final_body - -def parse_markdown_draft(full_text): - clean_text = full_text.strip() - pattern_block = r"```(?:markdown|md|yaml)?\s*(.*?)\s*```" - match_block = re.search(pattern_block, clean_text, re.DOTALL | re.IGNORECASE) - if match_block: - clean_text = match_block.group(1).strip() - - meta = {} - body = clean_text - yaml_str = "" - - parts = re.split(r"^---+\s*$", clean_text, maxsplit=2, flags=re.MULTILINE) - - if len(parts) >= 3: - yaml_str = parts[1] - body = parts[2] - elif clean_text.startswith("---"): - fallback_match = re.search(r"^---\s*(.*?)(?=\n#)", clean_text, re.DOTALL | re.MULTILINE) - if fallback_match: - yaml_str = fallback_match.group(1) - body = clean_text.replace(f"---{yaml_str}", "", 1).strip() - - if yaml_str: - yaml_str_clean = yaml_str.replace("#", "") - try: - parsed = yaml.safe_load(yaml_str_clean) - if isinstance(parsed, dict): - meta = parsed - except Exception as e: - print(f"YAML Parsing Warning: {e}") - - if not meta.get("title"): - h1_match = re.search(r"^#\s+(.*)$", body, re.MULTILINE) - if h1_match: - meta["title"] = h1_match.group(1).strip() - - if meta.get("type") == "draft": - meta["status"] = "draft" - meta["type"] = "experience" - - return normalize_meta_and_body(meta, body) - -def build_markdown_doc(meta, body): - if "id" not in meta or meta["id"] == "generated_on_save": - raw_title = meta.get('title', 'note') - clean_slug = slugify(raw_title)[:50] or "note" - meta["id"] = 
f"{datetime.now().strftime('%Y%m%d')}-{clean_slug}" - - meta["updated"] = datetime.now().strftime("%Y-%m-%d") - - ordered_meta = {} - prio_keys = ["id", "type", "title", "status", "tags"] - for k in prio_keys: - if k in meta: ordered_meta[k] = meta.pop(k) - ordered_meta.update(meta) - - try: - yaml_str = yaml.dump(ordered_meta, default_flow_style=None, sort_keys=False, allow_unicode=True).strip() - except: - yaml_str = "error: generating_yaml" - - return f"---\n{yaml_str}\n---\n\n{body}" - -def load_history_from_logs(limit=10): - queries = [] - if HISTORY_FILE.exists(): - try: - with open(HISTORY_FILE, "r", encoding="utf-8") as f: - lines = f.readlines() - for line in reversed(lines): - try: - entry = json.loads(line) - q = entry.get("query_text") - if q and q not in queries: - queries.append(q) - if len(queries) >= limit: break - except: continue - except: pass - return queries - -# --- WP-19 GRAPH SERVICE (Advanced) --- - -class GraphExplorerService: - def __init__(self, url, api_key=None, prefix="mindnet"): - self.client = QdrantClient(url=url, api_key=api_key) - self.prefix = prefix - self.notes_col = f"{prefix}_notes" - self.chunks_col = f"{prefix}_chunks" - self.edges_col = f"{prefix}_edges" - self._note_cache = {} - - def get_ego_graph(self, center_note_id: str): - nodes_dict = {} - unique_edges = {} - - center_note = self._fetch_note_cached(center_note_id) - if not center_note: return [], [] - self._add_node_to_dict(nodes_dict, center_note, is_center=True) - - center_title = center_note.get("title") - - # Chunks laden - scroll_filter = models.Filter( - must=[models.FieldCondition(key="note_id", match=models.MatchValue(value=center_note_id))] - ) - chunks, _ = self.client.scroll( - collection_name=self.chunks_col, scroll_filter=scroll_filter, limit=100, with_payload=True - ) - center_chunk_ids = [c.id for c in chunks] - - raw_edges = [] - - # 1. 
OUTGOING: Source ist einer unserer Chunks - if center_chunk_ids: - out_filter = models.Filter( - must=[models.FieldCondition(key="source_id", match=models.MatchAny(any=center_chunk_ids))] - ) - res_out, _ = self.client.scroll( - collection_name=self.edges_col, scroll_filter=out_filter, limit=100, with_payload=True - ) - raw_edges.extend(res_out) - - # 2. INCOMING: Target ist Chunk, Titel oder exakte Note-ID - # Hinweis: Target mit #Section (z.B. 'note#header') kann via Keyword-Index schwer gefunden werden, - # wenn wir den Header-Teil nicht kennen. - must_conditions = [] - if center_chunk_ids: - must_conditions.append(models.FieldCondition(key="target_id", match=models.MatchAny(any=center_chunk_ids))) - if center_title: - must_conditions.append(models.FieldCondition(key="target_id", match=models.MatchValue(value=center_title))) - - # NEU: Auch exakte Note-ID als Target prüfen - must_conditions.append(models.FieldCondition(key="target_id", match=models.MatchValue(value=center_note_id))) - - if must_conditions: - in_filter = models.Filter(should=must_conditions) # 'should' wirkt wie OR - res_in, _ = self.client.scroll( - collection_name=self.edges_col, scroll_filter=in_filter, limit=100, with_payload=True - ) - raw_edges.extend(res_in) - - # Verarbeitung - for record in raw_edges: - payload = record.payload - - src_ref = payload.get("source_id") - tgt_ref = payload.get("target_id") - kind = payload.get("kind", "related_to") - provenance = payload.get("provenance", "explicit") - - src_note = self._resolve_note_from_ref(src_ref) - tgt_note = self._resolve_note_from_ref(tgt_ref) - - if src_note and tgt_note: - src_id = src_note['note_id'] - tgt_id = tgt_note['note_id'] - - if src_id != tgt_id: - self._add_node_to_dict(nodes_dict, src_note) - self._add_node_to_dict(nodes_dict, tgt_note) - - key = (src_id, tgt_id) - existing = unique_edges.get(key) - - is_current_explicit = (provenance == "explicit" or provenance == "rule") - should_update = True - - if existing: - 
is_existing_explicit = (existing['provenance'] == "explicit" or existing['provenance'] == "rule") - if is_existing_explicit and not is_current_explicit: - should_update = False - - if should_update: - unique_edges[key] = { - "source": src_id, "target": tgt_id, "kind": kind, "provenance": provenance - } - - final_edges = [] - for (src, tgt), data in unique_edges.items(): - kind = data['kind'] - prov = data['provenance'] - color = EDGE_COLORS.get(kind, "#bdc3c7") - is_smart = (prov != "explicit" and prov != "rule") - - final_edges.append(Edge( - source=src, target=tgt, label=kind, color=color, dashes=is_smart, - title=f"Provenance: {prov}\nType: {kind}" - )) - - return list(nodes_dict.values()), final_edges - - def _fetch_note_cached(self, note_id): - if note_id in self._note_cache: return self._note_cache[note_id] - res, _ = self.client.scroll( - collection_name=self.notes_col, - scroll_filter=models.Filter(must=[models.FieldCondition(key="note_id", match=models.MatchValue(value=note_id))]), - limit=1, with_payload=True - ) - if res: - self._note_cache[note_id] = res[0].payload - return res[0].payload - return None - - def _resolve_note_from_ref(self, ref_str): - if not ref_str: return None - - # Fall A: Chunk ID (Format: note_id#cXX) - if "#" in ref_str: - # 1. Versuch: Echte Chunk ID in DB suchen - try: - res = self.client.retrieve(collection_name=self.chunks_col, ids=[ref_str], with_payload=True) - if res: - parent_id = res[0].payload.get("note_id") - return self._fetch_note_cached(parent_id) - except: pass - - # 2. Versuch (NEU): Es ist ein Link auf eine Section (z.B. 
"note-id#Header") - # Wir entfernen den Hash-Teil und suchen die Basis-Notiz - possible_note_id = ref_str.split("#")[0] - note_by_id = self._fetch_note_cached(possible_note_id) - if note_by_id: return note_by_id - - # Fall B: Es ist direkt die Note ID - note_by_id = self._fetch_note_cached(ref_str) - if note_by_id: return note_by_id - - # Fall C: Es ist der Titel (Wikilink) - res, _ = self.client.scroll( - collection_name=self.notes_col, - scroll_filter=models.Filter(must=[models.FieldCondition(key="title", match=models.MatchValue(value=ref_str))]), - limit=1, with_payload=True - ) - if res: - p = res[0].payload - self._note_cache[p['note_id']] = p - return p - - return None - - def _add_node_to_dict(self, node_dict, note_payload, is_center=False): - nid = note_payload.get("note_id") - if nid in node_dict: return - ntype = note_payload.get("type", "default") - color = GRAPH_COLORS.get(ntype, GRAPH_COLORS["default"]) - size = 35 if is_center else 20 - node_dict[nid] = Node( - id=nid, label=note_payload.get("title", nid), size=size, color=color, shape="dot", - title=f"Type: {ntype}\nTags: {note_payload.get('tags')}", font={'color': 'black'} - ) - -# Init Graph Service +# --- SERVICE INIT --- +# Initialisiert den Graph Service einmalig graph_service = GraphExplorerService(QDRANT_URL, QDRANT_KEY, COLLECTION_PREFIX) -# --- API CLIENT --- - -def send_chat_message(message: str, top_k: int, explain: bool): - try: - response = requests.post( - CHAT_ENDPOINT, - json={"message": message, "top_k": top_k, "explain": explain}, - timeout=API_TIMEOUT - ) - response.raise_for_status() - return response.json() - except Exception as e: - return {"error": str(e)} - -def analyze_draft_text(text: str, n_type: str): - try: - response = requests.post(INGEST_ANALYZE_ENDPOINT, json={"text": text, "type": n_type}, timeout=15) - response.raise_for_status() - return response.json() - except Exception as e: - return {"error": str(e)} - -def save_draft_to_vault(markdown_content: str, filename: 
str = None): - try: - response = requests.post(INGEST_SAVE_ENDPOINT, json={"markdown_content": markdown_content, "filename": filename}, timeout=API_TIMEOUT) - response.raise_for_status() - return response.json() - except Exception as e: - return {"error": str(e)} - -def submit_feedback(query_id, node_id, score, comment=None): - try: - requests.post(FEEDBACK_ENDPOINT, json={"query_id": query_id, "node_id": node_id, "score": score, "comment": comment}, timeout=2) - st.toast(f"Feedback ({score}) gesendet!") - except: pass - -# --- UI COMPONENTS --- - -def render_sidebar(): - with st.sidebar: - st.title("🧠 mindnet") - st.caption("v2.6 | WP-19 Graph View") - mode = st.radio("Modus", ["💬 Chat", "📝 Manueller Editor", "🕸️ Graph Explorer"], index=0) - st.divider() - st.subheader("⚙️ Settings") - top_k = st.slider("Quellen (Top-K)", 1, 10, 5) - explain = st.toggle("Explanation Layer", True) - st.divider() - st.subheader("🕒 Verlauf") - for q in load_history_from_logs(8): - if st.button(f"🔎 {q[:25]}...", key=f"hist_{q}", use_container_width=True): - st.session_state.messages.append({"role": "user", "content": q}) - st.rerun() - return mode, top_k, explain - -def render_draft_editor(msg): - if "query_id" not in msg or not msg["query_id"]: - msg["query_id"] = str(uuid.uuid4()) - - qid = msg["query_id"] - key_base = f"draft_{qid}" - - # State Keys - data_meta_key = f"{key_base}_data_meta" - data_sugg_key = f"{key_base}_data_suggestions" - widget_body_key = f"{key_base}_widget_body" - data_body_key = f"{key_base}_data_body" - - # INIT STATE - if f"{key_base}_init" not in st.session_state: - meta, body = parse_markdown_draft(msg["content"]) - if "type" not in meta: meta["type"] = "default" - if "title" not in meta: meta["title"] = "" - tags = meta.get("tags", []) - meta["tags_str"] = ", ".join(tags) if isinstance(tags, list) else str(tags) - - st.session_state[data_meta_key] = meta - st.session_state[data_sugg_key] = [] - st.session_state[data_body_key] = body.strip() - 
st.session_state[f"{key_base}_wdg_title"] = meta["title"] - st.session_state[f"{key_base}_wdg_type"] = meta["type"] - st.session_state[f"{key_base}_wdg_tags"] = meta["tags_str"] - st.session_state[f"{key_base}_init"] = True - - # RESURRECTION - if widget_body_key not in st.session_state and data_body_key in st.session_state: - st.session_state[widget_body_key] = st.session_state[data_body_key] - - # CALLBACKS - def _sync_meta(): - meta = st.session_state[data_meta_key] - meta["title"] = st.session_state.get(f"{key_base}_wdg_title", "") - meta["type"] = st.session_state.get(f"{key_base}_wdg_type", "default") - meta["tags_str"] = st.session_state.get(f"{key_base}_wdg_tags", "") - st.session_state[data_meta_key] = meta - - def _sync_body(): - st.session_state[data_body_key] = st.session_state[widget_body_key] - - def _insert_text(text_to_insert): - current = st.session_state.get(widget_body_key, "") - new_text = f"{current}\n\n{text_to_insert}" - st.session_state[widget_body_key] = new_text - st.session_state[data_body_key] = new_text - - def _remove_text(text_to_remove): - current = st.session_state.get(widget_body_key, "") - new_text = current.replace(text_to_remove, "").strip() - st.session_state[widget_body_key] = new_text - st.session_state[data_body_key] = new_text - - # UI LAYOUT - st.markdown(f'
', unsafe_allow_html=True) - st.markdown("### 📝 Entwurf bearbeiten") - - meta_ref = st.session_state[data_meta_key] - c1, c2 = st.columns([2, 1]) - with c1: - st.text_input("Titel", key=f"{key_base}_wdg_title", on_change=_sync_meta) - with c2: - known_types = ["concept", "project", "decision", "experience", "journal", "value", "goal", "principle", "risk", "belief"] - curr_type = st.session_state.get(f"{key_base}_wdg_type", meta_ref["type"]) - if curr_type not in known_types: known_types.append(curr_type) - st.selectbox("Typ", known_types, key=f"{key_base}_wdg_type", on_change=_sync_meta) - - st.text_input("Tags", key=f"{key_base}_wdg_tags", on_change=_sync_meta) - - tab_edit, tab_intel, tab_view = st.tabs(["✏️ Inhalt", "🧠 Intelligence", "👁️ Vorschau"]) - - with tab_edit: - st.text_area("Body", key=widget_body_key, height=500, on_change=_sync_body, label_visibility="collapsed") - - with tab_intel: - st.info("Klicke auf 'Analysieren', um Verknüpfungen für den AKTUELLEN Text zu finden.") - if st.button("🔍 Analyse starten", key=f"{key_base}_analyze"): - st.session_state[data_sugg_key] = [] - text_to_analyze = st.session_state.get(widget_body_key, st.session_state.get(data_body_key, "")) - current_doc_type = st.session_state.get(f"{key_base}_wdg_type", "concept") - - with st.spinner("Analysiere..."): - analysis = analyze_draft_text(text_to_analyze, current_doc_type) - if "error" in analysis: - st.error(f"Fehler: {analysis['error']}") - else: - suggestions = analysis.get("suggestions", []) - st.session_state[data_sugg_key] = suggestions - if not suggestions: st.warning("Keine Vorschläge gefunden.") - else: st.success(f"{len(suggestions)} Vorschläge gefunden.") - - suggestions = st.session_state[data_sugg_key] - if suggestions: - current_text_state = st.session_state.get(widget_body_key, "") - for idx, sugg in enumerate(suggestions): - link_text = sugg.get('suggested_markdown', '') - is_inserted = link_text in current_text_state - bg_color = "#e6fffa" if is_inserted else 
"#ffffff" - border = "3px solid #28a745" if is_inserted else "3px solid #1a73e8" - st.markdown(f""" -
- {sugg.get('target_title')} ({sugg.get('type')})
- {sugg.get('reason')}
- {link_text} -
- """, unsafe_allow_html=True) - if is_inserted: - st.button("❌ Entfernen", key=f"del_{idx}_{key_base}", on_click=_remove_text, args=(link_text,)) - else: - st.button("➕ Einfügen", key=f"add_{idx}_{key_base}", on_click=_insert_text, args=(link_text,)) - - final_tags_str = st.session_state.get(f"{key_base}_wdg_tags", "") - final_tags = [t.strip() for t in final_tags_str.split(",") if t.strip()] - final_meta = { - "id": "generated_on_save", - "type": st.session_state.get(f"{key_base}_wdg_type", "default"), - "title": st.session_state.get(f"{key_base}_wdg_title", "").strip(), - "status": "draft", - "tags": final_tags - } - final_body = st.session_state.get(widget_body_key, st.session_state[data_body_key]) - if not final_meta["title"]: - h1_match = re.search(r"^#\s+(.*)$", final_body, re.MULTILINE) - if h1_match: final_meta["title"] = h1_match.group(1).strip() - - final_doc = build_markdown_doc(final_meta, final_body) - - with tab_view: - st.markdown('
', unsafe_allow_html=True) - st.markdown(final_doc) - st.markdown('
', unsafe_allow_html=True) - - st.markdown("---") - b1, b2 = st.columns([1, 1]) - with b1: - if st.button("💾 Speichern & Indizieren", type="primary", key=f"{key_base}_save"): - with st.spinner("Speichere im Vault..."): - raw_title = final_meta.get("title", "") - if not raw_title: - clean_body = re.sub(r"[#*_\[\]()]", "", final_body).strip() - raw_title = clean_body[:40] if clean_body else "draft" - safe_title = slugify(raw_title)[:60] or "draft" - fname = f"{datetime.now().strftime('%Y%m%d')}-{safe_title}.md" - result = save_draft_to_vault(final_doc, filename=fname) - if "error" in result: st.error(f"Fehler: {result['error']}") - else: - st.success(f"Gespeichert: {result.get('file_path')}") - st.balloons() - with b2: - if st.button("📋 Code anzeigen", key=f"{key_base}_btn_copy"): - st.code(final_doc, language="markdown") - st.markdown("
", unsafe_allow_html=True) - -def render_chat_interface(top_k, explain): - for idx, msg in enumerate(st.session_state.messages): - with st.chat_message(msg["role"]): - if msg["role"] == "assistant": - intent = msg.get("intent", "UNKNOWN") - src = msg.get("intent_source", "?") - icon = {"EMPATHY":"❤️", "DECISION":"⚖️", "CODING":"💻", "FACT":"📚", "INTERVIEW":"📝"}.get(intent, "🧠") - st.markdown(f'
{icon} Intent: {intent} ({src})
', unsafe_allow_html=True) - - with st.expander("🐞 Debug Raw Payload", expanded=False): - st.json(msg) - - if intent == "INTERVIEW": - render_draft_editor(msg) - else: - st.markdown(msg["content"]) - - if "sources" in msg and msg["sources"]: - for hit in msg["sources"]: - with st.expander(f"📄 {hit.get('note_id', '?')} ({hit.get('total_score', 0):.2f})"): - st.markdown(f"_{hit.get('source', {}).get('text', '')[:300]}..._") - if hit.get('explanation'): - st.caption(f"Grund: {hit['explanation']['reasons'][0]['message']}") - def _cb(qid=msg.get("query_id"), nid=hit.get('node_id')): - val = st.session_state.get(f"fb_src_{qid}_{nid}") - if val is not None: submit_feedback(qid, nid, val+1) - st.feedback("faces", key=f"fb_src_{msg.get('query_id')}_{hit.get('node_id')}", on_change=_cb) - - if "query_id" in msg: - qid = msg["query_id"] - st.feedback("stars", key=f"fb_glob_{qid}", on_change=lambda: submit_feedback(qid, "generated_answer", st.session_state[f"fb_glob_{qid}"]+1)) - else: - st.markdown(msg["content"]) - - if prompt := st.chat_input("Frage Mindnet..."): - st.session_state.messages.append({"role": "user", "content": prompt}) - st.rerun() - - if len(st.session_state.messages) > 0 and st.session_state.messages[-1]["role"] == "user": - with st.chat_message("assistant"): - with st.spinner("Thinking..."): - resp = send_chat_message(st.session_state.messages[-1]["content"], top_k, explain) - if "error" in resp: - st.error(resp["error"]) - else: - st.session_state.messages.append({ - "role": "assistant", - "content": resp.get("answer"), - "intent": resp.get("intent", "FACT"), - "intent_source": resp.get("intent_source", "Unknown"), - "sources": resp.get("sources", []), - "query_id": resp.get("query_id") - }) - st.rerun() - -def render_manual_editor(): - mock_msg = { - "content": "---\ntype: concept\ntitle: Neue Notiz\nstatus: draft\ntags: []\n---\n# Titel\n", - "query_id": "manual_mode_v2" - } - render_draft_editor(mock_msg) - -def render_graph_explorer(): - st.header("🕸️ 
Graph Explorer (WP-19)") - - col_ctrl, col_graph = st.columns([1, 3]) - - with col_ctrl: - st.subheader("Fokus setzen") - search_term = st.text_input("Suche Notiz (Titel)", placeholder="z.B. Project Alpha") - selected_note_id = None - - if search_term: - # Suche nach Titel für Autocomplete - hits, _ = graph_service.client.scroll( - collection_name=f"{COLLECTION_PREFIX}_notes", - scroll_filter=models.Filter( - must=[models.FieldCondition(key="title", match=models.MatchText(text=search_term))] - ), - limit=10 - ) - options = {h.payload['title']: h.payload['note_id'] for h in hits} - if options: - selected_title = st.selectbox("Wähle Notiz:", list(options.keys())) - selected_note_id = options[selected_title] - else: - st.warning("Keine Notiz gefunden.") - - st.markdown("---") - st.markdown("**Legende:**") - st.markdown(f"🔴 **Blocker** (Risk/Block)") - st.markdown(f"🔵 **Konzept/Struktur**") - st.markdown(f"🟣 **Entscheidung**") - st.markdown(f"--- **Solid**: Explicit Link") - st.markdown(f"- - **Dashed**: Smart/AI Link") - - with col_graph: - if selected_note_id: - with st.spinner(f"Lade Graph für {selected_note_id}..."): - nodes, edges = graph_service.get_ego_graph(selected_note_id) - - if not nodes: - st.error("Knoten konnte nicht geladen werden.") - else: - config = Config( - width=900, - height=700, - directed=True, - physics=True, - hierarchical=False, - nodeHighlightBehavior=True, - highlightColor="#F7A7A6", - collapsible=False - ) - # Rendering the Graph - st.caption(f"Graph zeigt {len(nodes)} Knoten und {len(edges)} Kanten.") - return_value = agraph(nodes=nodes, edges=edges, config=config) - - if return_value: - st.info(f"Auswahl: {return_value}") - else: - st.info("👈 Bitte wähle links eine Notiz aus, um den Graphen zu starten.") - -# --- MAIN --- +# --- MAIN ROUTING --- mode, top_k, explain = render_sidebar() if mode == "💬 Chat": @@ -752,4 +60,4 @@ if mode == "💬 Chat": elif mode == "📝 Manueller Editor": render_manual_editor() elif mode == "🕸️ Graph Explorer": - 
render_graph_explorer() \ No newline at end of file + render_graph_explorer(graph_service) \ No newline at end of file diff --git a/app/frontend/ui_api.py b/app/frontend/ui_api.py new file mode 100644 index 0000000..8057a26 --- /dev/null +++ b/app/frontend/ui_api.py @@ -0,0 +1,37 @@ +import requests +import streamlit as st +from ui_config import CHAT_ENDPOINT, INGEST_ANALYZE_ENDPOINT, INGEST_SAVE_ENDPOINT, FEEDBACK_ENDPOINT, API_TIMEOUT + +def send_chat_message(message: str, top_k: int, explain: bool): + try: + response = requests.post( + CHAT_ENDPOINT, + json={"message": message, "top_k": top_k, "explain": explain}, + timeout=API_TIMEOUT + ) + response.raise_for_status() + return response.json() + except Exception as e: + return {"error": str(e)} + +def analyze_draft_text(text: str, n_type: str): + try: + response = requests.post(INGEST_ANALYZE_ENDPOINT, json={"text": text, "type": n_type}, timeout=15) + response.raise_for_status() + return response.json() + except Exception as e: + return {"error": str(e)} + +def save_draft_to_vault(markdown_content: str, filename: str = None): + try: + response = requests.post(INGEST_SAVE_ENDPOINT, json={"markdown_content": markdown_content, "filename": filename}, timeout=API_TIMEOUT) + response.raise_for_status() + return response.json() + except Exception as e: + return {"error": str(e)} + +def submit_feedback(query_id, node_id, score, comment=None): + try: + requests.post(FEEDBACK_ENDPOINT, json={"query_id": query_id, "node_id": node_id, "score": score, "comment": comment}, timeout=2) + st.toast(f"Feedback ({score}) gesendet!") + except: pass \ No newline at end of file diff --git a/app/frontend/ui_components.py b/app/frontend/ui_components.py new file mode 100644 index 0000000..30a9959 --- /dev/null +++ b/app/frontend/ui_components.py @@ -0,0 +1,305 @@ +import streamlit as st +import uuid +import re +from datetime import datetime +from streamlit_agraph import agraph, Config +from qdrant_client import models + +from ui_utils 
import streamlit as st
import uuid
import re
from datetime import datetime
from streamlit_agraph import agraph, Config
from qdrant_client import models

from ui_utils import parse_markdown_draft, build_markdown_doc, load_history_from_logs, slugify
from ui_api import save_draft_to_vault, analyze_draft_text, send_chat_message, submit_feedback
from ui_config import HISTORY_FILE, COLLECTION_PREFIX


def render_sidebar():
    """Render the global sidebar: mode switch, retrieval settings, query history.

    Returns:
        tuple[str, int, bool]: selected mode label, top-k source count, and
        whether the explanation layer is enabled.
    """
    with st.sidebar:
        st.title("🧠 mindnet")
        st.caption("v2.6 | WP-19 Graph View")
        mode = st.radio("Modus", ["💬 Chat", "📝 Manueller Editor", "🕸️ Graph Explorer"], index=0)
        st.divider()
        st.subheader("⚙️ Settings")
        top_k = st.slider("Quellen (Top-K)", 1, 10, 5)
        explain = st.toggle("Explanation Layer", True)
        st.divider()
        st.subheader("🕒 Verlauf")
        # Clicking a history entry replays it as a fresh user message.
        for q in load_history_from_logs(HISTORY_FILE, 8):
            if st.button(f"🔎 {q[:25]}...", key=f"hist_{q}", use_container_width=True):
                st.session_state.messages.append({"role": "user", "content": q})
                st.rerun()
    return mode, top_k, explain


def render_draft_editor(msg):
    """Interactive editor for an LLM-generated draft note.

    The draft in ``msg["content"]`` is parsed exactly once; afterwards all
    edits live in ``st.session_state`` keyed by the message's ``query_id`` so
    they survive Streamlit reruns and tab switches. ``*_data_*`` keys hold the
    canonical values, ``*_wdg_*`` / widget keys mirror them for the widgets.
    """
    if "query_id" not in msg or not msg["query_id"]:
        msg["query_id"] = str(uuid.uuid4())

    qid = msg["query_id"]
    key_base = f"draft_{qid}"

    # State keys
    data_meta_key = f"{key_base}_data_meta"
    data_sugg_key = f"{key_base}_data_suggestions"
    widget_body_key = f"{key_base}_widget_body"
    data_body_key = f"{key_base}_data_body"

    # INIT STATE — runs once per draft.
    if f"{key_base}_init" not in st.session_state:
        meta, body = parse_markdown_draft(msg["content"])
        if "type" not in meta:
            meta["type"] = "default"
        if "title" not in meta:
            meta["title"] = ""
        tags = meta.get("tags", [])
        meta["tags_str"] = ", ".join(tags) if isinstance(tags, list) else str(tags)

        st.session_state[data_meta_key] = meta
        st.session_state[data_sugg_key] = []
        st.session_state[data_body_key] = body.strip()
        st.session_state[f"{key_base}_wdg_title"] = meta["title"]
        st.session_state[f"{key_base}_wdg_type"] = meta["type"]
        st.session_state[f"{key_base}_wdg_tags"] = meta["tags_str"]
        st.session_state[f"{key_base}_init"] = True

    # RESURRECTION — Streamlit drops widget state when the widget is not
    # rendered (e.g. user was on another tab); restore it from the data copy.
    if widget_body_key not in st.session_state and data_body_key in st.session_state:
        st.session_state[widget_body_key] = st.session_state[data_body_key]

    # CALLBACKS — keep data copies in sync with the widget state.
    def _sync_meta():
        meta = st.session_state[data_meta_key]
        meta["title"] = st.session_state.get(f"{key_base}_wdg_title", "")
        meta["type"] = st.session_state.get(f"{key_base}_wdg_type", "default")
        meta["tags_str"] = st.session_state.get(f"{key_base}_wdg_tags", "")
        st.session_state[data_meta_key] = meta

    def _sync_body():
        st.session_state[data_body_key] = st.session_state[widget_body_key]

    def _insert_text(text_to_insert):
        current = st.session_state.get(widget_body_key, "")
        new_text = f"{current}\n\n{text_to_insert}"
        st.session_state[widget_body_key] = new_text
        st.session_state[data_body_key] = new_text

    def _remove_text(text_to_remove):
        current = st.session_state.get(widget_body_key, "")
        new_text = current.replace(text_to_remove, "").strip()
        st.session_state[widget_body_key] = new_text
        st.session_state[data_body_key] = new_text

    # UI LAYOUT
    # NOTE(review): the opening wrapper markup was garbled in the diff this was
    # reconstructed from — verify the div class/style against the CSS in ui.py.
    st.markdown('<div class="draft-editor">', unsafe_allow_html=True)
    st.markdown("### 📝 Entwurf bearbeiten")

    meta_ref = st.session_state[data_meta_key]
    c1, c2 = st.columns([2, 1])
    with c1:
        st.text_input("Titel", key=f"{key_base}_wdg_title", on_change=_sync_meta)
    with c2:
        known_types = ["concept", "project", "decision", "experience", "journal",
                       "value", "goal", "principle", "risk", "belief"]
        curr_type = st.session_state.get(f"{key_base}_wdg_type", meta_ref["type"])
        # Keep unknown/custom types selectable instead of crashing the selectbox.
        if curr_type not in known_types:
            known_types.append(curr_type)
        st.selectbox("Typ", known_types, key=f"{key_base}_wdg_type", on_change=_sync_meta)

    st.text_input("Tags", key=f"{key_base}_wdg_tags", on_change=_sync_meta)

    tab_edit, tab_intel, tab_view = st.tabs(["✏️ Inhalt", "🧠 Intelligence", "👁️ Vorschau"])

    with tab_edit:
        st.text_area("Body", key=widget_body_key, height=500, on_change=_sync_body,
                     label_visibility="collapsed")

    with tab_intel:
        st.info("Klicke auf 'Analysieren', um Verknüpfungen für den AKTUELLEN Text zu finden.")
        if st.button("🔍 Analyse starten", key=f"{key_base}_analyze"):
            st.session_state[data_sugg_key] = []
            text_to_analyze = st.session_state.get(widget_body_key,
                                                   st.session_state.get(data_body_key, ""))
            current_doc_type = st.session_state.get(f"{key_base}_wdg_type", "concept")

            with st.spinner("Analysiere..."):
                analysis = analyze_draft_text(text_to_analyze, current_doc_type)
                if "error" in analysis:
                    st.error(f"Fehler: {analysis['error']}")
                else:
                    suggestions = analysis.get("suggestions", [])
                    st.session_state[data_sugg_key] = suggestions
                    if not suggestions:
                        st.warning("Keine Vorschläge gefunden.")
                    else:
                        st.success(f"{len(suggestions)} Vorschläge gefunden.")

        suggestions = st.session_state[data_sugg_key]
        if suggestions:
            current_text_state = st.session_state.get(widget_body_key, "")
            for idx, sugg in enumerate(suggestions):
                link_text = sugg.get('suggested_markdown', '')
                is_inserted = link_text in current_text_state
                bg_color = "#e6fffa" if is_inserted else "#ffffff"
                border = "3px solid #28a745" if is_inserted else "3px solid #1a73e8"
                # NOTE(review): suggestion-card markup reconstructed from garbled
                # diff fragments — verify styling against the original commit.
                st.markdown(f"""
                <div style="background-color: {bg_color}; border-left: {border};
                            padding: 10px; border-radius: 5px; margin-bottom: 5px;">
                    <b>{sugg.get('target_title')}</b> ({sugg.get('type')})<br>
                    <small>{sugg.get('reason')}</small><br>
                    <code>{link_text}</code>
                </div>
                """, unsafe_allow_html=True)
                if is_inserted:
                    st.button("❌ Entfernen", key=f"del_{idx}_{key_base}",
                              on_click=_remove_text, args=(link_text,))
                else:
                    st.button("➕ Einfügen", key=f"add_{idx}_{key_base}",
                              on_click=_insert_text, args=(link_text,))

    # Assemble the final document from the current widget state.
    final_tags_str = st.session_state.get(f"{key_base}_wdg_tags", "")
    final_tags = [t.strip() for t in final_tags_str.split(",") if t.strip()]
    final_meta = {
        "id": "generated_on_save",  # real id is generated by build_markdown_doc
        "type": st.session_state.get(f"{key_base}_wdg_type", "default"),
        "title": st.session_state.get(f"{key_base}_wdg_title", "").strip(),
        "status": "draft",
        "tags": final_tags,
    }
    final_body = st.session_state.get(widget_body_key, st.session_state[data_body_key])
    if not final_meta["title"]:
        # Fall back to the first markdown H1 as title.
        h1_match = re.search(r"^#\s+(.*)$", final_body, re.MULTILINE)
        if h1_match:
            final_meta["title"] = h1_match.group(1).strip()

    final_doc = build_markdown_doc(final_meta, final_body)

    with tab_view:
        # NOTE(review): preview wrapper markup reconstructed — verify class name.
        st.markdown('<div class="preview-box">', unsafe_allow_html=True)
        st.markdown(final_doc)
        st.markdown('</div>', unsafe_allow_html=True)

    st.markdown("---")
    b1, b2 = st.columns([1, 1])
    with b1:
        if st.button("💾 Speichern & Indizieren", type="primary", key=f"{key_base}_save"):
            with st.spinner("Speichere im Vault..."):
                raw_title = final_meta.get("title", "")
                if not raw_title:
                    # Derive a title from the body text when none was given.
                    clean_body = re.sub(r"[#*_\[\]()]", "", final_body).strip()
                    raw_title = clean_body[:40] if clean_body else "draft"
                safe_title = slugify(raw_title)[:60] or "draft"
                fname = f"{datetime.now().strftime('%Y%m%d')}-{safe_title}.md"
                result = save_draft_to_vault(final_doc, filename=fname)
                if "error" in result:
                    st.error(f"Fehler: {result['error']}")
                else:
                    st.success(f"Gespeichert: {result.get('file_path')}")
                    st.balloons()
    with b2:
        if st.button("📋 Code anzeigen", key=f"{key_base}_btn_copy"):
            st.code(final_doc, language="markdown")
    st.markdown("</div>", unsafe_allow_html=True)
", unsafe_allow_html=True) + +def render_chat_interface(top_k, explain): + for idx, msg in enumerate(st.session_state.messages): + with st.chat_message(msg["role"]): + if msg["role"] == "assistant": + intent = msg.get("intent", "UNKNOWN") + src = msg.get("intent_source", "?") + icon = {"EMPATHY":"❤️", "DECISION":"⚖️", "CODING":"💻", "FACT":"📚", "INTERVIEW":"📝"}.get(intent, "🧠") + st.markdown(f'
{icon} Intent: {intent} ({src})
', unsafe_allow_html=True) + + with st.expander("🐞 Debug Raw Payload", expanded=False): + st.json(msg) + + if intent == "INTERVIEW": + render_draft_editor(msg) + else: + st.markdown(msg["content"]) + + if "sources" in msg and msg["sources"]: + for hit in msg["sources"]: + with st.expander(f"📄 {hit.get('note_id', '?')} ({hit.get('total_score', 0):.2f})"): + st.markdown(f"_{hit.get('source', {}).get('text', '')[:300]}..._") + if hit.get('explanation'): + st.caption(f"Grund: {hit['explanation']['reasons'][0]['message']}") + def _cb(qid=msg.get("query_id"), nid=hit.get('node_id')): + val = st.session_state.get(f"fb_src_{qid}_{nid}") + if val is not None: submit_feedback(qid, nid, val+1) + st.feedback("faces", key=f"fb_src_{msg.get('query_id')}_{hit.get('node_id')}", on_change=_cb) + + if "query_id" in msg: + qid = msg["query_id"] + st.feedback("stars", key=f"fb_glob_{qid}", on_change=lambda: submit_feedback(qid, "generated_answer", st.session_state[f"fb_glob_{qid}"]+1)) + else: + st.markdown(msg["content"]) + + if prompt := st.chat_input("Frage Mindnet..."): + st.session_state.messages.append({"role": "user", "content": prompt}) + st.rerun() + + if len(st.session_state.messages) > 0 and st.session_state.messages[-1]["role"] == "user": + with st.chat_message("assistant"): + with st.spinner("Thinking..."): + resp = send_chat_message(st.session_state.messages[-1]["content"], top_k, explain) + if "error" in resp: + st.error(resp["error"]) + else: + st.session_state.messages.append({ + "role": "assistant", + "content": resp.get("answer"), + "intent": resp.get("intent", "FACT"), + "intent_source": resp.get("intent_source", "Unknown"), + "sources": resp.get("sources", []), + "query_id": resp.get("query_id") + }) + st.rerun() + +def render_manual_editor(): + mock_msg = { + "content": "---\ntype: concept\ntitle: Neue Notiz\nstatus: draft\ntags: []\n---\n# Titel\n", + "query_id": "manual_mode_v2" + } + render_draft_editor(mock_msg) + +def render_graph_explorer(graph_service): + 
st.header("🕸️ Graph Explorer (WP-19)") + + col_ctrl, col_graph = st.columns([1, 3]) + + with col_ctrl: + st.subheader("Fokus setzen") + search_term = st.text_input("Suche Notiz (Titel)", placeholder="z.B. Project Alpha") + selected_note_id = None + + if search_term: + hits, _ = graph_service.client.scroll( + collection_name=f"{COLLECTION_PREFIX}_notes", + scroll_filter=models.Filter( + must=[models.FieldCondition(key="title", match=models.MatchText(text=search_term))] + ), + limit=10 + ) + options = {h.payload['title']: h.payload['note_id'] for h in hits} + if options: + selected_title = st.selectbox("Wähle Notiz:", list(options.keys())) + selected_note_id = options[selected_title] + else: + st.warning("Keine Notiz gefunden.") + + st.markdown("---") + st.markdown("**Legende:**") + st.markdown(f"🔴 **Blocker** (Risk/Block)") + st.markdown(f"🔵 **Konzept/Struktur**") + st.markdown(f"🟣 **Entscheidung**") + st.markdown(f"🟢 **Beitrag**") + st.markdown(f"--- **Solid**: Explicit Link") + st.markdown(f"- - **Dashed**: Smart/AI Link") + + with col_graph: + if selected_note_id: + with st.spinner(f"Lade Graph für {selected_note_id}..."): + nodes, edges = graph_service.get_ego_graph(selected_note_id) + + if not nodes: + st.error("Knoten konnte nicht geladen werden.") + else: + config = Config( + width=900, + height=700, + directed=True, + physics=True, + hierarchical=False, + nodeHighlightBehavior=True, + highlightColor="#F7A7A6", + collapsible=False + ) + st.caption(f"Graph zeigt {len(nodes)} Knoten und {len(edges)} Kanten.") + return_value = agraph(nodes=nodes, edges=edges, config=config) + + if return_value: + st.info(f"Auswahl: {return_value}") + else: + st.info("👈 Bitte wähle links eine Notiz aus, um den Graphen zu starten.") \ No newline at end of file diff --git a/app/frontend/ui_config.py b/app/frontend/ui_config.py new file mode 100644 index 0000000..af95cff --- /dev/null +++ b/app/frontend/ui_config.py @@ -0,0 +1,49 @@ +import os +from dotenv import load_dotenv +from 
import os
from pathlib import Path

from dotenv import load_dotenv

load_dotenv()

# --- Backend API endpoints ---
API_BASE_URL = os.getenv("MINDNET_API_URL", "http://localhost:8002")
CHAT_ENDPOINT = f"{API_BASE_URL}/chat"
FEEDBACK_ENDPOINT = f"{API_BASE_URL}/feedback"
INGEST_ANALYZE_ENDPOINT = f"{API_BASE_URL}/ingest/analyze"
INGEST_SAVE_ENDPOINT = f"{API_BASE_URL}/ingest/save"

# --- Qdrant (direct access for the graph explorer) ---
QDRANT_URL = os.getenv("QDRANT_URL", "http://localhost:6333")
# Treat an empty env var the same as "not set".
QDRANT_KEY = os.getenv("QDRANT_API_KEY") or None
COLLECTION_PREFIX = os.getenv("COLLECTION_PREFIX", "mindnet")

# --- Files & timeouts ---
HISTORY_FILE = Path("data/logs/search_history.jsonl")
# Prefer the API-specific timeout; fall back to the LLM timeout, then 300s.
_timeout_setting = os.getenv("MINDNET_API_TIMEOUT") or os.getenv("MINDNET_LLM_TIMEOUT")
API_TIMEOUT = float(_timeout_setting) if _timeout_setting else 300.0

# --- Styling constants ---
# Node colors keyed by note type (see types.yaml).
GRAPH_COLORS = {
    "project": "#ff9f43",    # orange
    "concept": "#54a0ff",    # blue
    "decision": "#5f27cd",   # purple
    "risk": "#ff6b6b",       # red
    "person": "#1dd1a1",     # green
    "experience": "#feca57", # yellow
    "value": "#00d2d3",      # cyan
    "goal": "#ff9ff3",       # pink
    "default": "#8395a7",    # grey
}

# Edge colors keyed by edge 'kind'.
EDGE_COLORS = {
    "depends_on": "#ff6b6b",     # red (blocker)
    "blocks": "#ee5253",         # dark red
    "caused_by": "#ff9ff3",      # pink
    "related_to": "#c8d6e5",     # light grey
    "similar_to": "#c8d6e5",     # light grey
    "next": "#54a0ff",           # blue
    "derived_from": "#ff9ff3",   # pink
    "references": "#bdc3c7",     # grey
    "belongs_to": "#2e86de",     # dark blue
    "contributes_to": "#1dd1a1", # green
}
# NOTE(review): this module is added as ui_praph_service.py in the diff, but
# ui.py imports `from ui_graph_service import GraphExplorerService` — the file
# must be renamed to ui_graph_service.py or the app fails at startup.

class GraphExplorerService:
    """Read-only Qdrant access for the WP-19 graph explorer.

    Resolves edge records stored in ``<prefix>_edges`` — whose endpoints may
    be chunk ids, note ids, or note titles — into note-level agraph
    Nodes/Edges around one center note (an "ego graph").
    """

    def __init__(self, url, api_key=None, prefix="mindnet"):
        self.client = QdrantClient(url=url, api_key=api_key)
        self.prefix = prefix
        self.notes_col = f"{prefix}_notes"
        self.chunks_col = f"{prefix}_chunks"
        self.edges_col = f"{prefix}_edges"
        # note_id -> payload cache; avoids repeated scrolls while resolving edges.
        self._note_cache = {}

    def get_ego_graph(self, center_note_id: str):
        """Return ``(nodes, edges)`` for the 1-hop neighbourhood of *center_note_id*.

        Returns ``([], [])`` when the center note does not exist.
        """
        nodes_dict = {}
        unique_edges = {}

        # 1. Load the center note.
        center_note = self._fetch_note_cached(center_note_id)
        if not center_note:
            return [], []
        self._add_node_to_dict(nodes_dict, center_note, is_center=True)

        center_title = center_note.get("title")

        # 2. Find the chunks belonging to the center note.
        scroll_filter = models.Filter(
            must=[models.FieldCondition(key="note_id",
                                        match=models.MatchValue(value=center_note_id))]
        )
        chunks, _ = self.client.scroll(
            collection_name=self.chunks_col, scroll_filter=scroll_filter,
            limit=100, with_payload=True
        )
        center_chunk_ids = [c.id for c in chunks]

        raw_edges = []

        # 3. OUTGOING edges: source is one of our chunks.
        if center_chunk_ids:
            out_filter = models.Filter(
                must=[models.FieldCondition(key="source_id",
                                            match=models.MatchAny(any=center_chunk_ids))]
            )
            res_out, _ = self.client.scroll(
                collection_name=self.edges_col, scroll_filter=out_filter,
                limit=100, with_payload=True
            )
            raw_edges.extend(res_out)

        # 4. INCOMING edges: target may be one of our chunks, the note title,
        #    or the exact note id. (Renamed from the misleading
        #    `must_conditions` — these are OR'ed via `should` below.)
        should_conditions = []
        if center_chunk_ids:
            should_conditions.append(models.FieldCondition(
                key="target_id", match=models.MatchAny(any=center_chunk_ids)))
        if center_title:
            should_conditions.append(models.FieldCondition(
                key="target_id", match=models.MatchValue(value=center_title)))
        should_conditions.append(models.FieldCondition(
            key="target_id", match=models.MatchValue(value=center_note_id)))

        if should_conditions:
            in_filter = models.Filter(should=should_conditions)  # 'should' = OR
            res_in, _ = self.client.scroll(
                collection_name=self.edges_col, scroll_filter=in_filter,
                limit=100, with_payload=True
            )
            raw_edges.extend(res_in)

        # 5. Resolve both endpoints to notes and deduplicate edges.
        for record in raw_edges:
            payload = record.payload
            src_ref = payload.get("source_id")
            tgt_ref = payload.get("target_id")
            kind = payload.get("kind", "related_to")
            provenance = payload.get("provenance", "explicit")

            src_note = self._resolve_note_from_ref(src_ref)
            tgt_note = self._resolve_note_from_ref(tgt_ref)
            if not (src_note and tgt_note):
                continue

            src_id = src_note['note_id']
            tgt_id = tgt_note['note_id']
            if src_id == tgt_id:
                continue  # drop self-loops

            self._add_node_to_dict(nodes_dict, src_note)
            self._add_node_to_dict(nodes_dict, tgt_note)

            key = (src_id, tgt_id)
            existing = unique_edges.get(key)
            # Dedup rule: an explicit/rule edge must never be overwritten by a
            # smart (AI-derived) edge.
            is_current_explicit = provenance in ("explicit", "rule")
            if (existing
                    and existing['provenance'] in ("explicit", "rule")
                    and not is_current_explicit):
                continue
            unique_edges[key] = {
                "source": src_id, "target": tgt_id,
                "kind": kind, "provenance": provenance,
            }

        # 6. Build agraph Edge objects; smart edges are rendered dashed.
        final_edges = []
        for (src, tgt), data in unique_edges.items():
            kind = data['kind']
            prov = data['provenance']
            color = EDGE_COLORS.get(kind, "#bdc3c7")
            is_smart = prov not in ("explicit", "rule")
            final_edges.append(Edge(
                source=src, target=tgt, label=kind, color=color, dashes=is_smart,
                title=f"Provenance: {prov}\nType: {kind}"
            ))

        return list(nodes_dict.values()), final_edges

    def _fetch_note_cached(self, note_id):
        """Return the payload of the note with *note_id*, or None; cached."""
        if note_id in self._note_cache:
            return self._note_cache[note_id]

        res, _ = self.client.scroll(
            collection_name=self.notes_col,
            scroll_filter=models.Filter(must=[models.FieldCondition(
                key="note_id", match=models.MatchValue(value=note_id))]),
            limit=1, with_payload=True
        )
        if res:
            self._note_cache[note_id] = res[0].payload
            return res[0].payload
        return None

    def _resolve_note_from_ref(self, ref_str):
        """Resolve an edge endpoint (chunk id, note id, or title) to a note payload.

        Returns None when nothing matches.
        """
        if not ref_str:
            return None

        # Case A: chunk id (format: note_id#cXX) or section link (note-id#Header).
        if "#" in ref_str:
            # Attempt 1: a real chunk id stored in the chunks collection.
            try:
                res = self.client.retrieve(collection_name=self.chunks_col,
                                           ids=[ref_str], with_payload=True)
                if res:
                    parent_id = res[0].payload.get("note_id")
                    return self._fetch_note_cached(parent_id)
            except Exception:
                # FIX: was a bare `except:` — chunk lookup stays best-effort,
                # but no longer swallows e.g. KeyboardInterrupt.
                pass

            # Attempt 2: section link — strip the fragment and try the note id.
            possible_note_id = ref_str.split("#")[0]
            note_by_id = self._fetch_note_cached(possible_note_id)
            if note_by_id:
                return note_by_id

        # Case B: the ref is the note id itself.
        note_by_id = self._fetch_note_cached(ref_str)
        if note_by_id:
            return note_by_id

        # Case C: the ref is a title (wikilink).
        res, _ = self.client.scroll(
            collection_name=self.notes_col,
            scroll_filter=models.Filter(must=[models.FieldCondition(
                key="title", match=models.MatchValue(value=ref_str))]),
            limit=1, with_payload=True
        )
        if res:
            p = res[0].payload
            self._note_cache[p['note_id']] = p
            return p

        return None

    def _add_node_to_dict(self, node_dict, note_payload, is_center=False):
        """Add an agraph Node for *note_payload* to *node_dict* (idempotent)."""
        nid = note_payload.get("note_id")
        if nid in node_dict:
            return

        ntype = note_payload.get("type", "default")
        color = GRAPH_COLORS.get(ntype, GRAPH_COLORS["default"])
        size = 35 if is_center else 20

        node_dict[nid] = Node(
            id=nid,
            label=note_payload.get("title", nid),
            size=size,
            color=color,
            # The center node is visually distinguished as a diamond.
            shape="dot" if not is_center else "diamond",
            title=f"Type: {ntype}\nTags: {note_payload.get('tags')}",
            font={'color': 'black'}
        )
def slugify(value):
    """Reduce *value* to a lowercase, ASCII, hyphen-separated slug.

    German umlauts and a few symbols get readable replacements before the
    remaining non-ASCII characters are dropped. Falsy input yields "".
    """
    if not value:
        return ""
    text = str(value).lower()
    # Readable substitutions first, so NFKD stripping doesn't lose them.
    for src, dst in (('ä', 'ae'), ('ö', 'oe'), ('ü', 'ue'),
                     ('ß', 'ss'), ('&', 'und'), ('+', 'und')):
        text = text.replace(src, dst)
    text = unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode('ascii')
    text = re.sub(r'[^\w\s-]', '', text).strip()
    return re.sub(r'[-\s]+', '-', text)
def normalize_meta_and_body(meta, body):
    """Normalize LLM-produced frontmatter to the vault schema.

    Whitelists known keys, merges the various tag-like keys into ``tags``,
    and demotes unknown string-valued keys into ``## Header`` sections
    prepended to the body.

    Returns:
        tuple[dict, str]: (clean metadata, possibly extended body).
    """
    ALLOWED_KEYS = {"title", "type", "status", "tags", "id", "created",
                    "updated", "aliases", "lang"}
    clean_meta = {}
    extra_content = []

    # German key alias.
    if "titel" in meta and "title" not in meta:
        meta["title"] = meta.pop("titel")

    # Collect tags from all tag-like keys (list or comma-separated string).
    tag_candidates = ["tags", "emotionale_keywords", "keywords", "schluesselwoerter"]
    all_tags = []
    for key in tag_candidates:
        if key in meta:
            val = meta[key]
            if isinstance(val, list):
                all_tags.extend(val)
            elif isinstance(val, str):
                all_tags.extend([t.strip() for t in val.split(",")])

    for key, val in meta.items():
        if key in ALLOWED_KEYS:
            clean_meta[key] = val
        elif key in tag_candidates:
            pass  # already merged above
        else:
            # Unknown keys with string values become body sections instead of
            # polluting the frontmatter.
            if val and isinstance(val, str):
                header = key.replace("_", " ").title()
                extra_content.append(f"## {header}\n{val}\n")

    if all_tags:
        clean_tags = []
        for t in all_tags:
            t_clean = str(t).replace("#", "").strip()
            if t_clean:
                clean_tags.append(t_clean)
        clean_meta["tags"] = list(set(clean_tags))

    if extra_content:
        new_section = "\n".join(extra_content)
        final_body = f"{new_section}\n{body}"
    else:
        final_body = body

    return clean_meta, final_body


def parse_markdown_draft(full_text):
    """Parse an LLM draft (possibly wrapped in a code fence) into (meta, body).

    Tolerates missing/partial YAML frontmatter; falls back to the first H1
    for the title. The result is passed through ``normalize_meta_and_body``.
    """
    clean_text = full_text.strip()
    # Unwrap a ```markdown / ```md / ```yaml fence if present.
    pattern_block = r"```(?:markdown|md|yaml)?\s*(.*?)\s*```"
    match_block = re.search(pattern_block, clean_text, re.DOTALL | re.IGNORECASE)
    if match_block:
        clean_text = match_block.group(1).strip()

    meta = {}
    body = clean_text
    yaml_str = ""

    parts = re.split(r"^---+\s*$", clean_text, maxsplit=2, flags=re.MULTILINE)

    if len(parts) >= 3:
        yaml_str = parts[1]
        body = parts[2]
    elif clean_text.startswith("---"):
        # Unterminated frontmatter: take everything up to the first heading.
        fallback_match = re.search(r"^---\s*(.*?)(?=\n#)", clean_text,
                                   re.DOTALL | re.MULTILINE)
        if fallback_match:
            yaml_str = fallback_match.group(1)
            body = clean_text.replace(f"---{yaml_str}", "", 1).strip()

    if yaml_str:
        # NOTE(review): stripping ALL '#' removes inline YAML comments but
        # would also mangle values containing '#' (e.g. hex colors) — confirm
        # this is acceptable for the frontmatter the LLM produces.
        yaml_str_clean = yaml_str.replace("#", "")
        try:
            parsed = yaml.safe_load(yaml_str_clean)
            if isinstance(parsed, dict):
                meta = parsed
        except Exception as e:
            print(f"YAML Parsing Warning: {e}")

    if not meta.get("title"):
        h1_match = re.search(r"^#\s+(.*)$", body, re.MULTILINE)
        if h1_match:
            meta["title"] = h1_match.group(1).strip()

    # "draft" is a status, not a type — remap.
    if meta.get("type") == "draft":
        meta["status"] = "draft"
        meta["type"] = "experience"

    return normalize_meta_and_body(meta, body)


def build_markdown_doc(meta, body):
    """Serialize (meta, body) into a frontmatter markdown document.

    Generates a date-slug ``id`` when missing and refreshes ``updated``.
    FIX: works on a copy of *meta* — the previous version mutated the
    caller's dict via ``pop``/item assignment as a side effect.
    """
    meta = dict(meta)

    if "id" not in meta or meta["id"] == "generated_on_save":
        raw_title = meta.get('title', 'note')
        clean_slug = slugify(raw_title)[:50] or "note"
        meta["id"] = f"{datetime.now().strftime('%Y%m%d')}-{clean_slug}"

    meta["updated"] = datetime.now().strftime("%Y-%m-%d")

    # Emit the important keys first, then everything else in original order.
    ordered_meta = {}
    prio_keys = ["id", "type", "title", "status", "tags"]
    for k in prio_keys:
        if k in meta:
            ordered_meta[k] = meta.pop(k)
    ordered_meta.update(meta)

    try:
        yaml_str = yaml.dump(ordered_meta, default_flow_style=None,
                             sort_keys=False, allow_unicode=True).strip()
    except yaml.YAMLError:
        yaml_str = "error: generating_yaml"

    return f"---\n{yaml_str}\n---\n\n{body}"


def load_history_from_logs(filepath, limit=10):
    """Return up to *limit* distinct recent query strings from the JSONL log.

    Best-effort: unreadable files or malformed lines are skipped silently,
    but only I/O and JSON errors are swallowed (no more bare ``except:``).
    """
    queries = []
    if filepath.exists():
        try:
            with open(filepath, "r", encoding="utf-8") as f:
                lines = f.readlines()
            # Newest entries are at the end of the file.
            for line in reversed(lines):
                try:
                    entry = json.loads(line)
                except (json.JSONDecodeError, ValueError):
                    continue
                q = entry.get("query_text") if isinstance(entry, dict) else None
                if q and q not in queries:
                    queries.append(q)
                    if len(queries) >= limit:
                        break
        except OSError:
            pass
    return queries