From fb6d37ecfd7ec7eae9078b3283a2bc802c4e8b85 Mon Sep 17 00:00:00 2001 From: Lars Date: Sat, 28 Mar 2026 21:47:35 +0100 Subject: [PATCH] Neue Docs --- ...ue-53-phase-0c-multi-layer-architecture.md | 2130 +++++++++++++++++ .../issue-54-dynamic-placeholder-system.md | 765 ++++++ ...phase-0c-placeholder-migration-analysis.md | 422 ++++ 3 files changed, 3317 insertions(+) create mode 100644 docs/issues/issue-53-phase-0c-multi-layer-architecture.md create mode 100644 docs/issues/issue-54-dynamic-placeholder-system.md create mode 100644 docs/phase-0c-placeholder-migration-analysis.md diff --git a/docs/issues/issue-53-phase-0c-multi-layer-architecture.md b/docs/issues/issue-53-phase-0c-multi-layer-architecture.md new file mode 100644 index 0000000..4dca4aa --- /dev/null +++ b/docs/issues/issue-53-phase-0c-multi-layer-architecture.md @@ -0,0 +1,2130 @@ +# Issue #53: Phase 0c - Multi-Layer Data Architecture + +**Status:** 🎯 Ready for Implementation +**Priorität:** High (Strategic) +**Aufwand:** 20-27h (5-7 Tage bei 4h/Tag) +**Erstellt:** 28. März 2026 +**Abhängigkeiten:** Phase 0a ✅, Phase 0b ✅ + +--- + +## Executive Summary + +**Ziel:** Refactoring der Datenarchitektur von monolithischer Platzhalter-Logik zu einer dreischichtigen Architektur mit klarer Separation of Concerns. 
+ +**Motivation:** +- Aktuell sind Datenermittlung, Berechnungslogik und Formatierung in `placeholder_resolver.py` vermischt +- Keine Wiederverwendbarkeit für Charts, Diagramme, API-Endpoints +- Jede neue Visualisierung erfordert Duplikation der Berechnungslogik +- Schwer testbar, schwer erweiterbar + +**Lösung:** +``` +┌────────────────────────────────────────────────┐ +│ Layer 1: DATA LAYER (neu) │ +│ - Pure data retrieval + calculation logic │ +│ - Returns: Structured data (dict/list/float) │ +│ - No formatting, no strings │ +│ - Testable, reusable │ +└──────────────────┬─────────────────────────────┘ + │ + ┌───────────┴──────────┐ + │ │ + ▼ ▼ +┌──────────────┐ ┌─────────────────────┐ +│ Layer 2a: │ │ Layer 2b: │ +│ KI LAYER │ │ VISUALIZATION LAYER │ +│ (refactored) │ │ (new) │ +└──────────────┘ └─────────────────────┘ +``` + +--- + +## Phase 0b Achievements (Blaupause für Phase 0c) + +### Was wurde in Phase 0b implementiert? ✅ + +**Datum:** 28. März 2026 (früher Chat) +**Commits:** 20+ Commits mit "Phase 0b" prefix + +#### 1. Platzhalter-Funktionen (in placeholder_resolver.py) + +**Körper-Metriken:** +```python +def _get_body_progress_score(profile_id: str, goal_mode: str) -> dict: + """ + Berechnet goal-mode-abhängigen Body Progress Score. 
+ + Aktuelle Implementierung (Phase 0b): + - SQL queries direkt in Funktion + - Berechnungslogik inline + - Returns: dict mit score + components + + Phase 0c Migration: + → Wird zu: data_layer.body_metrics.get_body_progress_data() + → Placeholder nutzt dann nur noch: data['score'] + """ +``` + +**Fokus-Bereiche:** +```python +def _get_active_goals_json(profile_id: str) -> str: + """ + Returns: JSON string mit aktiven Zielen + + Phase 0c Migration: + → Data Layer: data_layer.goals.get_active_goals() → list[dict] + → KI Layer: json.dumps(data) → str + """ + +def _get_focus_areas_json(profile_id: str) -> str: + """ + Returns: JSON string mit gewichteten Focus Areas + + Phase 0c Migration: + → Data Layer: data_layer.goals.get_weighted_focus_areas() → list[dict] + → KI Layer: json.dumps(data) → str + """ +``` + +**Ernährungs-Metriken:** +```python +def _get_nutrition_metrics(profile_id: str, days: int) -> dict: + """ + Berechnet Protein/kg, Adherence, etc. + + Phase 0c Migration: + → Wird zu: data_layer.nutrition_metrics.get_protein_adequacy_data() + → Zusätzliche Metriken in: get_energy_balance_data(), get_macro_distribution_data() + """ +``` + +#### 2. Score-System (goal_utils.py) + +```python +# backend/goal_utils.py + +def map_focus_to_score_components(profile_id: str) -> dict: + """ + Maps gewichtete Focus Areas zu Score-Komponenten. + + Returns: + { + "body": 0.30, + "nutrition": 0.25, + "training": 0.20, + "recovery": 0.15, + "health": 0.10 + } + + Phase 0c: BLEIBT in goal_utils.py + → Ist Score-Gewichtung, nicht Datenermittlung + """ + +def get_active_goals(profile_id: str) -> list[dict]: + """ + Holt alle aktiven Ziele mit vollständigen Daten. + + Phase 0c Migration: + → Wird zu: data_layer.goals.get_active_goals() + → goal_utils.py importiert dann aus data_layer + """ +``` + +#### 3. 
Bug Fixes (Learnings für Phase 0c) + +**Decimal → Float Conversion:** +```python +# Problem: PostgreSQL Decimal-Type nicht JSON-serializable +# Lösung: Explizite Konvertierung + +# ALT (Phase 0b Bug): +protein_g = row['protein'] # Decimal object +return {"protein": protein_g} # JSON error + +# FIX (Phase 0b): +protein_g = float(row['protein']) if row['protein'] else 0.0 +return {"protein": protein_g} # OK +``` + +**Column Name Consistency:** +```python +# Problem: Inkonsistente Spaltennamen +# Lösung: Immer aus Schema prüfen + +# ALT (Phase 0b Bug): +SELECT bf_jpl FROM caliper_log # Spalte existiert nicht + +# FIX (Phase 0b): +SELECT body_fat_pct FROM caliper_log # Korrekt +``` + +**Dict Access Safety:** +```python +# Problem: KeyError bei fehlenden Daten +# Lösung: .get() mit defaults + +# ALT (Phase 0b Bug): +sleep_quality = sleep_data['quality'] # KeyError wenn leer + +# FIX (Phase 0b): +sleep_quality = sleep_data.get('quality', 0.0) # Safe +``` + +--- + +## Phase 0c: Detaillierte Spezifikation + +### Ziele + +1. ✅ **Single Source of Truth:** Jede Berechnung nur einmal implementiert +2. ✅ **Wiederverwendbarkeit:** Gleiche Daten für KI + Charts + API +3. ✅ **Testbarkeit:** Data Layer isoliert testbar +4. ✅ **Erweiterbarkeit:** Neue Features ohne Code-Duplikation +5. 
✅ **Performance:** Caching auf Data Layer Ebene möglich + +### Nicht-Ziele (Scope Grenzen) + +❌ **NICHT in Phase 0c:** +- Neue Charts im Frontend implementieren (nur Backend-Endpoints) +- Frontend Chart-Komponenten (kommt in Phase 1) +- Caching-Layer (kommt später) +- API-Dokumentation mit Swagger (kommt später) + +--- + +## Implementierungs-Plan + +### Step 1: Data Layer Module erstellen (8-10h) + +**Verzeichnisstruktur:** +``` +backend/ +├── data_layer/ # NEU +│ ├── __init__.py # Exports all functions +│ ├── body_metrics.py # Gewicht, FM, LBM, Umfänge, BF% +│ ├── nutrition_metrics.py # Kalorien, Protein, Makros, Adherence +│ ├── activity_metrics.py # Volumen, Qualität, Monotony, Abilities +│ ├── recovery_metrics.py # RHR, HRV, Sleep, Recovery Score +│ ├── health_metrics.py # BP, VO2Max, SpO2, Health Stability +│ ├── goals.py # Active goals, progress, projections +│ ├── correlations.py # Lag-Korrelationen, Plateau Detection +│ └── utils.py # Shared: confidence, baseline, outliers +├── placeholder_resolver.py # REFACTORED (nutzt data_layer) +├── goal_utils.py # REFACTORED (nutzt data_layer.goals) +└── routers/ + └── charts.py # NEU (nutzt data_layer) +``` + +#### Module 1: body_metrics.py + +**Pfad:** `backend/data_layer/body_metrics.py` + +**Funktionen:** + +```python +""" +Body composition metrics and weight trend analysis. + +All functions return structured data (dict/list) without formatting. +Use these for both AI placeholders AND chart endpoints. +""" + +from typing import Optional +from datetime import date, timedelta +from db import get_db, get_cursor + + +def get_weight_trend_data( + profile_id: str, + days: int = 90, + include_projections: bool = True +) -> dict: + """ + Weight trend with rolling medians, slopes, and goal projections. 
+ + Args: + profile_id: User profile ID + days: Number of days to analyze (default 90) + include_projections: Include goal projection calculations + + Returns: + { + "raw_values": [(date, weight), ...], + "rolling_median_7d": [(date, value), ...], + "slope_7d": float, # kg per week + "slope_28d": float, + "slope_90d": float, + "confidence": str, # "high"/"medium"/"low"/"insufficient" + "data_points": int, + "first_date": date, + "last_date": date, + "first_value": float, + "last_value": float, + "delta": float, + "projection": { # Only if include_projections=True + "target_weight": float, + "current_rate": float, + "estimated_days": int, + "estimated_date": date + } | None + } + + Confidence Rules (from utils.py): + - "high": >= 60 points (90d) or >= 18 points (28d) or >= 4 points (7d) + - "medium": >= 40 points (90d) or >= 12 points (28d) or >= 3 points (7d) + - "low": < thresholds above but some data + - "insufficient": < 3 points total + + Migration from Phase 0b: + - OLD: _get_weight_trend_slope() in placeholder_resolver.py (inline SQL) + - NEW: This function (reusable) + - KI Layer: resolve_weight_28d_trend_slope() → f"{data['slope_28d']:.2f} kg/Woche" + - Chart: GET /api/charts/weight-trend → return data + """ + # Implementation here + + +def get_body_composition_data( + profile_id: str, + days: int = 90 +) -> dict: + """ + Fat mass, lean mass, body fat percentage trends. 
+ + Returns: + { + "dates": [date, ...], + "weight": [float, ...], + "body_fat_pct": [float, ...], + "fat_mass": [float, ...], + "lean_mass": [float, ...], + "fm_delta_7d": float, + "fm_delta_28d": float, + "fm_delta_90d": float, + "lbm_delta_7d": float, + "lbm_delta_28d": float, + "lbm_delta_90d": float, + "recomposition_score": int, # 0-100 + "confidence": str, + "data_points": int + } + + Recomposition Score Logic: + - FM↓ + LBM↑ = 100 (perfect) + - FM↓ + LBM= = 80 (good) + - FM= + LBM↑ = 70 (ok) + - FM↓ + LBM↓ = depends on ratio + - FM↑ + LBM↓ = 0 (worst) + + Migration from Phase 0b: + - OLD: Part of _get_body_progress_score() (mixed with scoring) + - NEW: This function (pure data) + - Score calculation stays in goal_utils.py + """ + # Implementation here + + +def get_circumference_summary( + profile_id: str, + days: int = 90 +) -> dict: + """ + Circumference measurements with best-of-each strategy. + + Returns: + { + "measurements": { + "c_neck": {"value": float, "date": date, "age_days": int}, + "c_chest": {"value": float, "date": date, "age_days": int}, + "c_waist": {"value": float, "date": date, "age_days": int}, + "c_hips": {"value": float, "date": date, "age_days": int}, + "c_thigh_l": {"value": float, "date": date, "age_days": int}, + "c_thigh_r": {"value": float, "date": date, "age_days": int}, + "c_bicep_l": {"value": float, "date": date, "age_days": int}, + "c_bicep_r": {"value": float, "date": date, "age_days": int} + }, + "ratios": { + "waist_to_hip": float, # WHR - Bauchfettverteilung + "waist_to_height": float # WHtR - Gesundheitsrisiko + }, + "confidence": str, + "data_points": int + } + + Best-of-Each Logic: + - Pro Messpunkt: Neuester Wert innerhalb days + - WHR: waist / hips (< 0.90 men, < 0.85 women = low risk) + - WHtR: waist / height_cm (< 0.50 = low risk) + + Migration from Phase 0b: + - OLD: resolve_circ_summary() in placeholder_resolver.py + - NEW: This function + """ + # Implementation here +``` + +#### Module 2: nutrition_metrics.py + 
+**Pfad:** `backend/data_layer/nutrition_metrics.py` + +**Funktionen:** + +```python +""" +Nutrition analysis: calories, protein, macros, adherence. +""" + +def get_protein_adequacy_data( + profile_id: str, + days: int = 28, + goal_mode: Optional[str] = None +) -> dict: + """ + Protein intake vs. target (goal_mode-dependent). + + Returns: + { + "daily_values": [(date, protein_g, target_g), ...], + "avg_protein_g": float, + "avg_protein_per_kg": float, + "avg_protein_per_kg_lbm": float, + "target_protein_g": float, + "target_protein_per_kg": float, + "adherence_pct": float, # % of days >= 90% of target + "adherence_score": int, # 0-100 + "goal_mode": str, + "current_weight": float, + "lean_body_mass": float, + "confidence": str, + "data_points": int + } + + Target Protein per Goal Mode: + - "strength": 2.0-2.2 g/kg + - "weight_loss": 1.8-2.0 g/kg + - "recomposition": 2.0-2.2 g/kg + - "endurance": 1.4-1.6 g/kg + - "health": 1.2-1.6 g/kg + + Adherence Score: + - 100: >= 95% of days meet target + - 80: >= 80% of days meet target + - 60: >= 60% of days meet target + - <60: proportional + + Migration from Phase 0b: + - OLD: _get_nutrition_metrics() in placeholder_resolver.py + - NEW: This function + """ + # Implementation here + + +def get_energy_balance_data( + profile_id: str, + days: int = 28 +) -> dict: + """ + Calorie intake vs. expenditure, deficit/surplus calculations. 
+ + Returns: + { + "daily_values": [(date, intake_kcal, activity_kcal, net), ...], + "avg_intake": float, + "avg_activity_kcal": float, + "avg_net": float, # intake - activity + "estimated_bmr": float, + "energy_availability": float, # (intake - activity) / LBM + "deficit_surplus_avg": float, # negative = deficit + "confidence": str, + "data_points": int, + "red_s_warning": bool # True if EA < 30 kcal/kg LBM + } + + Energy Availability: + - EA = (intake - activity) / LBM (kg) + - < 30 kcal/kg LBM = RED-S risk (Relative Energy Deficiency in Sport) + - 30-45 = moderate risk + - > 45 = adequate + + Migration: + - NEW function (was part of Phase 0b scope, moved to 0c) + """ + # Implementation here + + +def get_macro_distribution_data( + profile_id: str, + days: int = 28 +) -> dict: + """ + Macronutrient distribution and balance. + + Returns: + { + "avg_kcal": float, + "avg_protein_g": float, + "avg_carbs_g": float, + "avg_fat_g": float, + "pct_protein": float, # % of total kcal + "pct_carbs": float, + "pct_fat": float, + "balance_score": int, # 0-100, goal_mode-dependent + "confidence": str, + "data_points": int + } + + Balance Score (example for strength goal): + - Protein: 25-35% = 100, outside = penalty + - Carbs: 40-50% = 100, outside = penalty + - Fat: 20-30% = 100, outside = penalty + """ + # Implementation here +``` + +#### Module 3: activity_metrics.py + +**Pfad:** `backend/data_layer/activity_metrics.py` + +**Funktionen:** + +```python +""" +Training volume, quality, monotony, ability balance. +""" + +def get_training_volume_data( + profile_id: str, + weeks: int = 4 +) -> dict: + """ + Training volume per week, distribution by type. + + Returns: + { + "weekly_totals": [ + { + "week_start": date, + "duration_min": int, + "kcal": int, + "sessions": int, + "avg_quality": float + }, + ... + ], + "by_type": { + "strength": {"duration": int, "sessions": int, "kcal": int}, + "cardio": {"duration": int, "sessions": int, "kcal": int}, + ... 
+ }, + "total_duration": int, + "total_sessions": int, + "avg_quality": float, # 1.0-5.0 + "monotony": float, # < 2.0 = gut + "strain": float, # kumulativ + "confidence": str, + "data_points": int + } + + Monotony Calculation: + - monotony = avg_daily_duration / std_dev_daily_duration + - < 1.5 = hohe Variation (gut) + - 1.5-2.0 = moderate Variation + - > 2.0 = niedrige Variation (Risiko Plateau/Übertraining) + + Strain Calculation: + - strain = total_duration * monotony + - Hohe Strain + hohe Monotony = Übertraining-Risiko + """ + # Implementation here + + +def get_activity_quality_distribution( + profile_id: str, + days: int = 28 +) -> dict: + """ + Quality label distribution and trends. + + Returns: + { + "distribution": { + "excellent": int, # count + "very_good": int, + "good": int, + "acceptable": int, + "poor": int + }, + "avg_quality": float, # 1.0-5.0 + "quality_trend": str, # "improving"/"stable"/"declining" + "high_quality_pct": float, # % excellent + very_good + "confidence": str, + "data_points": int + } + + Quality Trend: + - Compare first_half_avg vs. second_half_avg + - > 0.2 difference = improving/declining + - <= 0.2 = stable + """ + # Implementation here + + +def get_ability_balance_data( + profile_id: str, + weeks: int = 4 +) -> dict: + """ + Balance across 5 ability dimensions (from training_types). + + Returns: + { + "abilities": { + "strength": float, # normalized 0-1 + "cardio": float, + "mobility": float, + "coordination": float, + "mental": float + }, + "balance_score": int, # 0-100 + "imbalances": [ + {"ability": str, "severity": str, "recommendation": str}, + ... 
+ ], + "confidence": str, + "data_points": int + } + + Balance Score: + - Perfect balance (all ~0.20) = 100 + - Moderate imbalance (one dominant) = 70-80 + - Severe imbalance (one > 0.50) = < 50 + + Migration: + - NEW function (was part of Phase 0b scope, moved to 0c) + """ + # Implementation here +``` + +#### Module 4: recovery_metrics.py + +**Pfad:** `backend/data_layer/recovery_metrics.py` + +**Funktionen:** + +```python +""" +Recovery score, sleep analysis, vitals baselines. +""" + +def get_recovery_score_data( + profile_id: str, + days: int = 7 +) -> dict: + """ + Composite recovery score from RHR, HRV, sleep, rest days. + + Returns: + { + "score": int, # 0-100 + "components": { + "rhr": { + "value": float, + "baseline_7d": float, + "deviation_pct": float, + "score": int # 0-100 + }, + "hrv": { + "value": float, + "baseline_7d": float, + "deviation_pct": float, + "score": int + }, + "sleep": { + "duration_h": float, + "quality_pct": float, # Deep+REM / total + "score": int + }, + "rest_compliance": { + "rest_days": int, + "recommended": int, + "score": int + } + }, + "trend": str, # "improving"/"stable"/"declining" + "confidence": str, + "data_points": int + } + + Component Weights: + - RHR: 30% + - HRV: 30% + - Sleep: 30% + - Rest Compliance: 10% + + Score Calculations: + RHR Score: + - Below baseline by >5% = 100 + - At baseline ±5% = 80 + - Above baseline by 5-10% = 50 + - Above baseline by >10% = 20 + + HRV Score: + - Above baseline by >10% = 100 + - At baseline ±10% = 80 + - Below baseline by 10-20% = 50 + - Below baseline by >20% = 20 + + Sleep Score: + - Duration >= 7h AND quality >= 75% = 100 + - Duration >= 6h AND quality >= 65% = 80 + - Duration >= 5h OR quality >= 50% = 50 + - Else = 20 + + Rest Compliance: + - rest_days >= recommended = 100 + - rest_days >= recommended - 1 = 70 + - Else = proportional + + Migration from Phase 0b: + - OLD: Part of health_stability_score (mixed logic) + - NEW: This function (focused on recovery only) + """ + # 
Implementation here + + +def get_sleep_regularity_data( + profile_id: str, + days: int = 28 +) -> dict: + """ + Sleep regularity index and patterns. + + Returns: + { + "regularity_score": int, # 0-100 + "avg_duration_h": float, + "std_dev_duration": float, + "avg_bedtime": str, # "23:15" (HH:MM) + "std_dev_bedtime_min": float, + "sleep_debt_h": float, # cumulative vs. 7h target + "confidence": str, + "data_points": int + } + + Regularity Score: + - Based on consistency of duration and bedtime + - Low std_dev = high score + - Formula: 100 - (std_dev_duration * 10 + std_dev_bedtime_min / 6) + """ + # Implementation here + + +def get_vitals_baseline_data( + profile_id: str, + days: int = 7 +) -> dict: + """ + Baseline vitals: RHR, HRV, VO2Max, SpO2, respiratory rate. + + Returns: + { + "rhr": { + "current": float, + "baseline_7d": float, + "baseline_28d": float, + "trend": str # "improving"/"stable"/"declining" + }, + "hrv": { + "current": float, + "baseline_7d": float, + "baseline_28d": float, + "trend": str + }, + "vo2_max": { + "current": float, + "baseline_28d": float, + "trend": str + }, + "spo2": { + "current": float, + "baseline_7d": float + }, + "respiratory_rate": { + "current": float, + "baseline_7d": float + }, + "confidence": str, + "data_points": int + } + + Trend Calculation: + - Compare current vs. baseline + - RHR: lower = improving + - HRV: higher = improving + - VO2Max: higher = improving + """ + # Implementation here +``` + +#### Module 5: health_metrics.py + +**Pfad:** `backend/data_layer/health_metrics.py` + +**Funktionen:** + +```python +""" +Blood pressure, health stability score, risk indicators. +""" + +def get_blood_pressure_data( + profile_id: str, + days: int = 28 +) -> dict: + """ + Blood pressure trends and risk classification. + + Returns: + { + "measurements": [ + { + "date": date, + "systolic": int, + "diastolic": int, + "pulse": int, + "context": str, + "classification": str # WHO/ISH + }, + ... 
+ ], + "avg_systolic": float, + "avg_diastolic": float, + "avg_pulse": float, + "risk_level": str, # "normal"/"elevated"/"hypertension_stage_1"/... + "measurements_by_context": dict, + "confidence": str, + "data_points": int + } + + WHO/ISH Classification: + - Normal: <120/<80 + - Elevated: 120-129/<80 + - Hypertension Stage 1: 130-139/80-89 + - Hypertension Stage 2: >=140/>=90 + """ + # Implementation here + + +def get_health_stability_score( + profile_id: str, + days: int = 28 +) -> dict: + """ + Overall health stability across multiple dimensions. + + Returns: + { + "score": int, # 0-100 + "components": { + "vitals_stability": int, # RHR, HRV, BP variance + "sleep_regularity": int, + "activity_consistency": int, + "nutrition_adherence": int, + "recovery_quality": int + }, + "risk_indicators": [ + {"type": str, "severity": str, "message": str}, + ... + ], + "confidence": str + } + + Risk Indicators: + - RED-S: energy_availability < 30 + - Overtraining: high strain + low recovery + - BP Risk: avg systolic >= 130 + - Sleep Debt: cumulative > 10h + - HRV Drop: < baseline by >20% + + Migration: + - NEW function (was part of Phase 0b scope, moved to 0c) + """ + # Implementation here +``` + +#### Module 6: goals.py + +**Pfad:** `backend/data_layer/goals.py` + +**Funktionen:** + +```python +""" +Goal tracking, progress, projections. +""" + +def get_active_goals(profile_id: str) -> list[dict]: + """ + All active goals with full details. + + Returns: + [ + { + "id": str, + "goal_type": str, + "name": str, + "target_value": float, + "target_date": date | None, + "current_value": float, + "start_value": float, + "start_date": date, + "progress_pct": float, + "status": str, + "is_primary": bool, + "created_at": date, + "focus_contributions": [ + {"focus_area": str, "weight": float}, + ... + ] + }, + ... 
+ ] + + Migration from Phase 0b: + - OLD: goal_utils.get_active_goals() + - NEW: This function (moved to data_layer) + - goal_utils.py imports from here + """ + # Implementation here + + +def get_goal_progress_data( + profile_id: str, + goal_id: str +) -> dict: + """ + Detailed progress tracking for a single goal. + + Returns: + { + "goal": dict, # Full goal object + "history": [ + {"date": date, "value": float}, + ... + ], + "progress_pct": float, + "time_progress_pct": float, # (elapsed / total) * 100 + "deviation": float, # actual - expected (time-based) + "projection": { + "estimated_completion": date, + "linear_rate": float, + "confidence": str + } | None, + "is_behind_schedule": bool, + "is_on_track": bool + } + + Time-Based Tracking (from Phase 0b Enhancement, 28.03.2026): + - expected_progress = (elapsed_days / total_days) * 100 + - deviation = actual_progress - expected_progress + - Negative = behind schedule + - Positive = ahead of schedule + + Auto-Population (from Phase 0b Enhancement, 28.03.2026): + - start_value automatically populated from first historical measurement + - start_date adjusted to actual measurement date + """ + # Implementation here + + +def get_weighted_focus_areas(profile_id: str) -> list[dict]: + """ + User's weighted focus areas. + + Returns: + [ + { + "key": str, + "name": str, + "category": str, + "weight": float, # 0-100 + "active_goals": int # count + }, + ... + ] + + Migration from Phase 0b: + - OLD: Part of placeholder resolution + - NEW: This function (clean data) + """ + # Implementation here +``` + +#### Module 7: correlations.py + +**Pfad:** `backend/data_layer/correlations.py` + +**Funktionen:** + +```python +""" +Lag-based correlations, plateau detection. +""" + +def get_correlation_data( + profile_id: str, + metric_a: str, + metric_b: str, + days: int = 90, + max_lag: int = 7 +) -> dict: + """ + Lag-based correlation between two metrics. 
+ + Args: + metric_a: e.g., "calorie_deficit" + metric_b: e.g., "weight_change" + max_lag: Maximum lag in days to test + + Returns: + { + "correlation": float, # Pearson r at best lag + "best_lag": int, # Days of lag + "p_value": float, + "confidence": str, + "paired_points": int, + "interpretation": str # "strong"/"moderate"/"weak"/"none" + } + + Confidence Rules: + - "high": >= 28 paired points + - "medium": >= 21 paired points + - "low": >= 14 paired points + - "insufficient": < 14 paired points + + Interpretation: + - |r| > 0.7: "strong" + - |r| > 0.5: "moderate" + - |r| > 0.3: "weak" + - |r| <= 0.3: "none" + + Migration: + - NEW function (was Phase 0b scope, moved to 0c) + """ + # Implementation here + + +def detect_plateau( + profile_id: str, + metric: str, + days: int = 28 +) -> dict: + """ + Detect if metric has plateaued despite expected change. + + Returns: + { + "is_plateau": bool, + "metric": str, + "duration_days": int, + "expected_change": float, + "actual_change": float, + "confidence": str, + "possible_causes": [str, ...] + } + + Plateau Criteria: + - Weight: < 0.2kg change in 28d despite calorie deficit + - Strength: No PR in 42d despite training + - VO2Max: < 1% change in 90d despite cardio training + + Possible Causes: + - "metabolic_adaptation" (weight) + - "insufficient_stimulus" (strength/cardio) + - "overtraining" (all) + - "nutrition_inadequate" (strength) + """ + # Implementation here +``` + +#### Module 8: utils.py + +**Pfad:** `backend/data_layer/utils.py` + +**Funktionen:** + +```python +""" +Shared utilities: confidence scoring, baseline calculations, outlier detection. +""" + +def calculate_confidence( + data_points: int, + days_requested: int, + metric_type: str = "general" +) -> str: + """ + Determine confidence level based on data availability. 
+ + Args: + data_points: Number of actual data points + days_requested: Number of days in analysis window + metric_type: "general" | "correlation" | "trend" + + Returns: + "high" | "medium" | "low" | "insufficient" + + Rules: + General (days_requested): + 7d: high >= 4, medium >= 3, low >= 2 + 28d: high >= 18, medium >= 12, low >= 8 + 90d: high >= 60, medium >= 40, low >= 25 + + Correlation: + high >= 28, medium >= 21, low >= 14 + + Trend: + high >= (days * 0.7), medium >= (days * 0.5), low >= (days * 0.3) + """ + # Implementation here + + +def calculate_baseline( + values: list[float], + method: str = "median" +) -> float: + """ + Calculate baseline value. + + Args: + values: List of measurements + method: "median" | "mean" | "trimmed_mean" + + Returns: + Baseline value (float) + + Trimmed Mean: + - Remove top/bottom 10% of values + - Calculate mean of remaining + - More robust than mean, less aggressive than median + """ + # Implementation here + + +def detect_outliers( + values: list[float], + method: str = "iqr" +) -> list[int]: + """ + Detect outlier indices. + + Args: + values: List of measurements + method: "iqr" | "zscore" | "mad" + + Returns: + List of outlier indices + + IQR Method (recommended): + - Q1 = 25th percentile + - Q3 = 75th percentile + - IQR = Q3 - Q1 + - Outliers: < Q1 - 1.5*IQR OR > Q3 + 1.5*IQR + """ + # Implementation here + + +def calculate_linear_regression( + x: list[float], + y: list[float] +) -> dict: + """ + Simple linear regression. + + Returns: + { + "slope": float, + "intercept": float, + "r_squared": float, + "p_value": float + } + """ + # Implementation here + + +def serialize_dates(obj): + """ + Convert date/datetime objects to ISO strings for JSON serialization. 
+ + (Already exists in routers/goals.py - move here for reusability) + + Migration from Phase 0b Enhancement (28.03.2026): + - Learned from bug: Python date objects don't auto-serialize + - Solution: Recursive conversion to ISO strings + """ + # Implementation here +``` + +--- + +### Step 2: Placeholder Resolver Refactoring (3-4h) + +**Pfad:** `backend/placeholder_resolver.py` + +**Ziel:** Von ~1100 Zeilen zu ~400 Zeilen durch Nutzung des Data Layer. + +**Muster (für alle Platzhalter):** + +```python +# ── ALTE IMPLEMENTIERUNG (Phase 0b) ────────────────────────────── +def resolve_weight_28d_trend_slope(profile_id: str) -> str: + """Returns kg/Woche slope for KI prompts""" + with get_db() as conn: + cur = get_cursor(conn) + + # 30 Zeilen SQL queries + cur.execute(""" + SELECT date, weight + FROM weight_log + WHERE profile_id = %s + AND date >= NOW() - INTERVAL '28 days' + ORDER BY date + """, (profile_id,)) + rows = cur.fetchall() + + if len(rows) < 18: + return "Nicht genug Daten" + + # 15 Zeilen Berechnungslogik + x = [(row[0] - rows[0][0]).days for row in rows] + y = [row[1] for row in rows] + + n = len(x) + sum_x = sum(x) + sum_y = sum(y) + sum_xy = sum(xi * yi for xi, yi in zip(x, y)) + sum_x2 = sum(xi ** 2 for xi in x) + + slope = (n * sum_xy - sum_x * sum_y) / (n * sum_x2 - sum_x ** 2) + slope_per_week = slope * 7 + + # Formatierung + return f"{slope_per_week:.2f} kg/Woche" + + +# ── NEUE IMPLEMENTIERUNG (Phase 0c) ────────────────────────────── +from data_layer.body_metrics import get_weight_trend_data + +def resolve_weight_28d_trend_slope(profile_id: str) -> str: + """Returns kg/Woche slope for KI prompts""" + data = get_weight_trend_data(profile_id, days=28) + + if data['confidence'] == 'insufficient': + return "Nicht genug Daten" + + return f"{data['slope_28d']:.2f} kg/Woche" +``` + +**Alle zu refactorierenden Platzhalter:** + +```python +# KÖRPER +resolve_weight_28d_trend_slope() → get_weight_trend_data() +resolve_weight_7d_rolling_median() → 
get_weight_trend_data() +resolve_fm_28d_delta() → get_body_composition_data() +resolve_lbm_28d_delta() → get_body_composition_data() +resolve_recomposition_score() → get_body_composition_data() +resolve_circ_summary() → get_circumference_summary() + +# ERNÄHRUNG +resolve_protein_g_per_kg() → get_protein_adequacy_data() +resolve_protein_adequacy() → get_protein_adequacy_data() +resolve_nutrition_adherence_score() → get_protein_adequacy_data() +resolve_energy_balance() → get_energy_balance_data() + +# AKTIVITÄT +resolve_training_volume_28d() → get_training_volume_data() +resolve_activity_quality_avg() → get_activity_quality_distribution() +resolve_activity_monotony() → get_training_volume_data() + +# RECOVERY +resolve_recovery_score() → get_recovery_score_data() +resolve_sleep_regularity() → get_sleep_regularity_data() +resolve_sleep_debt_hours() → get_sleep_regularity_data() + +# GOALS (JSON Platzhalter) +resolve_active_goals() → get_active_goals() + json.dumps() +resolve_focus_areas() → get_weighted_focus_areas() + json.dumps() + +# HEALTH +resolve_bp_avg() → get_blood_pressure_data() +resolve_vitals_baseline() → get_vitals_baseline_data() +``` + +**Platzhalter-Mapping aktualisieren:** + +```python +# backend/placeholder_resolver.py + +PLACEHOLDER_FUNCTIONS = { + # ... existing placeholders ... + + # Phase 0c: Refactored to use data_layer + "weight_28d_trend_slope": resolve_weight_28d_trend_slope, + "weight_7d_rolling_median": resolve_weight_7d_rolling_median, + "fm_28d_delta": resolve_fm_28d_delta, + # ... etc. +} +``` + +--- + +### Step 3: Charts Router erstellen (6-8h) + +**Pfad:** `backend/routers/charts.py` + +**Struktur:** + +```python +""" +Chart data endpoints for frontend visualizations. + +All endpoints use data_layer functions and return structured JSON +compatible with Chart.js / Recharts. 
+ +Implements charts from konzept_diagramme_auswertungen_v2.md: + - K1-K10: Body charts + - E1-E4: Nutrition charts + - A1-A5: Activity charts + - V1-V3: Vitals charts + - R1-R2: Recovery charts +""" + +from fastapi import APIRouter, Depends, Query +from auth import require_auth +from data_layer.body_metrics import ( + get_weight_trend_data, + get_body_composition_data, + get_circumference_summary +) +from data_layer.nutrition_metrics import ( + get_protein_adequacy_data, + get_energy_balance_data, + get_macro_distribution_data +) +from data_layer.activity_metrics import ( + get_training_volume_data, + get_activity_quality_distribution, + get_ability_balance_data +) +from data_layer.recovery_metrics import ( + get_recovery_score_data, + get_sleep_regularity_data, + get_vitals_baseline_data +) +from data_layer.health_metrics import ( + get_blood_pressure_data, + get_health_stability_score +) +from data_layer.correlations import ( + get_correlation_data, + detect_plateau +) + +router = APIRouter(prefix="/api/charts", tags=["charts"]) + + +# ── BODY CHARTS (K1-K10) ──────────────────────────────────────── + +@router.get("/weight-trend") +def weight_trend_chart( + days: int = Query(90, ge=7, le=365), + session: dict = Depends(require_auth) +): + """ + K1: Weight Trend + Goal Projection + + Returns Chart.js compatible data structure. 
+ """ + pid = session['profile_id'] + data = get_weight_trend_data(pid, days=days) + + return { + "chart_type": "line", + "data": { + "labels": [str(d[0]) for d in data['raw_values']], + "datasets": [ + { + "label": "Rohwerte", + "data": [d[1] for d in data['raw_values']], + "type": "scatter", + "backgroundColor": "rgba(29, 158, 117, 0.5)", + "borderColor": "rgba(29, 158, 117, 0.5)", + "pointRadius": 4 + }, + { + "label": "7d Trend (Median)", + "data": [d[1] for d in data['rolling_median_7d']], + "type": "line", + "borderColor": "#1D9E75", + "borderWidth": 3, + "fill": False, + "pointRadius": 0 + } + ] + }, + "metadata": { + "slope_7d": data['slope_7d'], + "slope_28d": data['slope_28d'], + "slope_90d": data['slope_90d'], + "confidence": data['confidence'], + "projection": data['projection'] + }, + "options": { + "title": "Gewichtstrend + Zielprojektion", + "yAxisLabel": "Gewicht (kg)", + "xAxisLabel": "Datum" + } + } + + +@router.get("/body-composition") +def body_composition_chart( + days: int = Query(90, ge=7, le=365), + session: dict = Depends(require_auth) +): + """ + K2: Fat Mass / Lean Mass Trend + """ + pid = session['profile_id'] + data = get_body_composition_data(pid, days=days) + + return { + "chart_type": "line", + "data": { + "labels": [str(d) for d in data['dates']], + "datasets": [ + { + "label": "Fettmasse (kg)", + "data": data['fat_mass'], + "borderColor": "#D85A30", + "backgroundColor": "rgba(216, 90, 48, 0.1)", + "fill": True + }, + { + "label": "Magermasse (kg)", + "data": data['lean_mass'], + "borderColor": "#1D9E75", + "backgroundColor": "rgba(29, 158, 117, 0.1)", + "fill": True + } + ] + }, + "metadata": { + "fm_delta_28d": data['fm_delta_28d'], + "lbm_delta_28d": data['lbm_delta_28d'], + "recomposition_score": data['recomposition_score'], + "confidence": data['confidence'] + }, + "options": { + "title": "Körperkomposition", + "yAxisLabel": "Masse (kg)" + } + } + + +# ── NUTRITION CHARTS (E1-E4) ──────────────────────────────────── + 
+@router.get("/protein-adequacy") +def protein_adequacy_chart( + days: int = Query(28, ge=7, le=90), + session: dict = Depends(require_auth) +): + """ + E1: Protein Intake vs. Target + """ + pid = session['profile_id'] + data = get_protein_adequacy_data(pid, days=days) + + return { + "chart_type": "line", + "data": { + "labels": [str(d[0]) for d in data['daily_values']], + "datasets": [ + { + "label": "Protein (g)", + "data": [d[1] for d in data['daily_values']], + "type": "bar", + "backgroundColor": "rgba(29, 158, 117, 0.7)" + }, + { + "label": "Ziel (g)", + "data": [d[2] for d in data['daily_values']], + "type": "line", + "borderColor": "#D85A30", + "borderDash": [5, 5], + "fill": False + } + ] + }, + "metadata": { + "avg_protein_g": data['avg_protein_g'], + "target_protein_g": data['target_protein_g'], + "adherence_pct": data['adherence_pct'], + "adherence_score": data['adherence_score'], + "confidence": data['confidence'] + } + } + + +@router.get("/energy-balance") +def energy_balance_chart( + days: int = Query(28, ge=7, le=90), + session: dict = Depends(require_auth) +): + """ + E2: Energy Balance (Intake - Activity) + """ + pid = session['profile_id'] + data = get_energy_balance_data(pid, days=days) + + return { + "chart_type": "line", + "data": { + "labels": [str(d[0]) for d in data['daily_values']], + "datasets": [ + { + "label": "Aufnahme (kcal)", + "data": [d[1] for d in data['daily_values']], + "borderColor": "#1D9E75", + "fill": False + }, + { + "label": "Verbrauch (kcal)", + "data": [d[2] for d in data['daily_values']], + "borderColor": "#D85A30", + "fill": False + }, + { + "label": "Netto (kcal)", + "data": [d[3] for d in data['daily_values']], + "borderColor": "#666", + "borderDash": [5, 5], + "fill": False + } + ] + }, + "metadata": { + "avg_net": data['avg_net'], + "energy_availability": data['energy_availability'], + "red_s_warning": data['red_s_warning'], + "confidence": data['confidence'] + } + } + + +# ── ACTIVITY CHARTS (A1-A5) 
───────────────────────────────────── + +@router.get("/training-volume") +def training_volume_chart( + weeks: int = Query(4, ge=1, le=12), + session: dict = Depends(require_auth) +): + """ + A1: Training Volume per Week + """ + pid = session['profile_id'] + data = get_training_volume_data(pid, weeks=weeks) + + return { + "chart_type": "bar", + "data": { + "labels": [str(w['week_start']) for w in data['weekly_totals']], + "datasets": [ + { + "label": "Dauer (min)", + "data": [w['duration_min'] for w in data['weekly_totals']], + "backgroundColor": "rgba(29, 158, 117, 0.7)" + } + ] + }, + "metadata": { + "by_type": data['by_type'], + "avg_quality": data['avg_quality'], + "monotony": data['monotony'], + "strain": data['strain'], + "confidence": data['confidence'] + } + } + + +@router.get("/ability-balance") +def ability_balance_chart( + weeks: int = Query(4, ge=1, le=12), + session: dict = Depends(require_auth) +): + """ + A5: Ability Balance Radar + """ + pid = session['profile_id'] + data = get_ability_balance_data(pid, weeks=weeks) + + return { + "chart_type": "radar", + "data": { + "labels": ["Kraft", "Ausdauer", "Mobilität", "Koordination", "Mental"], + "datasets": [ + { + "label": "Aktuelle Balance", + "data": [ + data['abilities']['strength'], + data['abilities']['cardio'], + data['abilities']['mobility'], + data['abilities']['coordination'], + data['abilities']['mental'] + ], + "backgroundColor": "rgba(29, 158, 117, 0.2)", + "borderColor": "#1D9E75", + "pointBackgroundColor": "#1D9E75" + } + ] + }, + "metadata": { + "balance_score": data['balance_score'], + "imbalances": data['imbalances'], + "confidence": data['confidence'] + } + } + + +# ── RECOVERY CHARTS (R1-R2) ───────────────────────────────────── + +@router.get("/recovery-score") +def recovery_score_chart( + days: int = Query(7, ge=7, le=28), + session: dict = Depends(require_auth) +): + """ + R1: Recovery Score Breakdown + """ + pid = session['profile_id'] + data = get_recovery_score_data(pid, 
days=days) + + return { + "chart_type": "bar_horizontal", + "data": { + "labels": ["RHR", "HRV", "Sleep", "Rest Compliance"], + "datasets": [ + { + "label": "Score", + "data": [ + data['components']['rhr']['score'], + data['components']['hrv']['score'], + data['components']['sleep']['score'], + data['components']['rest_compliance']['score'] + ], + "backgroundColor": [ + "#1D9E75", + "#1D9E75", + "#1D9E75", + "#1D9E75" + ] + } + ] + }, + "metadata": { + "total_score": data['score'], + "trend": data['trend'], + "confidence": data['confidence'] + } + } + + +# ── VITALS CHARTS (V1-V3) ─────────────────────────────────────── + +@router.get("/blood-pressure") +def blood_pressure_chart( + days: int = Query(28, ge=7, le=90), + session: dict = Depends(require_auth) +): + """ + V1: Blood Pressure Trend + """ + pid = session['profile_id'] + data = get_blood_pressure_data(pid, days=days) + + return { + "chart_type": "line", + "data": { + "labels": [str(m['date']) for m in data['measurements']], + "datasets": [ + { + "label": "Systolisch (mmHg)", + "data": [m['systolic'] for m in data['measurements']], + "borderColor": "#D85A30", + "fill": False + }, + { + "label": "Diastolisch (mmHg)", + "data": [m['diastolic'] for m in data['measurements']], + "borderColor": "#1D9E75", + "fill": False + } + ] + }, + "metadata": { + "avg_systolic": data['avg_systolic'], + "avg_diastolic": data['avg_diastolic'], + "risk_level": data['risk_level'], + "confidence": data['confidence'] + } + } + + +# ── CORRELATIONS ──────────────────────────────────────────────── + +@router.get("/correlation") +def correlation_chart( + metric_a: str = Query(..., description="e.g., 'calorie_deficit'"), + metric_b: str = Query(..., description="e.g., 'weight_change'"), + days: int = Query(90, ge=28, le=365), + session: dict = Depends(require_auth) +): + """ + Lag-based correlation between two metrics. 
+ """ + pid = session['profile_id'] + data = get_correlation_data(pid, metric_a, metric_b, days=days) + + return { + "chart_type": "scatter", + "data": { + # Scatter plot data would go here + # (implementation depends on metric types) + }, + "metadata": { + "correlation": data['correlation'], + "best_lag": data['best_lag'], + "p_value": data['p_value'], + "interpretation": data['interpretation'], + "confidence": data['confidence'] + } + } + + +@router.get("/plateau-detection") +def plateau_detection( + metric: str = Query(..., description="e.g., 'weight', 'vo2max'"), + days: int = Query(28, ge=14, le=90), + session: dict = Depends(require_auth) +): + """ + Detect if metric has plateaued. + """ + pid = session['profile_id'] + data = detect_plateau(pid, metric, days=days) + + return { + "is_plateau": data['is_plateau'], + "metric": data['metric'], + "duration_days": data['duration_days'], + "expected_change": data['expected_change'], + "actual_change": data['actual_change'], + "possible_causes": data['possible_causes'], + "confidence": data['confidence'] + } +``` + +**Router in main.py registrieren:** + +```python +# backend/main.py + +from routers import charts # NEU + +# ... existing routers ... + +app.include_router(charts.router) # NEU +``` + +--- + +### Step 4: goal_utils.py Refactoring (1h) + +**Pfad:** `backend/goal_utils.py` + +**Änderungen:** + +```python +# ALT: +def get_active_goals(profile_id: str) -> list[dict]: + # 50 Zeilen SQL + Logik + ... + +# NEU: +from data_layer.goals import get_active_goals as _get_active_goals + +def get_active_goals(profile_id: str) -> list[dict]: + """ + Wrapper for backwards compatibility. 
+ + Phase 0c: Delegates to data_layer.goals.get_active_goals() + """ + return _get_active_goals(profile_id) + + +# map_focus_to_score_components() BLEIBT HIER +# → Ist Score-Gewichtung, nicht Datenermittlung +``` + +--- + +### Step 5: Testing (2-3h) + +**Test-Strategie:** + +#### Unit Tests für Data Layer + +**Pfad:** `backend/tests/test_data_layer.py` (NEU) + +```python +import pytest +from data_layer.body_metrics import get_weight_trend_data +from data_layer.utils import calculate_confidence + +def test_weight_trend_data_sufficient(): + """Test with sufficient data points""" + data = get_weight_trend_data("test_profile_1", days=28) + + assert data['confidence'] in ['high', 'medium', 'low', 'insufficient'] + assert 'raw_values' in data + assert 'slope_28d' in data + assert len(data['raw_values']) >= 0 + +def test_weight_trend_data_insufficient(): + """Test with insufficient data points""" + data = get_weight_trend_data("profile_no_data", days=28) + + assert data['confidence'] == 'insufficient' + +def test_confidence_calculation(): + """Test confidence scoring logic""" + assert calculate_confidence(20, 28, "general") == "high" + assert calculate_confidence(15, 28, "general") == "medium" + assert calculate_confidence(5, 28, "general") == "low" + assert calculate_confidence(2, 28, "general") == "insufficient" + +# ... weitere tests ... +``` + +#### Integration Tests + +**Pfad:** `backend/tests/test_charts_api.py` (NEU) + +```python +import pytest +from fastapi.testclient import TestClient +from main import app + +client = TestClient(app) + +def test_weight_trend_chart_endpoint(auth_token): + """Test weight trend chart endpoint""" + response = client.get( + "/api/charts/weight-trend?days=90", + headers={"X-Auth-Token": auth_token} + ) + + assert response.status_code == 200 + data = response.json() + + assert 'chart_type' in data + assert data['chart_type'] == 'line' + assert 'data' in data + assert 'metadata' in data + assert 'confidence' in data['metadata'] + +# ... 
weitere tests ... +``` + +#### Manual Testing Checklist + +``` +Data Layer: +[ ] get_weight_trend_data() mit verschiedenen days-Parametern +[ ] get_body_composition_data() mit realen Profil-Daten +[ ] get_protein_adequacy_data() mit goal_mode Variationen +[ ] get_recovery_score_data() mit/ohne vollständige Vitals +[ ] Confidence scoring bei verschiedenen Datenmengen +[ ] Outlier detection funktioniert korrekt +[ ] Baseline calculations korrekt + +KI Layer (Refactored): +[ ] Alle bestehenden Platzhalter funktionieren weiter +[ ] Keine Regression in KI-Prompt-Outputs +[ ] {{active_goals}} und {{focus_areas}} JSON korrekt + +Charts API: +[ ] Alle 10+ Chart-Endpoints erreichbar +[ ] JSON-Struktur Chart.js-kompatibel +[ ] Metadata vollständig +[ ] Fehlerbehandlung bei fehlenden Daten +[ ] Auth funktioniert (require_auth) + +Performance: +[ ] Keine N+1 Queries +[ ] Response Times < 500ms +[ ] Kein Memory Leak bei großen Datenmengen +``` + +--- + +### Step 6: Dokumentation (1-2h) + +#### 1. Architecture Documentation + +**Pfad:** `.claude/docs/technical/DATA_LAYER_ARCHITECTURE.md` (NEU) + +```markdown +# Data Layer Architecture + +## Overview + +Three-layer architecture for data retrieval, calculation, and presentation. 
+
+## Layers
+
+### Layer 1: Data Layer (`backend/data_layer/`)
+- **Purpose:** Pure data retrieval + calculation logic
+- **Returns:** Structured data (dict/list/float)
+- **No formatting:** No strings, no KI-specific formatting
+- **Testable:** Unit tests for each function
+- **Reusable:** Used by both KI layer and visualization layer
+
+### Layer 2a: KI Layer (`backend/placeholder_resolver.py`)
+- **Purpose:** Format data for KI prompts
+- **Input:** Data from data_layer
+- **Output:** Formatted strings
+- **Example:** `"0.23 kg/Woche"`, `"78/100"`, JSON strings
+
+### Layer 2b: Visualization Layer (`backend/routers/charts.py`)
+- **Purpose:** Provide data for frontend charts
+- **Input:** Data from data_layer
+- **Output:** Chart.js compatible JSON
+- **Example:** `{"chart_type": "line", "data": {...}, "metadata": {...}}`
+
+## Function Naming Conventions
+
+- Data Layer: `get_<metric>_data()` → returns dict
+- KI Layer: `resolve_<placeholder>()` → returns str
+- Charts: `<metric>_chart()` → returns dict (Chart.js format)
+
+## Migration from Phase 0b
+
+All placeholder functions in `placeholder_resolver.py` that contained
+inline SQL queries and calculations have been moved to `data_layer/`.
+
+The placeholder functions now simply call data_layer functions and format
+the result for KI consumption.
+
+...
+```
+
+#### 2. API Documentation
+
+**Pfad:** `docs/api/CHARTS_API.md` (NEU)
+
+```markdown
+# Charts API Reference
+
+## Base URL
+
+`/api/charts`
+
+## Authentication
+
+All endpoints require authentication via `X-Auth-Token` header.
+
+## Endpoints
+
+### Body Charts
+
+#### GET /charts/weight-trend
+
+Weight trend with goal projections.
+
+**Parameters:**
+- `days` (query, int, optional): Analysis window (default: 90, range: 7-365)
+
+**Response:**
+```json
+{
+  "chart_type": "line",
+  "data": {
+    "labels": ["2026-01-01", "2026-01-02", ...],
+    "datasets": [...]
+  },
+  "metadata": {
+    "slope_28d": 0.23,
+    "confidence": "high",
+    ...
+  }
+}
+```
+
+...
+```
+
+#### 3.
Update CLAUDE.md + +**Pfad:** `CLAUDE.md` + +```markdown +### Phase 0c Completion (29-30.03.2026) 🏗️ +- ✅ **Multi-Layer Data Architecture:** + - Data Layer: 8 modules, 50+ functions + - KI Layer: Refactored placeholder_resolver.py + - Visualization Layer: charts.py router +- ✅ **Charts API:** 10+ endpoints für Diagramme +- ✅ **Separation of Concerns:** Single Source of Truth +- ✅ **Testing:** Unit tests für Data Layer +- ✅ **Dokumentation:** Architecture + API docs + +**Betroffene Dateien:** +- `backend/data_layer/*` - NEU (8 Module) +- `backend/routers/charts.py` - NEU +- `backend/placeholder_resolver.py` - REFACTORED +- `backend/goal_utils.py` - REFACTORED +``` + +--- + +## Acceptance Criteria + +Phase 0c ist abgeschlossen, wenn: + +### Funktional +- ✅ Alle 50+ Data Layer Funktionen implementiert +- ✅ Alle bestehenden Platzhalter funktionieren weiter (keine Regression) +- ✅ Mindestens 10 Chart-Endpoints verfügbar +- ✅ goal_utils.py nutzt data_layer.goals +- ✅ Alle Charts liefern Chart.js-kompatible Daten + +### Technisch +- ✅ Keine Code-Duplikation zwischen KI Layer und Charts +- ✅ Data Layer hat Unit Tests (>80% coverage für utils.py) +- ✅ Confidence scoring funktioniert korrekt +- ✅ Outlier detection funktioniert +- ✅ Alle Decimal → Float Conversions korrekt + +### Qualität +- ✅ Keine SQL queries in placeholder_resolver.py +- ✅ Keine SQL queries in routers/charts.py +- ✅ Alle Funktionen haben Type Hints +- ✅ Alle Funktionen haben Docstrings +- ✅ Migrations laufen erfolgreich + +### Dokumentation +- ✅ DATA_LAYER_ARCHITECTURE.md erstellt +- ✅ CHARTS_API.md erstellt +- ✅ CLAUDE.md aktualisiert +- ✅ Dieses Issue-Dokument vollständig + +--- + +## Common Pitfalls (Learnings from Phase 0b) + +### 1. Decimal → Float Conversion +```python +# ❌ WRONG: +protein = row['protein'] # Decimal object + +# ✅ CORRECT: +protein = float(row['protein']) if row['protein'] else 0.0 +``` + +### 2. 
Date Serialization +```python +# ❌ WRONG: +return {"date": date_obj} # Not JSON serializable + +# ✅ CORRECT: +from data_layer.utils import serialize_dates +return serialize_dates({"date": date_obj}) +``` + +### 3. Dict Access Safety +```python +# ❌ WRONG: +value = data['key'] # KeyError if missing + +# ✅ CORRECT: +value = data.get('key', default_value) +``` + +### 4. Column Name Consistency +```python +# ❌ WRONG (assumed name): +SELECT bf_jpl FROM caliper_log + +# ✅ CORRECT (check schema): +SELECT body_fat_pct FROM caliper_log +``` + +### 5. Confidence Calculation +```python +# ✅ ALWAYS use utils.calculate_confidence() +# DON'T hardcode confidence logic +``` + +### 6. SQL Query Structure +```python +# ✅ Use parameter binding: +cur.execute("SELECT * FROM t WHERE id = %s", (id,)) + +# ❌ NEVER string concatenation: +cur.execute(f"SELECT * FROM t WHERE id = {id}") +``` + +--- + +## Timeline + +**Geschätzte Dauer:** 20-27h (5-7 Tage bei 4h/Tag) + +| Tag | Aufgabe | Stunden | +|-----|---------|---------| +| 1-2 | Data Layer Module 1-4 (body, nutrition, activity, recovery) | 6-8h | +| 3 | Data Layer Module 5-8 (health, goals, correlations, utils) | 4-5h | +| 4 | Placeholder Resolver Refactoring | 3-4h | +| 5 | Charts Router (10+ endpoints) | 6-8h | +| 6 | goal_utils.py Refactoring + Testing | 3-4h | +| 7 | Dokumentation + Final Testing | 2-3h | + +**Total:** 24-32h (realistisch: 5-7 Tage) + +--- + +## Next Steps After Phase 0c + +**Phase 1: Frontend Charts (2-3 Wochen)** +- Chart-Komponenten in React implementieren +- Integration der Charts API +- Dashboard-Layout mit Charts + +**Phase 2: Caching Layer** +- Redis für häufige Abfragen +- Cache invalidation strategy + +**Phase 3: Advanced Analytics** +- Machine Learning für Projektionen +- Anomaly Detection mit ML +- Personalisierte Empfehlungen + +--- + +**Erstellt:** 28. 
März 2026 +**Autor:** Claude Sonnet 4.5 +**Status:** Ready for Implementation +**Gitea Issue:** #53 (zu erstellen) diff --git a/docs/issues/issue-54-dynamic-placeholder-system.md b/docs/issues/issue-54-dynamic-placeholder-system.md new file mode 100644 index 0000000..2e9423a --- /dev/null +++ b/docs/issues/issue-54-dynamic-placeholder-system.md @@ -0,0 +1,765 @@ +# Issue #54: Dynamic Placeholder System + +**Status:** 📋 Planned (Post Phase 0c) +**Priorität:** Medium +**Aufwand:** 6-8h +**Erstellt:** 28. März 2026 +**Abhängigkeiten:** Phase 0c ✅ + +--- + +## Problem + +**Aktuell (Phase 0b/0c):** +```python +# backend/placeholder_resolver.py + +PLACEHOLDER_FUNCTIONS = { + "weight_aktuell": resolve_weight_aktuell, + "weight_trend": resolve_weight_trend, + # ... 50+ manual entries ... +} + +def get_placeholder_catalog(profile_id: str): + placeholders = { + 'Körper': [ + ('weight_aktuell', 'Aktuelles Gewicht in kg'), + ('weight_trend', 'Gewichtstrend (7d/30d)'), + # ... 50+ manual entries ... + ], + } +``` + +**Probleme:** +- ❌ Neue Platzhalter erfordern 3 Code-Änderungen: + 1. Funktion implementieren + 2. In `PLACEHOLDER_FUNCTIONS` registrieren + 3. In `get_placeholder_catalog()` dokumentieren +- ❌ Fehleranfällig (vergisst man einen Schritt → Bug) +- ❌ Katalog kann out-of-sync mit tatsächlich verfügbaren Platzhaltern sein +- ❌ Keine Introspection möglich (welche Platzhalter gibt es?) + +--- + +## Lösung: Auto-Discovery mit Decorators + +### Konzept + +```python +# 1. Decorator registriert Funktionen automatisch +@placeholder( + name="weight_aktuell", + category="Körper", + description="Aktuelles Gewicht in kg" +) +def resolve_weight_aktuell(profile_id: str) -> str: + ... + +# 2. Registry sammelt alle registrierten Platzhalter +PLACEHOLDER_REGISTRY = {} # Wird automatisch gefüllt + +# 3. 
Katalog wird aus Registry generiert +def get_placeholder_catalog(): + return generate_catalog_from_registry() +``` + +**Vorteile:** +- ✅ Nur 1 Stelle zu ändern (Decorator über Funktion) +- ✅ Auto-Sync: Katalog immer aktuell +- ✅ Introspection: Alle verfügbaren Platzhalter abrufbar +- ✅ Metadata direkt bei Funktion (Single Source of Truth) + +--- + +## Implementierung + +### Step 1: Decorator + Registry erstellen (2h) + +**Datei:** `backend/placeholder_resolver.py` + +```python +from functools import wraps +from typing import Dict, List, Callable + +# ── REGISTRY ───────────────────────────────────────────────────── + +PLACEHOLDER_REGISTRY: Dict[str, dict] = {} + +def placeholder( + name: str, + category: str, + description: str, + example: str = None +): + """ + Decorator to register a placeholder function. + + Usage: + @placeholder( + name="weight_aktuell", + category="Körper", + description="Aktuelles Gewicht in kg", + example="85.3 kg" + ) + def resolve_weight_aktuell(profile_id: str) -> str: + ... 
+ + Args: + name: Placeholder key (used in templates as {{name}}) + category: Category for grouping (e.g., "Körper", "Ernährung") + description: Human-readable description + example: Optional example output + + Returns: + Decorated function (registered in PLACEHOLDER_REGISTRY) + """ + def decorator(func: Callable[[str], str]) -> Callable[[str], str]: + # Validate function signature + import inspect + sig = inspect.signature(func) + params = list(sig.parameters.keys()) + + if len(params) != 1 or params[0] != 'profile_id': + raise ValueError( + f"Placeholder function {func.__name__} must have signature: " + f"(profile_id: str) -> str" + ) + + if sig.return_annotation != str: + raise ValueError( + f"Placeholder function {func.__name__} must return str" + ) + + # Register in global registry + PLACEHOLDER_REGISTRY[name] = { + 'function': func, + 'category': category, + 'description': description, + 'example': example or "N/A", + 'function_name': func.__name__ + } + + @wraps(func) + def wrapper(profile_id: str) -> str: + return func(profile_id) + + return wrapper + + return decorator + + +# ── CATALOG GENERATION ─────────────────────────────────────────── + +def get_placeholder_catalog(profile_id: str = None) -> Dict[str, List[Dict[str, str]]]: + """ + Generate placeholder catalog from registry. + + Args: + profile_id: Optional - if provided, generates example values + + Returns: + { + "category": [ + { + "key": "placeholder_name", + "description": "...", + "example": "..." or computed value + }, + ... + ], + ... 
+ } + """ + catalog = {} + + for name, meta in PLACEHOLDER_REGISTRY.items(): + category = meta['category'] + + if category not in catalog: + catalog[category] = [] + + # Generate example value if profile_id provided + example = meta['example'] + if profile_id and example == "N/A": + try: + example = meta['function'](profile_id) + except Exception as e: + example = f"Error: {str(e)}" + + catalog[category].append({ + 'key': name, + 'description': meta['description'], + 'example': example, + 'placeholder': f'{{{{{name}}}}}' # {{name}} + }) + + # Sort categories + sorted_catalog = {} + category_order = [ + 'Profil', 'Körper', 'Ernährung', 'Training', + 'Schlaf & Erholung', 'Vitalwerte', 'Scores', 'Focus Areas', 'Zeitraum' + ] + + for cat in category_order: + if cat in catalog: + sorted_catalog[cat] = sorted(catalog[cat], key=lambda x: x['key']) + + # Add any remaining categories not in order + for cat, items in catalog.items(): + if cat not in sorted_catalog: + sorted_catalog[cat] = sorted(items, key=lambda x: x['key']) + + return sorted_catalog + + +# ── PLACEHOLDER RESOLUTION ─────────────────────────────────────── + +def resolve_placeholders(template: str, profile_id: str) -> str: + """ + Resolve all placeholders in template. + + Uses PLACEHOLDER_REGISTRY (auto-populated by decorators). + """ + result = template + + for name, meta in PLACEHOLDER_REGISTRY.items(): + placeholder = f'{{{{{name}}}}}' + + if placeholder in result: + try: + value = meta['function'](profile_id) + result = result.replace(placeholder, str(value)) + except Exception as e: + # Log error but don't crash + import traceback + print(f"Error resolving {{{{{{name}}}}}}: {e}") + traceback.print_exc() + result = result.replace(placeholder, f"[Error: {name}]") + + return result + + +# ── API ENDPOINT ───────────────────────────────────────────────── + +def list_available_placeholders() -> List[str]: + """ + List all available placeholder names. + + Returns: + ["weight_aktuell", "weight_trend", ...] 
+ """ + return sorted(PLACEHOLDER_REGISTRY.keys()) + + +def get_placeholder_metadata(name: str) -> dict: + """ + Get metadata for a specific placeholder. + + Args: + name: Placeholder key + + Returns: + { + "name": "weight_aktuell", + "category": "Körper", + "description": "...", + "example": "...", + "function_name": "resolve_weight_aktuell" + } + + Raises: + KeyError: If placeholder doesn't exist + """ + if name not in PLACEHOLDER_REGISTRY: + raise KeyError(f"Placeholder '{name}' not found") + + meta = PLACEHOLDER_REGISTRY[name].copy() + del meta['function'] # Don't expose function reference in API + meta['name'] = name + return meta +``` + +### Step 2: Platzhalter mit Decorator versehen (3-4h) + +**Migration-Strategie:** + +```python +# ALT (Phase 0b/0c): +def resolve_weight_aktuell(profile_id: str) -> str: + """Returns current weight""" + ... + +PLACEHOLDER_FUNCTIONS = { + "weight_aktuell": resolve_weight_aktuell, +} + + +# NEU (Issue #54): +@placeholder( + name="weight_aktuell", + category="Körper", + description="Aktuelles Gewicht in kg", + example="85.3 kg" +) +def resolve_weight_aktuell(profile_id: str) -> str: + """Returns current weight""" + ... + +# PLACEHOLDER_FUNCTIONS wird nicht mehr benötigt! +``` + +**Alle ~50 Platzhalter konvertieren:** +```python +# Profil +@placeholder(name="name", category="Profil", description="Name des Nutzers") +def resolve_name(profile_id: str) -> str: ... + +@placeholder(name="age", category="Profil", description="Alter in Jahren") +def resolve_age(profile_id: str) -> str: ... + +# Körper +@placeholder(name="weight_aktuell", category="Körper", description="Aktuelles Gewicht in kg") +def resolve_weight_aktuell(profile_id: str) -> str: ... + +@placeholder(name="weight_7d_median", category="Körper", description="Gewicht 7d Median (kg)") +def resolve_weight_7d_median(profile_id: str) -> str: ... + +# ... etc. 
für alle 50+ Platzhalter +``` + +### Step 3: API Endpoints erstellen (1h) + +**Datei:** `backend/routers/placeholders.py` (NEU) + +```python +from fastapi import APIRouter, Depends, HTTPException +from auth import require_auth +from placeholder_resolver import ( + get_placeholder_catalog, + list_available_placeholders, + get_placeholder_metadata, + resolve_placeholders +) + +router = APIRouter(prefix="/api/placeholders", tags=["placeholders"]) + + +@router.get("/catalog") +def get_catalog( + with_examples: bool = False, + session: dict = Depends(require_auth) +): + """ + Get grouped placeholder catalog. + + Args: + with_examples: If true, generates example values using user's data + + Returns: + { + "category": [ + { + "key": "placeholder_name", + "description": "...", + "example": "...", + "placeholder": "{{placeholder_name}}" + }, + ... + ], + ... + } + """ + profile_id = session['profile_id'] if with_examples else None + return get_placeholder_catalog(profile_id) + + +@router.get("/list") +def list_placeholders(): + """ + List all available placeholder names (no auth required). + + Returns: + ["weight_aktuell", "weight_trend", ...] + """ + return list_available_placeholders() + + +@router.get("/metadata/{name}") +def get_metadata(name: str): + """ + Get metadata for a specific placeholder (no auth required). + + Returns: + { + "name": "weight_aktuell", + "category": "Körper", + "description": "...", + "example": "...", + "function_name": "resolve_weight_aktuell" + } + """ + try: + return get_placeholder_metadata(name) + except KeyError: + raise HTTPException(status_code=404, detail=f"Placeholder '{name}' not found") + + +@router.post("/resolve") +def resolve_template( + template: str, + session: dict = Depends(require_auth) +): + """ + Resolve all placeholders in template. 
+ + Args: + template: String with placeholders (e.g., "Dein Gewicht ist {{weight_aktuell}}") + + Returns: + { + "original": "...", + "resolved": "...", + "placeholders_found": ["weight_aktuell", ...], + "placeholders_resolved": ["weight_aktuell", ...], + "placeholders_failed": [] + } + """ + profile_id = session['profile_id'] + + # Find all placeholders in template + import re + found = re.findall(r'\{\{([^}]+)\}\}', template) + + # Resolve template + resolved = resolve_placeholders(template, profile_id) + + # Check which placeholders were resolved + resolved_list = [p for p in found if f'{{{{{p}}}}}' not in resolved] + failed_list = [p for p in found if f'{{{{{p}}}}}' in resolved] + + return { + "original": template, + "resolved": resolved, + "placeholders_found": found, + "placeholders_resolved": resolved_list, + "placeholders_failed": failed_list + } +``` + +**Router in main.py registrieren:** +```python +# backend/main.py + +from routers import placeholders # NEU + +app.include_router(placeholders.router) +``` + +### Step 4: Frontend Integration (1-2h) + +**Placeholder Browser Komponente:** + +```javascript +// frontend/src/components/PlaceholderBrowser.jsx + +import { useState, useEffect } from 'react' +import { api } from '../utils/api' + +export default function PlaceholderBrowser({ onSelect }) { + const [catalog, setCatalog] = useState({}) + const [loading, setLoading] = useState(true) + const [searchTerm, setSearchTerm] = useState('') + + useEffect(() => { + loadCatalog() + }, []) + + async function loadCatalog() { + try { + const data = await api.getPlaceholderCatalog(true) // with examples + setCatalog(data) + } catch (err) { + console.error('Failed to load catalog:', err) + } finally { + setLoading(false) + } + } + + function filterPlaceholders() { + if (!searchTerm) return catalog + + const filtered = {} + for (const [category, items] of Object.entries(catalog)) { + const matching = items.filter(p => + 
p.key.toLowerCase().includes(searchTerm.toLowerCase()) ||
+        p.description.toLowerCase().includes(searchTerm.toLowerCase())
+      )
+      if (matching.length > 0) {
+        filtered[category] = matching
+      }
+    }
+    return filtered
+  }
+
+  if (loading) return <div>Lädt Platzhalter...</div>
+
+  const filteredCatalog = filterPlaceholders()
+
+  return (
+    <div className="placeholder-browser">
+      <input
+        type="text"
+        placeholder="Platzhalter suchen..."
+        value={searchTerm}
+        onChange={(e) => setSearchTerm(e.target.value)}
+        className="form-input"
+      />
+
+      {Object.entries(filteredCatalog).map(([category, items]) => (
+        <div key={category} className="placeholder-category">
+          <h3>{category}</h3>
+          <div className="placeholder-list">
+            {items.map(p => (
+              <div
+                key={p.key}
+                className="placeholder-item"
+                onClick={() => onSelect && onSelect(p.placeholder)}
+              >
+                <code>{p.placeholder}</code>
+                <div className="placeholder-description">{p.description}</div>
+                {p.example !== 'N/A' && (
+                  <div className="placeholder-example">
+                    Beispiel: {p.example}
+                  </div>
+                )}
+              </div>
+            ))}
+          </div>
+        </div>
+      ))}
+    </div>
+  
+ ) +} +``` + +**API Functions hinzufügen:** + +```javascript +// frontend/src/utils/api.js + +export const api = { + // ... existing functions ... + + // Placeholder System + getPlaceholderCatalog: async (withExamples = false) => { + return await apiFetch(`/api/placeholders/catalog?with_examples=${withExamples}`) + }, + + listPlaceholders: async () => { + return await apiFetch('/api/placeholders/list') + }, + + getPlaceholderMetadata: async (name) => { + return await apiFetch(`/api/placeholders/metadata/${name}`) + }, + + resolvePlaceholders: async (template) => { + return await apiFetch('/api/placeholders/resolve', { + method: 'POST', + body: JSON.stringify({ template }) + }) + } +} +``` + +--- + +## Vorteile nach Implementierung + +### Developer Experience +- ✅ Nur 1 Stelle ändern (Decorator) +- ✅ Automatische Validierung (Signatur-Check) +- ✅ IDE Auto-Complete für Decorator-Parameter +- ✅ Weniger Fehler (kein out-of-sync) + +### API Features +- ✅ `GET /api/placeholders/list` - Alle verfügbaren Platzhalter +- ✅ `GET /api/placeholders/catalog` - Gruppierter Katalog +- ✅ `GET /api/placeholders/metadata/{name}` - Details zu Platzhalter +- ✅ `POST /api/placeholders/resolve` - Template auflösen + +### Frontend Features +- ✅ Placeholder Browser mit Suche +- ✅ Live-Beispielwerte aus User-Daten +- ✅ Click-to-Insert in Prompt-Editor +- ✅ Auto-Complete beim Tippen + +--- + +## Migration-Plan + +### Phase 1: Backwards Compatible (2h) +```python +# Beide Systeme parallel unterstützen + +# 1. Decorator-System implementieren +@placeholder(...) +def resolve_weight_aktuell(profile_id: str) -> str: ... + +# 2. Legacy PLACEHOLDER_FUNCTIONS weiter unterstützen +PLACEHOLDER_FUNCTIONS = PLACEHOLDER_REGISTRY # Alias + +# 3. 
get_placeholder_catalog() nutzt Registry +``` + +### Phase 2: Migration (3h) +```python +# Alle 50+ Platzhalter mit Decorator versehen +# Ein Commit pro Kategorie: +# - commit 1: Profil (5 Platzhalter) +# - commit 2: Körper (12 Platzhalter) +# - commit 3: Ernährung (10 Platzhalter) +# - commit 4: Training (10 Platzhalter) +# - commit 5: Schlaf & Erholung (8 Platzhalter) +# - commit 6: Vitalwerte (6 Platzhalter) +# - commit 7: Rest (Scores, Focus Areas, Zeitraum) +``` + +### Phase 3: Cleanup (1h) +```python +# Legacy Code entfernen +# - PLACEHOLDER_FUNCTIONS Dictionary löschen +# - Alte get_placeholder_catalog() Logik löschen +``` + +--- + +## Testing + +### Unit Tests +```python +# backend/tests/test_placeholder_system.py + +def test_decorator_registration(): + """Test that decorator registers placeholder""" + @placeholder(name="test_ph", category="Test", description="Test") + def resolve_test(profile_id: str) -> str: + return "test_value" + + assert "test_ph" in PLACEHOLDER_REGISTRY + assert PLACEHOLDER_REGISTRY["test_ph"]["category"] == "Test" + +def test_invalid_signature(): + """Test that decorator validates function signature""" + with pytest.raises(ValueError): + @placeholder(name="bad", category="Test", description="Test") + def resolve_bad(profile_id: str, extra: str) -> str: # Wrong signature! 
+ return "bad" + +def test_catalog_generation(): + """Test catalog generation from registry""" + catalog = get_placeholder_catalog() + + assert isinstance(catalog, dict) + assert "Körper" in catalog + assert len(catalog["Körper"]) > 0 + +def test_placeholder_resolution(): + """Test resolving placeholders in template""" + template = "Gewicht: {{weight_aktuell}}" + resolved = resolve_placeholders(template, "test_profile") + + assert "{{weight_aktuell}}" not in resolved + assert "kg" in resolved or "Nicht genug Daten" in resolved +``` + +### Integration Tests +```python +def test_api_catalog_endpoint(client, auth_token): + """Test /api/placeholders/catalog endpoint""" + response = client.get( + "/api/placeholders/catalog", + headers={"X-Auth-Token": auth_token} + ) + + assert response.status_code == 200 + data = response.json() + assert "Körper" in data + assert len(data["Körper"]) > 0 + +def test_api_resolve_endpoint(client, auth_token): + """Test /api/placeholders/resolve endpoint""" + response = client.post( + "/api/placeholders/resolve", + headers={"X-Auth-Token": auth_token}, + json={"template": "Gewicht: {{weight_aktuell}}"} + ) + + assert response.status_code == 200 + data = response.json() + assert "resolved" in data + assert "{{weight_aktuell}}" not in data["resolved"] +``` + +--- + +## Acceptance Criteria + +✅ **Issue #54 ist abgeschlossen, wenn:** + +### Backend +- ✅ `@placeholder` Decorator implementiert +- ✅ `PLACEHOLDER_REGISTRY` automatisch gefüllt +- ✅ `get_placeholder_catalog()` nutzt Registry +- ✅ Alle 50+ Platzhalter mit Decorator versehen +- ✅ Legacy `PLACEHOLDER_FUNCTIONS` entfernt +- ✅ API Endpoints implementiert (/list, /catalog, /metadata, /resolve) +- ✅ Unit Tests geschrieben (>80% coverage) + +### Frontend +- ✅ `PlaceholderBrowser` Komponente erstellt +- ✅ Suche funktioniert +- ✅ Click-to-Insert funktioniert +- ✅ Live-Beispielwerte werden angezeigt +- ✅ Integration in Prompt-Editor + +### Dokumentation +- ✅ `PLACEHOLDER_DEVELOPMENT_GUIDE.md` 
aktualisiert +- ✅ API-Dokumentation erstellt +- ✅ CLAUDE.md aktualisiert + +--- + +## Ausblick: Future Enhancements + +### Auto-Discovery von Data Layer Funktionen + +**Nach Phase 0c:** Data Layer Funktionen könnten automatisch als Platzhalter erkannt werden: + +```python +# backend/data_layer/body_metrics.py + +@data_function( + provides_placeholders=[ + ("weight_7d_median", "Gewicht 7d Median (kg)"), + ("weight_28d_slope", "Gewichtstrend 28d (kg/Tag)"), + ] +) +def get_weight_trend_data(profile_id: str, days: int = 90) -> dict: + ... + +# Automatisch generierte Platzhalter: +@placeholder(name="weight_7d_median", category="Körper", description="...") +def resolve_weight_7d_median(profile_id: str) -> str: + data = get_weight_trend_data(profile_id, days=7) + return f"{data['rolling_median_7d'][-1][1]:.1f} kg" +``` + +**Vorteil:** Data Layer Funktionen automatisch als Platzhalter verfügbar. + +--- + +**Erstellt:** 28. März 2026 +**Autor:** Claude Sonnet 4.5 +**Status:** Planned (Post Phase 0c) +**Geschätzter Aufwand:** 6-8h diff --git a/docs/phase-0c-placeholder-migration-analysis.md b/docs/phase-0c-placeholder-migration-analysis.md new file mode 100644 index 0000000..ae5dac4 --- /dev/null +++ b/docs/phase-0c-placeholder-migration-analysis.md @@ -0,0 +1,422 @@ +# Phase 0c: Placeholder Migration Analysis + +**Erstellt:** 28. 
März 2026 +**Zweck:** Analyse welche Platzhalter zu Data Layer migriert werden müssen + +--- + +## Gesamt-Übersicht + +**Aktuelle Platzhalter:** 116 +**Nach Phase 0c Migration:** +- ✅ **Bleiben einfach (kein Data Layer):** 8 Platzhalter +- 🔄 **Gehen zu Data Layer:** 108 Platzhalter + +--- + +## Kategorisierung: BLEIBEN EINFACH (8 Platzhalter) + +Diese Platzhalter bleiben im KI Layer (placeholder_resolver.py) weil sie: +- Keine Berechnungen durchführen +- Keine Daten-Aggregation benötigen +- Einfache Getter oder Konstanten sind + +### Zeitraum (4 Platzhalter) +```python +'{{datum_heute}}': lambda pid: datetime.now().strftime('%d.%m.%Y') +'{{zeitraum_7d}}': lambda pid: 'letzte 7 Tage' +'{{zeitraum_30d}}': lambda pid: 'letzte 30 Tage' +'{{zeitraum_90d}}': lambda pid: 'letzte 90 Tage' +``` +**Begründung:** Konstanten oder einfache Datum-Formatierung. Kein Data Layer nötig. + +### Profil - Basis (4 Platzhalter) +```python +'{{name}}': lambda pid: get_profile_data(pid).get('name', 'Nutzer') +'{{age}}': lambda pid: calculate_age(get_profile_data(pid).get('dob')) +'{{height}}': lambda pid: str(get_profile_data(pid).get('height', 'unbekannt')) +'{{geschlecht}}': lambda pid: 'männlich' if get_profile_data(pid).get('sex') == 'm' else 'weiblich' +``` +**Begründung:** Direkte Getter aus profiles Tabelle. Keine Aggregation. + +--- + +## GEHEN ZU DATA LAYER (108 Platzhalter) + +### 1. 
Körper (20 Platzhalter) → `data_layer.body_metrics` + +#### Basis-Metriken (8): +```python +'{{weight_aktuell}}' → get_weight_trend_data()['last_value'] +'{{weight_trend}}' → get_weight_trend_data() (formatiert) +'{{kf_aktuell}}' → get_body_composition_data()['body_fat_pct'][-1] +'{{bmi}}' → get_body_composition_data() (berechnet) +'{{caliper_summary}}' → get_caliper_summary_data() +'{{circ_summary}}' → get_circumference_summary() +'{{goal_weight}}' → get_active_goals() (filtered) +'{{goal_bf_pct}}' → get_active_goals() (filtered) +``` + +#### Phase 0b - Advanced Body (12): +```python +'{{weight_7d_median}}' → get_weight_trend_data()['rolling_median_7d'][-1] +'{{weight_28d_slope}}' → get_weight_trend_data()['slope_28d'] +'{{weight_90d_slope}}' → get_weight_trend_data()['slope_90d'] +'{{fm_28d_change}}' → get_body_composition_data()['fm_delta_28d'] +'{{lbm_28d_change}}' → get_body_composition_data()['lbm_delta_28d'] +'{{waist_28d_delta}}' → get_circumference_summary()['changes']['waist_28d'] +'{{hip_28d_delta}}' → get_circumference_summary()['changes']['hip_28d'] +'{{chest_28d_delta}}' → get_circumference_summary()['changes']['chest_28d'] +'{{arm_28d_delta}}' → get_circumference_summary()['changes']['arm_28d'] +'{{thigh_28d_delta}}' → get_circumference_summary()['changes']['thigh_28d'] +'{{waist_hip_ratio}}' → get_circumference_summary()['ratios']['waist_to_hip'] +'{{recomposition_quadrant}}'→ get_body_composition_data()['recomposition_score'] +``` + +**Data Layer Funktionen benötigt:** +- `get_weight_trend_data(profile_id, days=90)` +- `get_body_composition_data(profile_id, days=90)` +- `get_circumference_summary(profile_id, days=90)` +- `get_caliper_summary_data(profile_id, days=90)` + +--- + +### 2. 
Ernährung (14 Platzhalter) → `data_layer.nutrition_metrics` + +#### Basis-Metriken (7): +```python +'{{kcal_avg}}' → get_energy_balance_data()['avg_intake'] +'{{protein_avg}}' → get_protein_adequacy_data()['avg_protein_g'] +'{{carb_avg}}' → get_macro_distribution_data()['avg_carbs_g'] +'{{fat_avg}}' → get_macro_distribution_data()['avg_fat_g'] +'{{nutrition_days}}' → get_energy_balance_data()['data_points'] +'{{protein_ziel_low}}' → get_protein_adequacy_data()['target_protein_g'] (low) +'{{protein_ziel_high}}' → get_protein_adequacy_data()['target_protein_g'] (high) +``` + +#### Phase 0b - Advanced Nutrition (7): +```python +'{{energy_balance_7d}}' → get_energy_balance_data()['avg_net'] +'{{energy_deficit_surplus}}'→ get_energy_balance_data()['deficit_surplus_avg'] +'{{protein_g_per_kg}}' → get_protein_adequacy_data()['avg_protein_per_kg'] +'{{protein_days_in_target}}'→ get_protein_adequacy_data()['adherence_pct'] +'{{protein_adequacy_28d}}' → get_protein_adequacy_data()['adherence_score'] +'{{macro_consistency_score}}'→ get_macro_distribution_data()['balance_score'] +'{{intake_volatility}}' → get_macro_distribution_data()['variability'] +``` + +**Data Layer Funktionen benötigt:** +- `get_protein_adequacy_data(profile_id, days=28, goal_mode=None)` +- `get_energy_balance_data(profile_id, days=28)` +- `get_macro_distribution_data(profile_id, days=28)` + +--- + +### 3. 
Training (16 Platzhalter) → `data_layer.activity_metrics` + +#### Basis-Metriken (3): +```python +'{{activity_summary}}' → get_training_volume_data()['weekly_totals'] (formatted) +'{{activity_detail}}' → get_training_volume_data()['by_type'] (formatted) +'{{trainingstyp_verteilung}}'→ get_activity_quality_distribution() +``` + +#### Phase 0b - Advanced Activity (13): +```python +'{{training_minutes_week}}' → get_training_volume_data()['weekly_totals'][0]['duration_min'] +'{{training_frequency_7d}}' → get_training_volume_data()['weekly_totals'][0]['sessions'] +'{{quality_sessions_pct}}' → get_activity_quality_distribution()['high_quality_pct'] +'{{ability_balance_strength}}' → get_ability_balance_data()['abilities']['strength'] +'{{ability_balance_endurance}}'→ get_ability_balance_data()['abilities']['cardio'] +'{{ability_balance_mental}}' → get_ability_balance_data()['abilities']['mental'] +'{{ability_balance_coordination}}'→ get_ability_balance_data()['abilities']['coordination'] +'{{ability_balance_mobility}}' → get_ability_balance_data()['abilities']['mobility'] +'{{proxy_internal_load_7d}}'→ get_training_volume_data()['strain'] +'{{monotony_score}}' → get_training_volume_data()['monotony'] +'{{strain_score}}' → get_training_volume_data()['strain'] +'{{rest_day_compliance}}' → get_recovery_score_data()['components']['rest_compliance']['score'] +'{{vo2max_trend_28d}}' → get_vitals_baseline_data()['vo2_max']['trend'] +``` + +**Data Layer Funktionen benötigt:** +- `get_training_volume_data(profile_id, weeks=4)` +- `get_activity_quality_distribution(profile_id, days=28)` +- `get_ability_balance_data(profile_id, weeks=4)` + +--- + +### 4. 
Schlaf & Erholung (10 Platzhalter) → `data_layer.recovery_metrics` + +#### Basis-Metriken (3): +```python +'{{sleep_avg_duration}}' → get_sleep_regularity_data()['avg_duration_h'] +'{{sleep_avg_quality}}' → get_sleep_regularity_data()['avg_quality'] +'{{rest_days_count}}' → get_recovery_score_data()['components']['rest_compliance']['rest_days'] +``` + +#### Phase 0b - Advanced Recovery (7): +```python +'{{hrv_vs_baseline_pct}}' → get_vitals_baseline_data()['hrv']['deviation_pct'] +'{{rhr_vs_baseline_pct}}' → get_vitals_baseline_data()['rhr']['deviation_pct'] +'{{sleep_avg_duration_7d}}' → get_sleep_regularity_data()['avg_duration_h'] +'{{sleep_debt_hours}}' → get_sleep_regularity_data()['sleep_debt_h'] +'{{sleep_regularity_proxy}}'→ get_sleep_regularity_data()['regularity_score'] +'{{recent_load_balance_3d}}'→ get_recovery_score_data()['load_balance'] +'{{sleep_quality_7d}}' → get_sleep_regularity_data()['avg_quality'] +``` + +**Data Layer Funktionen benötigt:** +- `get_recovery_score_data(profile_id, days=7)` +- `get_sleep_regularity_data(profile_id, days=28)` +- `get_vitals_baseline_data(profile_id, days=7)` + +--- + +### 5. Vitalwerte (3 Platzhalter) → `data_layer.health_metrics` + +```python +'{{vitals_avg_hr}}' → get_vitals_baseline_data()['rhr']['current'] +'{{vitals_avg_hrv}}' → get_vitals_baseline_data()['hrv']['current'] +'{{vitals_vo2_max}}' → get_vitals_baseline_data()['vo2_max']['current'] +``` + +**Data Layer Funktionen benötigt:** +- `get_vitals_baseline_data(profile_id, days=7)` (bereits in recovery) + +--- + +### 6. 
Scores (6 Platzhalter) → Diverse Module + +```python +'{{goal_progress_score}}' → get_goal_progress_data() → goals.py +'{{body_progress_score}}' → get_body_composition_data() → body_metrics.py +'{{nutrition_score}}' → get_protein_adequacy_data() → nutrition_metrics.py +'{{activity_score}}' → get_training_volume_data() → activity_metrics.py +'{{recovery_score}}' → get_recovery_score_data()['score'] → recovery_metrics.py +'{{data_quality_score}}' → get_data_quality_score() → utils.py (NEW) +``` + +**Hinweis:** Scores nutzen bestehende Data Layer Funktionen, nur Formatierung nötig. + +--- + +### 7. Top Goals/Focus (5 Platzhalter) → `data_layer.goals` + +```python +'{{top_goal_name}}' → get_active_goals()[0]['name'] +'{{top_goal_progress_pct}}' → get_active_goals()[0]['progress_pct'] +'{{top_goal_status}}' → get_active_goals()[0]['status'] +'{{top_focus_area_name}}' → get_weighted_focus_areas()[0]['name'] +'{{top_focus_area_progress}}'→ get_weighted_focus_areas()[0]['progress'] +``` + +**Data Layer Funktionen benötigt:** +- `get_active_goals(profile_id)` (already exists from Phase 0b) +- `get_weighted_focus_areas(profile_id)` (already exists from Phase 0b) + +--- + +### 8. Category Scores (14 Platzhalter) → Formatierung nur + +```python +'{{focus_cat_körper_progress}}' → _format_from_aggregated_data() +'{{focus_cat_körper_weight}}' → _format_from_aggregated_data() +'{{focus_cat_ernährung_progress}}' → _format_from_aggregated_data() +'{{focus_cat_ernährung_weight}}' → _format_from_aggregated_data() +# ... (7 Kategorien × 2 = 14 total) +``` + +**Hinweis:** Diese nutzen bereits aggregierte Daten aus Phase 0b. +**Migration:** Nur KI Layer Formatierung, Data Layer nicht nötig (Daten kommen aus anderen Funktionen). + +--- + +### 9. 
Korrelationen (7 Platzhalter) → `data_layer.correlations` + +```python +'{{correlation_energy_weight_lag}}' → get_correlation_data(pid, 'energy', 'weight') +'{{correlation_protein_lbm}}' → get_correlation_data(pid, 'protein', 'lbm') +'{{correlation_load_hrv}}' → get_correlation_data(pid, 'load', 'hrv') +'{{correlation_load_rhr}}' → get_correlation_data(pid, 'load', 'rhr') +'{{correlation_sleep_recovery}}' → get_correlation_data(pid, 'sleep', 'recovery') +'{{plateau_detected}}' → detect_plateau(pid, 'weight') +'{{top_drivers}}' → get_top_drivers(pid) +``` + +**Data Layer Funktionen benötigt:** +- `get_correlation_data(profile_id, metric_a, metric_b, days=90, max_lag=7)` +- `detect_plateau(profile_id, metric, days=28)` +- `get_top_drivers(profile_id)` (NEW - identifies top correlations) + +--- + +### 10. JSON/Markdown (8 Platzhalter) → Formatierung nur + +```python +'{{active_goals_json}}' → json.dumps(get_active_goals(pid)) +'{{active_goals_md}}' → format_as_markdown(get_active_goals(pid)) +'{{focus_areas_weighted_json}}' → json.dumps(get_weighted_focus_areas(pid)) +'{{focus_areas_weighted_md}}' → format_as_markdown(get_weighted_focus_areas(pid)) +'{{focus_area_weights_json}}' → json.dumps(get_focus_area_weights(pid)) +'{{top_3_focus_areas}}' → format_top_3(get_weighted_focus_areas(pid)) +'{{top_3_goals_behind_schedule}}' → format_goals_behind(get_active_goals(pid)) +'{{top_3_goals_on_track}}' → format_goals_on_track(get_active_goals(pid)) +``` + +**Hinweis:** Diese nutzen bereits existierende Data Layer Funktionen. +**Migration:** Nur KI Layer Formatierung (json.dumps, markdown, etc.). 
+ +--- + +## Data Layer Funktionen - Zusammenfassung + +### Neue Funktionen zu erstellen (Phase 0c): + +#### body_metrics.py (4 Funktionen): +- ✅ `get_weight_trend_data()` +- ✅ `get_body_composition_data()` +- ✅ `get_circumference_summary()` +- ✅ `get_caliper_summary_data()` + +#### nutrition_metrics.py (3 Funktionen): +- ✅ `get_protein_adequacy_data()` +- ✅ `get_energy_balance_data()` +- ✅ `get_macro_distribution_data()` + +#### activity_metrics.py (3 Funktionen): +- ✅ `get_training_volume_data()` +- ✅ `get_activity_quality_distribution()` +- ✅ `get_ability_balance_data()` + +#### recovery_metrics.py (2 Funktionen): +- ✅ `get_recovery_score_data()` +- ✅ `get_sleep_regularity_data()` + +#### health_metrics.py (2 Funktionen): +- ✅ `get_vitals_baseline_data()` +- ✅ `get_blood_pressure_data()` (aus Spec) + +#### goals.py (3 Funktionen): +- ✅ `get_active_goals()` (exists from Phase 0b) +- ✅ `get_weighted_focus_areas()` (exists from Phase 0b) +- ✅ `get_goal_progress_data()` (aus Spec) + +#### correlations.py (3 Funktionen): +- ✅ `get_correlation_data()` +- ✅ `detect_plateau()` +- 🆕 `get_top_drivers()` (NEW - not in spec) + +#### utils.py (Shared): +- ✅ `calculate_confidence()` +- ✅ `calculate_baseline()` +- ✅ `detect_outliers()` +- ✅ `aggregate_data()` +- ✅ `serialize_dates()` +- 🆕 `get_data_quality_score()` (NEW) + +**Total neue Funktionen:** 20 (aus Spec) + 2 (zusätzlich) = **22 Data Layer Funktionen** + +--- + +## Migration-Aufwand pro Kategorie + +| Kategorie | Platzhalter | Data Layer Funcs | Aufwand | Priorität | +|-----------|-------------|------------------|---------|-----------| +| Körper | 20 | 4 | 3-4h | High | +| Ernährung | 14 | 3 | 2-3h | High | +| Training | 16 | 3 | 3-4h | Medium | +| Recovery | 10 | 2 | 2-3h | Medium | +| Vitalwerte | 3 | 1 (shared) | 0.5h | Low | +| Scores | 6 | 0 (use others) | 1h | Low | +| Goals/Focus | 5 | 0 (exists) | 0.5h | Low | +| Categories | 14 | 0 (formatting) | 1h | Low | +| Korrelationen | 7 | 3 | 2-3h | Medium | +| 
JSON/Markdown | 8 | 0 (formatting) | 0.5h | Low | +| **TOTAL** | **108** | **22** | **16-22h** | - | + +--- + +## KI Layer Refactoring-Muster + +**VORHER (Phase 0b):** +```python +def get_latest_weight(profile_id: str) -> str: + """Returns latest weight with SQL + formatting""" + with get_db() as conn: + cur = get_cursor(conn) + cur.execute(""" + SELECT weight FROM weight_log + WHERE profile_id = %s + ORDER BY date DESC LIMIT 1 + """, (profile_id,)) + row = cur.fetchone() + if not row: + return "nicht verfügbar" + return f"{row['weight']:.1f} kg" + +PLACEHOLDER_MAP = { + '{{weight_aktuell}}': get_latest_weight, +} +``` + +**NACHHER (Phase 0c):** +```python +from data_layer.body_metrics import get_weight_trend_data + +def resolve_weight_aktuell(profile_id: str) -> str: + """Returns latest weight (formatted for KI)""" + data = get_weight_trend_data(profile_id, days=7) + + if data['confidence'] == 'insufficient': + return "nicht verfügbar" + + return f"{data['last_value']:.1f} kg" + +PLACEHOLDER_MAP = { + '{{weight_aktuell}}': resolve_weight_aktuell, +} +``` + +**Reduzierung:** Von ~15 Zeilen (SQL + Logic) zu ~7 Zeilen (Call + Format) + +--- + +## Erwartetes Ergebnis nach Phase 0c + +### Zeilen-Reduktion: +- **placeholder_resolver.py:** + - Vorher: ~1200 Zeilen + - Nachher: ~400 Zeilen (67% Reduktion) + +### Code-Qualität: +- ✅ Keine SQL queries in placeholder_resolver.py +- ✅ Keine Berechnungslogik in placeholder_resolver.py +- ✅ Nur Formatierung für KI-Consumption + +### Wiederverwendbarkeit: +- ✅ 22 Data Layer Funktionen nutzbar für: + - KI Layer (108 Platzhalter) + - Charts Layer (10+ Charts) + - API Endpoints (beliebig erweiterbar) + +--- + +## Checkliste: Migration pro Platzhalter + +Für jeden der **108 Platzhalter**: + +``` +[ ] Data Layer Funktion existiert +[ ] KI Layer ruft Data Layer Funktion auf +[ ] Formatierung für KI korrekt +[ ] Fehlerbehandlung (insufficient data) +[ ] Test: Platzhalter liefert gleichen Output wie vorher +[ ] In PLACEHOLDER_MAP 
registriert +[ ] Dokumentiert +``` + +--- + +**Erstellt:** 28. März 2026 +**Status:** Ready for Phase 0c Implementation +**Nächster Schritt:** Data Layer Funktionen implementieren (Start mit utils.py)