# mitai-jinkendo/backend/routers/charts.py
# Lars c21a624a50
# All checks were successful
# Deploy Development / deploy (push) Successful in 50s
# Build Test / lint-backend (push) Successful in 1s
# Build Test / build-frontend (push) Successful in 14s
# fix: E2 protein-adequacy endpoint - undefined variable 'values' -> 'daily_values'
# 2026-03-29 07:38:04 +02:00
#
# 2718 lines
# 82 KiB
# Python

"""
Charts Router - Chart.js-compatible Data Endpoints
Provides structured data for frontend charts/diagrams.
All endpoints return Chart.js-compatible JSON format:
{
"chart_type": "line" | "bar" | "scatter" | "pie",
"data": {
"labels": [...],
"datasets": [...]
},
"metadata": {
"confidence": "high" | "medium" | "low" | "insufficient",
"data_points": int,
...
}
}
Phase 0c: Multi-Layer Architecture
Version: 1.0
"""
from fastapi import APIRouter, Depends, HTTPException, Query
from typing import Dict, List, Optional
from datetime import datetime, timedelta
from auth import require_auth
from data_layer.body_metrics import (
get_weight_trend_data,
get_body_composition_data,
get_circumference_summary_data
)
from data_layer.nutrition_metrics import (
get_nutrition_average_data,
get_protein_targets_data,
get_protein_adequacy_data,
get_macro_consistency_data
)
from data_layer.activity_metrics import (
get_activity_summary_data,
get_training_type_distribution_data,
calculate_training_minutes_week,
calculate_quality_sessions_pct,
calculate_proxy_internal_load_7d,
calculate_monotony_score,
calculate_strain_score,
calculate_ability_balance
)
from data_layer.recovery_metrics import (
get_sleep_duration_data,
get_sleep_quality_data,
calculate_recovery_score_v2,
calculate_hrv_vs_baseline_pct,
calculate_rhr_vs_baseline_pct,
calculate_sleep_debt_hours
)
from data_layer.correlations import (
calculate_lag_correlation,
calculate_correlation_sleep_recovery,
calculate_top_drivers
)
from data_layer.utils import serialize_dates, safe_float, calculate_confidence
router = APIRouter(prefix="/api/charts", tags=["charts"])
# ── Body Charts ─────────────────────────────────────────────────────────────
@router.get("/weight-trend")
def get_weight_trend_chart(
    days: int = Query(default=90, ge=7, le=365, description="Analysis window in days"),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Weight trend line chart (Chart.js format).

    Combines the raw daily weight entries with trend metadata from the
    data layer (confidence, delta, direction, analyzed date range).

    Args:
        days: Analysis window in days (7-365, default 90).
        session: Auth session (injected).

    Returns:
        Chart.js-compatible dict with a single "Gewicht" line dataset and
        a metadata object:
        - confidence: "high" | "medium" | "low" | "insufficient"
        - data_points: number of weight entries in the period
        - first_value / last_value: weight at the period edges (kg)
        - delta: weight change (kg, negative = loss)
        - direction: "increasing" | "decreasing" | "stable"
    """
    profile_id = session['profile_id']

    trend = get_weight_trend_data(profile_id, days)

    # Bail out early when the data layer reports too few entries.
    if trend['confidence'] == 'insufficient':
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Nicht genug Daten für Trend-Analyse"
            }
        }

    # Fetch the raw points for plotting.
    from db import get_db, get_cursor
    cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute(
            """SELECT date, weight FROM weight_log
               WHERE profile_id=%s AND date >= %s
               ORDER BY date""",
            (profile_id, cutoff)
        )
        records = cur.fetchall()

    chart_labels = [r['date'].isoformat() for r in records]
    chart_values = [float(r['weight']) for r in records]

    weight_dataset = {
        "label": "Gewicht",
        "data": chart_values,
        "borderColor": "#1D9E75",
        "backgroundColor": "rgba(29, 158, 117, 0.1)",
        "borderWidth": 2,
        "tension": 0.4,
        "fill": True,
        "pointRadius": 3,
        "pointHoverRadius": 5
    }

    return {
        "chart_type": "line",
        "data": {"labels": chart_labels, "datasets": [weight_dataset]},
        "metadata": serialize_dates({
            "confidence": trend['confidence'],
            "data_points": trend['data_points'],
            "first_value": trend['first_value'],
            "last_value": trend['last_value'],
            "delta": trend['delta'],
            "direction": trend['direction'],
            "first_date": trend['first_date'],
            "last_date": trend['last_date'],
            "days_analyzed": trend['days_analyzed']
        })
    }
@router.get("/body-composition")
def get_body_composition_chart(
    days: int = Query(default=90, ge=7, le=365),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Body composition chart (body fat percentage).

    Currently renders only the most recent measurement as a single-point
    line chart; plotting historical caliper entries is a TODO.

    Args:
        days: Analysis window in days (7-365, default 90).
        session: Auth session (injected).

    Returns:
        Chart.js-compatible line chart dict plus metadata
        (confidence, data_points, body_fat_pct, method, date).
    """
    profile_id = session['profile_id']
    comp = get_body_composition_data(profile_id, days)

    if comp['confidence'] == 'insufficient':
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Körperfett-Messungen vorhanden"
            }
        }

    # PoC: one data point only.
    # TODO in bulk migration: fetch historical caliper entries.
    point_label = comp['date'].isoformat() if comp['date'] else ""
    bf_dataset = {
        "label": "Körperfett %",
        "data": [comp['body_fat_pct']],
        "borderColor": "#D85A30",
        "backgroundColor": "rgba(216, 90, 48, 0.1)",
        "borderWidth": 2
    }

    return {
        "chart_type": "line",
        "data": {"labels": [point_label], "datasets": [bf_dataset]},
        "metadata": serialize_dates({
            "confidence": comp['confidence'],
            "data_points": comp['data_points'],
            "body_fat_pct": comp['body_fat_pct'],
            "method": comp['method'],
            "date": comp['date']
        })
    }
@router.get("/circumferences")
def get_circumferences_chart(
    max_age_days: int = Query(default=90, ge=7, le=365),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Bar chart of the latest circumference measurement per body point.

    Args:
        max_age_days: Maximum age of measurements in days (default 90).
        session: Auth session (injected).

    Returns:
        Chart.js-compatible bar chart dict; metadata carries the full
        measurement details plus newest/oldest measurement dates.
    """
    profile_id = session['profile_id']
    circ = get_circumference_summary_data(profile_id, max_age_days)

    if circ['confidence'] == 'insufficient':
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Umfangsmessungen vorhanden"
            }
        }

    # Largest values first — reads better as a bar chart.
    ordered = sorted(circ['measurements'], key=lambda m: m['value'], reverse=True)
    bar_labels = [entry['point'] for entry in ordered]
    bar_values = [entry['value'] for entry in ordered]

    return {
        "chart_type": "bar",
        "data": {
            "labels": bar_labels,
            "datasets": [
                {
                    "label": "Umfang (cm)",
                    "data": bar_values,
                    "backgroundColor": "#1D9E75",
                    "borderColor": "#085041",
                    "borderWidth": 1
                }
            ]
        },
        "metadata": serialize_dates({
            "confidence": circ['confidence'],
            "data_points": circ['data_points'],
            "newest_date": circ['newest_date'],
            "oldest_date": circ['oldest_date'],
            "measurements": circ['measurements']  # full details
        })
    }
# ── Nutrition Charts ────────────────────────────────────────────────────────
@router.get("/energy-balance")
def get_energy_balance_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Energy balance timeline (E1) - Konzept-konform.

    Shows:
    - Daily calorie intake
    - 7d and 14d rolling averages
    - TDEE reference line (currently a fixed estimate)
    - Average energy deficit/surplus over the period

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js line chart with multiple datasets; metadata includes
        avg_kcal, estimated_tdee, energy_balance and balance_status
        ("deficit" | "maintenance" | "surplus").
    """
    profile_id = session['profile_id']
    from db import get_db, get_cursor

    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT date, kcal
               FROM nutrition_log
               WHERE profile_id=%s AND date >= %s AND kcal IS NOT NULL
               ORDER BY date""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()

    if not rows or len(rows) < 3:
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": len(rows) if rows else 0,
                "message": "Nicht genug Ernährungsdaten (min. 3 Tage)"
            }
        }

    labels = []
    daily_values = []
    avg_7d = []
    avg_14d = []
    for i, row in enumerate(rows):
        labels.append(row['date'].isoformat())
        daily_values.append(safe_float(row['kcal']))
        # Rolling windows are clipped at the series start. Slicing the
        # already-converted daily_values avoids re-running safe_float over
        # the whole window on every iteration.
        window_7d = daily_values[max(0, i - 6):i + 1]
        avg_7d.append(round(sum(window_7d) / len(window_7d), 1) if window_7d else None)
        window_14d = daily_values[max(0, i - 13):i + 1]
        avg_14d.append(round(sum(window_14d) / len(window_14d), 1) if window_14d else None)

    # Fixed estimate until TDEE can be derived from the profile.
    # TODO: Calculate from profile (weight, height, age, activity level)
    estimated_tdee = 2500.0

    avg_intake = sum(daily_values) / len(daily_values) if daily_values else 0
    energy_balance = avg_intake - estimated_tdee

    datasets = [
        {
            "label": "Kalorien (täglich)",
            "data": daily_values,
            "borderColor": "#1D9E7599",
            "backgroundColor": "rgba(29, 158, 117, 0.1)",
            "borderWidth": 1.5,
            "tension": 0.2,
            "fill": False,
            "pointRadius": 2
        },
        {
            "label": "Ø 7 Tage",
            "data": avg_7d,
            "borderColor": "#1D9E75",
            "borderWidth": 2.5,
            "tension": 0.3,
            "fill": False,
            "pointRadius": 0
        },
        {
            "label": "Ø 14 Tage",
            "data": avg_14d,
            "borderColor": "#085041",
            "borderWidth": 2,
            "tension": 0.3,
            "fill": False,
            "pointRadius": 0,
            "borderDash": [6, 3]
        },
        {
            "label": "TDEE (geschätzt)",
            "data": [estimated_tdee] * len(labels),
            "borderColor": "#888",
            "borderWidth": 1,
            "borderDash": [5, 5],
            "fill": False,
            "pointRadius": 0
        }
    ]

    # calculate_confidence is imported at module level; no local re-import.
    confidence = calculate_confidence(len(rows), days, "general")

    return {
        "chart_type": "line",
        "data": {
            "labels": labels,
            "datasets": datasets
        },
        "metadata": serialize_dates({
            "confidence": confidence,
            "data_points": len(rows),
            "avg_kcal": round(avg_intake, 1),
            "estimated_tdee": estimated_tdee,
            "energy_balance": round(energy_balance, 1),
            "balance_status": "deficit" if energy_balance < -200 else "surplus" if energy_balance > 200 else "maintenance",
            "first_date": rows[0]['date'],
            "last_date": rows[-1]['date']
        })
    }
@router.get("/macro-distribution")
def get_macro_distribution_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Macronutrient distribution pie chart (E2).

    Average protein/carbs/fat split (by calories) over the period.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js pie chart with macro percentages; metadata carries both
        gram averages and calorie-based percentages.
    """
    profile_id = session['profile_id']
    macro_data = get_nutrition_average_data(profile_id, days)

    def _empty_chart(message):
        # Shared "no data" payload for both early-exit paths.
        return {
            "chart_type": "pie",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": message
            }
        }

    if macro_data['confidence'] == 'insufficient':
        return _empty_chart("Keine Ernährungsdaten vorhanden")

    # Atwater factors: protein/carbs = 4 kcal/g, fat = 9 kcal/g.
    protein_kcal = macro_data['protein_avg'] * 4
    carbs_kcal = macro_data['carbs_avg'] * 4
    fat_kcal = macro_data['fat_avg'] * 9
    total_kcal = protein_kcal + carbs_kcal + fat_kcal

    if total_kcal == 0:
        return _empty_chart("Keine Makronährstoff-Daten")

    protein_pct = protein_kcal / total_kcal * 100
    carbs_pct = carbs_kcal / total_kcal * 100
    fat_pct = fat_kcal / total_kcal * 100

    return {
        "chart_type": "pie",
        "data": {
            "labels": ["Protein", "Kohlenhydrate", "Fett"],
            "datasets": [
                {
                    "data": [round(protein_pct, 1), round(carbs_pct, 1), round(fat_pct, 1)],
                    "backgroundColor": [
                        "#1D9E75",  # Protein (green)
                        "#F59E0B",  # Carbs (amber)
                        "#EF4444"   # Fat (red)
                    ],
                    "borderWidth": 2,
                    "borderColor": "#fff"
                }
            ]
        },
        "metadata": {
            "confidence": macro_data['confidence'],
            "data_points": macro_data['data_points'],
            "protein_g": round(macro_data['protein_avg'], 1),
            "carbs_g": round(macro_data['carbs_avg'], 1),
            "fat_g": round(macro_data['fat_avg'], 1),
            "protein_pct": round(protein_pct, 1),
            "carbs_pct": round(carbs_pct, 1),
            "fat_pct": round(fat_pct, 1)
        }
    }
@router.get("/protein-adequacy")
def get_protein_adequacy_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Protein adequacy timeline (E2) - Konzept-konform.

    Shows:
    - Daily protein intake
    - 7d and 28d rolling averages
    - Target range bands (min/max from the data layer)

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js line chart; metadata includes target range, days in
        target and target_compliance_pct.
    """
    profile_id = session['profile_id']

    # Protein target range for this profile.
    targets = get_protein_targets_data(profile_id)

    from db import get_db, get_cursor
    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT date, protein_g
               FROM nutrition_log
               WHERE profile_id=%s AND date >= %s AND protein_g IS NOT NULL
               ORDER BY date""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()

    if not rows or len(rows) < 3:
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": len(rows) if rows else 0,
                "message": "Nicht genug Protein-Daten (min. 3 Tage)"
            }
        }

    labels = []
    daily_values = []
    avg_7d = []
    avg_28d = []
    for i, row in enumerate(rows):
        labels.append(row['date'].isoformat())
        daily_values.append(safe_float(row['protein_g']))
        # Rolling windows clipped at the series start; slicing the
        # converted daily_values avoids redundant safe_float calls.
        window_7d = daily_values[max(0, i - 6):i + 1]
        avg_7d.append(round(sum(window_7d) / len(window_7d), 1) if window_7d else None)
        window_28d = daily_values[max(0, i - 27):i + 1]
        avg_28d.append(round(sum(window_28d) / len(window_28d), 1) if window_28d else None)

    target_low = targets['protein_target_low']
    target_high = targets['protein_target_high']

    # All datasets in one literal (previously "Ziel Max" was appended
    # separately, inconsistent with the rest).
    datasets = [
        {
            "label": "Protein (täglich)",
            "data": daily_values,
            "borderColor": "#1D9E7599",
            "backgroundColor": "rgba(29, 158, 117, 0.1)",
            "borderWidth": 1.5,
            "tension": 0.2,
            "fill": False,
            "pointRadius": 2
        },
        {
            "label": "Ø 7 Tage",
            "data": avg_7d,
            "borderColor": "#1D9E75",
            "borderWidth": 2.5,
            "tension": 0.3,
            "fill": False,
            "pointRadius": 0
        },
        {
            "label": "Ø 28 Tage",
            "data": avg_28d,
            "borderColor": "#085041",
            "borderWidth": 2,
            "tension": 0.3,
            "fill": False,
            "pointRadius": 0,
            "borderDash": [6, 3]
        },
        {
            "label": "Ziel Min",
            "data": [target_low] * len(labels),
            "borderColor": "#888",
            "borderWidth": 1,
            "borderDash": [5, 5],
            "fill": False,
            "pointRadius": 0
        },
        {
            "label": "Ziel Max",
            "data": [target_high] * len(labels),
            "borderColor": "#888",
            "borderWidth": 1,
            "borderDash": [5, 5],
            "fill": False,
            "pointRadius": 0
        }
    ]

    # calculate_confidence is imported at module level; no local re-import.
    confidence = calculate_confidence(len(rows), days, "general")

    # Count days whose intake fell inside the target band.
    days_in_target = sum(1 for v in daily_values if target_low <= v <= target_high)

    return {
        "chart_type": "line",
        "data": {
            "labels": labels,
            "datasets": datasets
        },
        "metadata": serialize_dates({
            "confidence": confidence,
            "data_points": len(rows),
            "target_low": round(target_low, 1),
            "target_high": round(target_high, 1),
            "days_in_target": days_in_target,
            "target_compliance_pct": round(days_in_target / len(daily_values) * 100, 1) if daily_values else 0,
            "first_date": rows[0]['date'],
            "last_date": rows[-1]['date']
        })
    }
@router.get("/nutrition-consistency")
def get_nutrition_consistency_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Nutrition consistency score (E5).

    Renders the overall macro consistency score plus per-macro
    consistency (derived from the standard deviations) as a bar chart.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js bar chart with consistency metrics.
    """
    profile_id = session['profile_id']
    consistency_data = get_macro_consistency_data(profile_id, days)

    if consistency_data['confidence'] == 'insufficient':
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Nicht genug Daten für Konsistenz-Analyse"
            }
        }

    bar_labels = [
        "Gesamt-Score",
        f"Protein ({consistency_data['avg_protein_pct']:.0f}%)",
        f"Kohlenhydrate ({consistency_data['avg_carbs_pct']:.0f}%)",
        f"Fett ({consistency_data['avg_fat_pct']:.0f}%)"
    ]

    # Invert the spread for display: higher bar = more consistent
    # (score = 100 - std_dev * 10, floored at 0).
    macro_scores = [
        max(0, 100 - consistency_data[key] * 10)
        for key in ('std_dev_protein', 'std_dev_carbs', 'std_dev_fat')
    ]
    bar_values = [consistency_data['consistency_score'], *macro_scores]

    return {
        "chart_type": "bar",
        "data": {
            "labels": bar_labels,
            "datasets": [
                {
                    "label": "Konsistenz-Score",
                    "data": bar_values,
                    "backgroundColor": ["#1D9E75", "#1D9E75", "#F59E0B", "#EF4444"],
                    "borderColor": "#085041",
                    "borderWidth": 1
                }
            ]
        },
        "metadata": {
            "confidence": consistency_data['confidence'],
            "data_points": consistency_data['data_points'],
            "consistency_score": consistency_data['consistency_score'],
            "std_dev_protein": round(consistency_data['std_dev_protein'], 2),
            "std_dev_carbs": round(consistency_data['std_dev_carbs'], 2),
            "std_dev_fat": round(consistency_data['std_dev_fat'], 2)
        }
    }
# ── NEW: Konzept-konforme Nutrition Endpoints (E3, E4, E5) ──────────────────
@router.get("/weekly-macro-distribution")
def get_weekly_macro_distribution_chart(
    weeks: int = Query(default=12, ge=4, le=52),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Weekly macro distribution (E3) - Konzept-konform.

    100% stacked weekly bars instead of a pie chart: shows macro
    consistency across weeks, not just one overall average.

    Args:
        weeks: Number of weeks to analyze (4-52, default 12)
        session: Auth session (injected)

    Returns:
        Chart.js stacked bar chart with weekly macro percentages;
        metadata includes per-macro averages and coefficients of
        variation (CV) across weeks.
    """
    profile_id = session['profile_id']
    from db import get_db, get_cursor
    import statistics

    def _cv(pcts):
        # Coefficient of variation in percent; 0 when undefined
        # (fewer than two weeks, or zero mean). Replaces the previously
        # triplicated inline expression.
        if len(pcts) > 1 and statistics.mean(pcts) > 0:
            return statistics.stdev(pcts) / statistics.mean(pcts) * 100
        return 0

    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(weeks=weeks)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT date, protein_g, carbs_g, fat_g, kcal
               FROM nutrition_log
               WHERE profile_id=%s AND date >= %s
               AND protein_g IS NOT NULL AND carbs_g IS NOT NULL
               AND fat_g IS NOT NULL AND kcal > 0
               ORDER BY date""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()

    if not rows or len(rows) < 7:
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": len(rows) if rows else 0,
                "message": "Nicht genug Daten für Wochen-Analyse (min. 7 Tage)"
            }
        }

    # Group rows by ISO week; DB may hand back date or datetime objects,
    # anything else is parsed from its ISO string form.
    weekly_data = {}
    for row in rows:
        date_obj = row['date'] if isinstance(row['date'], datetime) else datetime.fromisoformat(str(row['date']))
        iso_week = date_obj.strftime('%Y-W%V')
        bucket = weekly_data.setdefault(
            iso_week,
            {'protein': [], 'carbs': [], 'fat': [], 'kcal': []}
        )
        bucket['protein'].append(safe_float(row['protein_g']))
        bucket['carbs'].append(safe_float(row['carbs_g']))
        bucket['fat'].append(safe_float(row['fat_g']))
        bucket['kcal'].append(safe_float(row['kcal']))

    # Weekly gram averages -> calorie shares (protein/carbs 4 kcal/g, fat 9).
    labels = []
    protein_pcts = []
    carbs_pcts = []
    fat_pcts = []
    for iso_week in sorted(weekly_data.keys())[-weeks:]:
        data = weekly_data[iso_week]
        avg_protein = sum(data['protein']) / len(data['protein']) if data['protein'] else 0
        avg_carbs = sum(data['carbs']) / len(data['carbs']) if data['carbs'] else 0
        avg_fat = sum(data['fat']) / len(data['fat']) if data['fat'] else 0
        protein_kcal = avg_protein * 4
        carbs_kcal = avg_carbs * 4
        fat_kcal = avg_fat * 9
        total_kcal = protein_kcal + carbs_kcal + fat_kcal
        if total_kcal > 0:
            labels.append(f"KW {iso_week[-2:]}")
            protein_pcts.append(round((protein_kcal / total_kcal) * 100, 1))
            carbs_pcts.append(round((carbs_kcal / total_kcal) * 100, 1))
            fat_pcts.append(round((fat_kcal / total_kcal) * 100, 1))

    protein_cv = _cv(protein_pcts)
    carbs_cv = _cv(carbs_pcts)
    fat_cv = _cv(fat_pcts)

    return {
        "chart_type": "bar",
        "data": {
            "labels": labels,
            "datasets": [
                {
                    "label": "Protein (%)",
                    "data": protein_pcts,
                    "backgroundColor": "#1D9E75",
                    "stack": "macro"
                },
                {
                    "label": "Kohlenhydrate (%)",
                    "data": carbs_pcts,
                    "backgroundColor": "#F59E0B",
                    "stack": "macro"
                },
                {
                    "label": "Fett (%)",
                    "data": fat_pcts,
                    "backgroundColor": "#EF4444",
                    "stack": "macro"
                }
            ]
        },
        "metadata": {
            "confidence": calculate_confidence(len(rows), weeks * 7, "general"),
            "data_points": len(rows),
            "weeks_analyzed": len(labels),
            "avg_protein_pct": round(statistics.mean(protein_pcts), 1) if protein_pcts else 0,
            "avg_carbs_pct": round(statistics.mean(carbs_pcts), 1) if carbs_pcts else 0,
            "avg_fat_pct": round(statistics.mean(fat_pcts), 1) if fat_pcts else 0,
            "protein_cv": round(protein_cv, 1),
            "carbs_cv": round(carbs_cv, 1),
            "fat_cv": round(fat_cv, 1)
        }
    }
@router.get("/nutrition-adherence-score")
def get_nutrition_adherence_score(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Nutrition Adherence Score (E4) - Konzept-konform.

    Score 0-100, weighted by the profile's goal mode, from four
    components:
    - Calorie target adherence (placeholder until TDEE targets exist)
    - Protein target adherence
    - Intake consistency (inverse of kcal coefficient of variation)
    - Food quality (placeholder until fiber/sugar data is used)

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        {
            "score": 0-100,
            "components": {...},
            "goal_mode": str,
            "weights": {...},
            "recommendation": str,
            "metadata": {...}
        }
    """
    profile_id = session['profile_id']
    from db import get_db, get_cursor
    from data_layer.nutrition_metrics import get_protein_adequacy_data

    with get_db() as conn:
        cur = get_cursor(conn)

        # Goal mode drives the component weighting below.
        cur.execute("SELECT goal_mode FROM profiles WHERE id = %s", (profile_id,))
        profile_row = cur.fetchone()
        goal_mode = profile_row['goal_mode'] if profile_row and profile_row['goal_mode'] else 'health'

        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')

        # Aggregate nutrition stats for the window.
        cur.execute(
            """SELECT COUNT(*) as cnt,
                      AVG(kcal) as avg_kcal,
                      STDDEV(kcal) as std_kcal,
                      AVG(protein_g) as avg_protein,
                      AVG(carbs_g) as avg_carbs,
                      AVG(fat_g) as avg_fat
               FROM nutrition_log
               WHERE profile_id=%s AND date >= %s
               AND kcal IS NOT NULL""",
            (profile_id, cutoff)
        )
        stats = cur.fetchone()

    if not stats or stats['cnt'] < 7:
        return {
            "score": 0,
            "components": {},
            "metadata": {
                "confidence": "insufficient",
                "message": "Nicht genug Daten (min. 7 Tage)"
            }
        }

    protein_data = get_protein_adequacy_data(profile_id, days)

    # 1. Calorie adherence (placeholder, needs goal-specific logic)
    calorie_adherence = 70.0  # TODO: Calculate based on TDEE target

    # 2. Protein adherence
    protein_adequacy_pct = protein_data.get('adequacy_score', 0)
    protein_adherence = min(100, protein_adequacy_pct)

    # 3. Intake consistency: low kcal volatility (CV) = high score
    kcal_cv = (safe_float(stats['std_kcal']) / safe_float(stats['avg_kcal']) * 100) if safe_float(stats['avg_kcal']) > 0 else 100
    intake_consistency = max(0, 100 - kcal_cv)

    # 4. Food quality (placeholder for fiber/sugar analysis)
    food_quality = 60.0  # TODO: Calculate from fiber/sugar data

    # Goal-specific weighting (from concept E4)
    if goal_mode == 'weight_loss':
        weights = {
            'calorie': 0.35,
            'protein': 0.25,
            'consistency': 0.20,
            'quality': 0.20
        }
    elif goal_mode == 'strength':
        weights = {
            'calorie': 0.25,
            'protein': 0.35,
            'consistency': 0.20,
            'quality': 0.20
        }
    elif goal_mode == 'endurance':
        weights = {
            'calorie': 0.30,
            'protein': 0.20,
            'consistency': 0.20,
            'quality': 0.30
        }
    else:  # health, recomposition
        weights = {
            'calorie': 0.25,
            'protein': 0.25,
            'consistency': 0.25,
            'quality': 0.25
        }

    final_score = (
        calorie_adherence * weights['calorie'] +
        protein_adherence * weights['protein'] +
        intake_consistency * weights['consistency'] +
        food_quality * weights['quality']
    )

    components = {
        'calorie_adherence': round(calorie_adherence, 1),
        'protein_adherence': round(protein_adherence, 1),
        'intake_consistency': round(intake_consistency, 1),
        'food_quality': round(food_quality, 1)
    }

    # Flag any component below 60 as an improvement area.
    weak_areas = [k for k, v in components.items() if v < 60]
    if weak_areas:
        recommendation = f"Verbesserungspotenzial: {', '.join(weak_areas)}"
    else:
        recommendation = "Gute Adhärenz, weiter so!"

    return {
        "score": round(final_score, 1),
        "components": components,
        "goal_mode": goal_mode,
        "weights": weights,
        "recommendation": recommendation,
        "metadata": {
            "confidence": calculate_confidence(stats['cnt'], days, "general"),
            "data_points": stats['cnt'],
            "days_analyzed": days
        }
    }
@router.get("/energy-availability-warning")
def get_energy_availability_warning(
    days: int = Query(default=14, ge=7, le=28),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Energy Availability Warning (E5) - Konzept-konform.

    Heuristic warning for potential undernutrition/overtraining. Each
    check is best-effort: a failing metric calculation never fails the
    endpoint, it just skips that trigger.

    Checks:
    - Persistent large energy deficit (> 500 kcal/day)
    - Recovery score low (< 50)
    - Sleep quality reduced (< 60%)
    - Lean body mass declining (> 1.0 kg over 28d)

    Args:
        days: Analysis window (7-28 days, default 14)
        session: Auth session (injected)

    Returns:
        {
            "warning_level": "none" | "caution" | "warning",
            "triggers": [...],
            "message": "...",
            "metadata": {...}
        }
    """
    profile_id = session['profile_id']
    from data_layer.nutrition_metrics import get_energy_balance_data
    from data_layer.recovery_metrics import calculate_recovery_score_v2, calculate_sleep_quality_7d
    from data_layer.body_metrics import calculate_lbm_28d_change

    triggers = []
    warning_level = "none"

    # Check 1: Large energy deficit
    energy_data = get_energy_balance_data(profile_id, days)
    if energy_data.get('energy_balance', 0) < -500:
        triggers.append("Großes Energiedefizit (>500 kcal/Tag)")

    # Checks 2-4 are best-effort. Only Exception is caught (a bare
    # `except:` would also swallow KeyboardInterrupt/SystemExit).
    # Check 2: Recovery declining
    try:
        recovery_score = calculate_recovery_score_v2(profile_id)
        if recovery_score and recovery_score < 50:
            triggers.append("Recovery Score niedrig (<50)")
    except Exception:
        pass

    # Check 3: Sleep quality
    try:
        sleep_quality = calculate_sleep_quality_7d(profile_id)
        if sleep_quality and sleep_quality < 60:
            triggers.append("Schlafqualität reduziert (<60%)")
    except Exception:
        pass

    # Check 4: LBM declining
    try:
        lbm_change = calculate_lbm_28d_change(profile_id)
        if lbm_change and lbm_change < -1.0:
            triggers.append("Magermasse sinkt (-{:.1f} kg)".format(abs(lbm_change)))
    except Exception:
        pass

    # Escalate with the number of simultaneous triggers.
    if len(triggers) >= 3:
        warning_level = "warning"
        message = "⚠️ Hinweis auf mögliche Unterversorgung. Mehrere Indikatoren auffällig. Erwäge Defizit-Anpassung oder Regenerationswoche."
    elif len(triggers) >= 2:
        warning_level = "caution"
        message = "⚡ Beobachte folgende Signale genau. Aktuell noch kein Handlungsbedarf, aber Trend beachten."
    elif len(triggers) >= 1:
        warning_level = "caution"
        message = "💡 Ein Indikator auffällig. Weiter beobachten."
    else:
        message = "✅ Energieverfügbarkeit unauffällig."

    return {
        "warning_level": warning_level,
        "triggers": triggers,
        "message": message,
        "metadata": {
            "days_analyzed": days,
            "trigger_count": len(triggers),
            "note": "Heuristische Einschätzung, keine medizinische Diagnose"
        }
    }
@router.get("/training-volume")
def get_training_volume_chart(
    weeks: int = Query(default=12, ge=4, le=52),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Training volume week-over-week (A1).

    Weekly training minutes aggregated in SQL (DATE_TRUNC per week),
    rendered as a bar chart.

    Args:
        weeks: Number of weeks to analyze (4-52, default 12)
        session: Auth session (injected)

    Returns:
        Chart.js bar chart with weekly training minutes; metadata carries
        avg_minutes_week and total_sessions.
    """
    profile_id = session['profile_id']
    from db import get_db, get_cursor

    cutoff = (datetime.now() - timedelta(weeks=weeks)).strftime('%Y-%m-%d')
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute(
            """SELECT
               DATE_TRUNC('week', date) as week_start,
               SUM(duration_min) as total_minutes,
               COUNT(*) as session_count
               FROM activity_log
               WHERE profile_id=%s AND date >= %s
               GROUP BY week_start
               ORDER BY week_start""",
            (profile_id, cutoff)
        )
        weekly_rows = cur.fetchall()

    if not weekly_rows:
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Aktivitätsdaten vorhanden"
            }
        }

    week_labels = [wk['week_start'].strftime('KW %V') for wk in weekly_rows]
    minute_totals = [safe_float(wk['total_minutes']) for wk in weekly_rows]

    return {
        "chart_type": "bar",
        "data": {
            "labels": week_labels,
            "datasets": [
                {
                    "label": "Trainingsminuten",
                    "data": minute_totals,
                    "backgroundColor": "#1D9E75",
                    "borderColor": "#085041",
                    "borderWidth": 1
                }
            ]
        },
        "metadata": serialize_dates({
            "confidence": calculate_confidence(len(weekly_rows), weeks * 7, "general"),
            "data_points": len(weekly_rows),
            "avg_minutes_week": round(sum(minute_totals) / len(minute_totals), 1) if minute_totals else 0,
            "total_sessions": sum(wk['session_count'] for wk in weekly_rows)
        })
    }
@router.get("/training-type-distribution")
def get_training_type_distribution_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Training type distribution (A2).

    Shows the distribution of training categories as a pie chart.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js pie chart with training categories; metadata carries
        total/categorized/uncategorized session counts.
    """
    profile_id = session['profile_id']
    dist_data = get_training_type_distribution_data(profile_id, days)

    if dist_data['confidence'] == 'insufficient':
        return {
            "chart_type": "pie",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Trainingstypen-Daten"
            }
        }

    labels = [item['category'] for item in dist_data['distribution']]
    values = [item['count'] for item in dist_data['distribution']]

    # Color palette for training categories
    colors = [
        "#1D9E75", "#3B82F6", "#F59E0B", "#EF4444",
        "#8B5CF6", "#10B981", "#F97316", "#06B6D4"
    ]
    # Cycle the palette: with more than len(colors) categories, the old
    # `colors[:len(values)]` slice left the extra slices uncolored.
    slice_colors = [colors[i % len(colors)] for i in range(len(values))]

    return {
        "chart_type": "pie",
        "data": {
            "labels": labels,
            "datasets": [
                {
                    "data": values,
                    "backgroundColor": slice_colors,
                    "borderWidth": 2,
                    "borderColor": "#fff"
                }
            ]
        },
        "metadata": {
            "confidence": dist_data['confidence'],
            "total_sessions": dist_data['total_sessions'],
            "categorized_sessions": dist_data['categorized_sessions'],
            "uncategorized_sessions": dist_data['uncategorized_sessions']
        }
    }
@router.get("/quality-sessions")
def get_quality_sessions_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Quality session rate (A3).

    Splits the period's sessions into quality sessions (RPE >= 7 or
    duration >= 60min, per the data layer) vs. regular sessions and
    renders both counts as a bar chart.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js bar chart with quality metrics.
    """
    profile_id = session['profile_id']

    # Percentage comes from the data layer; counts are derived below.
    quality_pct = calculate_quality_sessions_pct(profile_id, days)

    from db import get_db, get_cursor
    cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute(
            """SELECT COUNT(*) as total
               FROM activity_log
               WHERE profile_id=%s AND date >= %s""",
            (profile_id, cutoff)
        )
        row = cur.fetchone()
    total_sessions = row['total'] if row else 0

    if not total_sessions:
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Aktivitätsdaten"
            }
        }

    quality_count = int(quality_pct / 100 * total_sessions)
    regular_count = total_sessions - quality_count

    return {
        "chart_type": "bar",
        "data": {
            "labels": ["Qualitäts-Sessions", "Reguläre Sessions"],
            "datasets": [
                {
                    "label": "Anzahl",
                    "data": [quality_count, regular_count],
                    "backgroundColor": ["#1D9E75", "#888"],
                    "borderColor": "#085041",
                    "borderWidth": 1
                }
            ]
        },
        "metadata": {
            "confidence": calculate_confidence(total_sessions, days, "general"),
            "data_points": total_sessions,
            "quality_pct": round(quality_pct, 1),
            "quality_count": quality_count,
            "regular_count": regular_count
        }
    }
@router.get("/load-monitoring")
def get_load_monitoring_chart(
    days: int = Query(default=28, ge=14, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Load monitoring (A4).

    Renders a daily training-load timeline (duration * RPE, RPE defaulting
    to 5 when missing) and reports acute (7d) vs. chronic (28d) load plus
    the ACWR in the metadata.

    Args:
        days: Analysis window (14-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js line chart with load metrics
    """
    profile_id = session['profile_id']
    acute_load = calculate_proxy_internal_load_7d(profile_id)
    chronic_load = calculate_proxy_internal_load_7d(profile_id, days=28)
    # NOTE(review): ACWR is taken as acute/chronic directly. If the helper
    # returns *total* window loads (7d sum vs 28d sum), the classic ACWR
    # would divide by the weekly average (chronic/4) instead — confirm the
    # helper's semantics before relying on acwr_status.
    acwr = acute_load / chronic_load if chronic_load > 0 else 0
    from db import get_db, get_cursor
    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT
                date,
                SUM(duration_min * COALESCE(rpe, 5)) as daily_load
            FROM activity_log
            WHERE profile_id=%s AND date >= %s
            GROUP BY date
            ORDER BY date""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()
    if not rows:
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Load-Daten"
            }
        }
    timeline = [r['date'].isoformat() for r in rows]
    daily_loads = [safe_float(r['daily_load']) for r in rows]
    return {
        "chart_type": "line",
        "data": {
            "labels": timeline,
            "datasets": [{
                "label": "Tages-Load",
                "data": daily_loads,
                "borderColor": "#1D9E75",
                "backgroundColor": "rgba(29, 158, 117, 0.1)",
                "borderWidth": 2,
                "tension": 0.3,
                "fill": True
            }]
        },
        "metadata": serialize_dates({
            "confidence": calculate_confidence(len(rows), days, "general"),
            "data_points": len(rows),
            "acute_load_7d": round(acute_load, 1),
            "chronic_load_28d": round(chronic_load, 1),
            "acwr": round(acwr, 2),
            "acwr_status": "optimal" if 0.8 <= acwr <= 1.3 else "suboptimal"
        })
    }
@router.get("/monotony-strain")
def get_monotony_strain_chart(
    days: int = Query(default=7, ge=7, le=28),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Monotony & Strain (A5).

    Presents training monotony and strain scores side by side as bars.

    Args:
        days: Analysis window (7-28 days, default 7)
        session: Auth session (injected)

    Returns:
        Chart.js bar chart with monotony and strain
    """
    profile_id = session['profile_id']
    monotony = calculate_monotony_score(profile_id, days)
    strain = calculate_strain_score(profile_id, days)
    # Both scores exactly zero means no usable training data in the window.
    if strain == 0 and monotony == 0:
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Nicht genug Daten für Monotonie-Analyse"
            }
        }
    monotony_rounded = round(monotony, 2)
    strain_rounded = round(strain, 1)
    return {
        "chart_type": "bar",
        "data": {
            "labels": ["Monotonie", "Strain"],
            "datasets": [{
                "label": "Score",
                "data": [monotony_rounded, strain_rounded],
                "backgroundColor": ["#F59E0B", "#EF4444"],
                "borderColor": "#085041",
                "borderWidth": 1
            }]
        },
        "metadata": {
            # Confidence is fixed for monotony calculations.
            "confidence": "medium",
            "monotony_score": monotony_rounded,
            "strain_score": strain_rounded,
            "monotony_status": "high" if monotony > 2.0 else "normal",
            "strain_status": "high" if strain > 10000 else "normal"
        }
    }
@router.get("/ability-balance")
def get_ability_balance_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Ability balance radar chart (A6).

    Visualizes how training time is distributed across the 5 abilities.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js radar chart with ability balance
    """
    profile_id = session['profile_id']
    balance_data = calculate_ability_balance(profile_id, days)
    if balance_data['total_minutes'] == 0:
        return {
            "chart_type": "radar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Aktivitätsdaten"
            }
        }
    # Fixed axis order: German labels paired with the helper's pct keys.
    pct_keys = [
        'strength_pct', 'endurance_pct', 'flexibility_pct',
        'balance_pct', 'mind_pct'
    ]
    axis_labels = ["Kraft", "Ausdauer", "Beweglichkeit", "Gleichgewicht", "Geist"]
    axis_values = [balance_data[key] for key in pct_keys]
    return {
        "chart_type": "radar",
        "data": {
            "labels": axis_labels,
            "datasets": [{
                "label": "Fähigkeiten-Balance (%)",
                "data": axis_values,
                "borderColor": "#1D9E75",
                "backgroundColor": "rgba(29, 158, 117, 0.2)",
                "borderWidth": 2,
                "pointBackgroundColor": "#1D9E75",
                "pointBorderColor": "#fff",
                "pointHoverBackgroundColor": "#fff",
                "pointHoverBorderColor": "#1D9E75"
            }]
        },
        "metadata": {
            "confidence": balance_data['confidence'],
            "total_minutes": balance_data['total_minutes'],
            **{key: round(balance_data[key], 1) for key in pct_keys}
        }
    }
@router.get("/volume-by-ability")
def get_volume_by_ability_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Training volume by ability (A8).

    Aggregates absolute training minutes per ability category.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js bar chart with volume per ability
    """
    profile_id = session['profile_id']
    from db import get_db, get_cursor
    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT
                COALESCE(ability, 'unknown') as ability,
                SUM(duration_min) as total_minutes
            FROM activity_log
            WHERE profile_id=%s AND date >= %s
            GROUP BY ability
            ORDER BY total_minutes DESC""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()
    if not rows:
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Ability-Daten"
            }
        }
    # German display names for the ability keys; unmapped keys pass through.
    ability_map = {
        "strength": "Kraft",
        "endurance": "Ausdauer",
        "flexibility": "Beweglichkeit",
        "balance": "Gleichgewicht",
        "mind": "Geist",
        "unknown": "Nicht zugeordnet"
    }
    bar_labels = [ability_map.get(r['ability'], r['ability']) for r in rows]
    bar_values = [safe_float(r['total_minutes']) for r in rows]
    return {
        "chart_type": "bar",
        "data": {
            "labels": bar_labels,
            "datasets": [{
                "label": "Trainingsminuten",
                "data": bar_values,
                "backgroundColor": "#1D9E75",
                "borderColor": "#085041",
                "borderWidth": 1
            }]
        },
        "metadata": {
            "confidence": calculate_confidence(len(rows), days, "general"),
            "data_points": len(rows),
            "total_minutes": sum(bar_values)
        }
    }
# ── Recovery Charts ─────────────────────────────────────────────────────────
@router.get("/recovery-score")
def get_recovery_score_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Recovery score timeline (R1).

    Plots daily recovery scores over time. For the PoC the timeline is a
    proxy built from stored HRV values; only the current score comes from
    the real recovery calculation.
    TODO: Store historical recovery scores for a true timeline.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js line chart with recovery scores
    """
    profile_id = session['profile_id']
    current_score = calculate_recovery_score_v2(profile_id)
    if current_score is None:
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Recovery-Daten vorhanden"
            }
        }
    # Pull vitals history to approximate a timeline.
    from db import get_db, get_cursor
    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT date, resting_hr, hrv_ms
            FROM vitals_baseline
            WHERE profile_id=%s AND date >= %s
            ORDER BY date""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()
    if not rows:
        # No vitals history: fall back to a single point (today's score).
        return {
            "chart_type": "line",
            "data": {
                "labels": [datetime.now().strftime('%Y-%m-%d')],
                "datasets": [{
                    "label": "Recovery Score",
                    "data": [current_score],
                    "borderColor": "#1D9E75",
                    "backgroundColor": "rgba(29, 158, 117, 0.1)",
                    "borderWidth": 2,
                    "tension": 0.3,
                    "fill": True
                }]
            },
            "metadata": {
                "confidence": "low",
                "data_points": 1,
                "current_score": current_score
            }
        }
    # Proxy: treat HRV as the recovery indicator (higher HRV = better
    # recovery), clamped to 0-100; missing HRV defaults to 50 (neutral).
    timeline = [r['date'].isoformat() for r in rows]
    proxy_scores = []
    for r in rows:
        hrv = safe_float(r['hrv_ms']) if r['hrv_ms'] else 50
        proxy_scores.append(min(100, max(0, hrv)))
    return {
        "chart_type": "line",
        "data": {
            "labels": timeline,
            "datasets": [{
                "label": "Recovery Score (proxy)",
                "data": proxy_scores,
                "borderColor": "#1D9E75",
                "backgroundColor": "rgba(29, 158, 117, 0.1)",
                "borderWidth": 2,
                "tension": 0.3,
                "fill": True
            }]
        },
        "metadata": serialize_dates({
            "confidence": calculate_confidence(len(rows), days, "general"),
            "data_points": len(rows),
            "current_score": current_score,
            "note": "Score based on HRV proxy; true recovery score calculation in development"
        })
    }
@router.get("/hrv-rhr-baseline")
def get_hrv_rhr_baseline_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    HRV/RHR vs baseline (R2).

    Plots HRV and RHR trends on separate axes; baseline deviations (as
    percentages) are included in the metadata.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js multi-line chart with HRV and RHR
    """
    profile_id = session['profile_id']
    from db import get_db, get_cursor
    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT date, resting_hr, hrv_ms
            FROM vitals_baseline
            WHERE profile_id=%s AND date >= %s
            ORDER BY date""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()
    if not rows:
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Vitalwerte vorhanden"
            }
        }
    timeline = [r['date'].isoformat() for r in rows]
    hrv_series = [safe_float(r['hrv_ms']) if r['hrv_ms'] else None for r in rows]
    rhr_series = [safe_float(r['resting_hr']) if r['resting_hr'] else None for r in rows]
    # Helpers return the % deviation from the 28d-median baseline.
    hrv_baseline = calculate_hrv_vs_baseline_pct(profile_id)
    rhr_baseline = calculate_rhr_vs_baseline_pct(profile_id)
    # Window averages as an approximation of the actual baseline values;
    # fall back to typical defaults when a series is entirely empty.
    hrv_present = [v for v in hrv_series if v is not None]
    rhr_present = [v for v in rhr_series if v is not None]
    avg_hrv = sum(hrv_present) / len(hrv_present) if hrv_present else 50
    avg_rhr = sum(rhr_present) / len(rhr_present) if rhr_present else 60
    return {
        "chart_type": "line",
        "data": {
            "labels": timeline,
            "datasets": [
                {
                    "label": "HRV (ms)",
                    "data": hrv_series,
                    "borderColor": "#1D9E75",
                    "backgroundColor": "rgba(29, 158, 117, 0.1)",
                    "borderWidth": 2,
                    "tension": 0.3,
                    "yAxisID": "y1",
                    "fill": False
                },
                {
                    "label": "RHR (bpm)",
                    "data": rhr_series,
                    "borderColor": "#3B82F6",
                    "backgroundColor": "rgba(59, 130, 246, 0.1)",
                    "borderWidth": 2,
                    "tension": 0.3,
                    "yAxisID": "y2",
                    "fill": False
                }
            ]
        },
        "metadata": serialize_dates({
            "confidence": calculate_confidence(len(rows), days, "general"),
            "data_points": len(rows),
            "avg_hrv": round(avg_hrv, 1),
            "avg_rhr": round(avg_rhr, 1),
            "hrv_vs_baseline_pct": hrv_baseline,
            "rhr_vs_baseline_pct": rhr_baseline
        })
    }
@router.get("/sleep-duration-quality")
def get_sleep_duration_quality_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Sleep duration + quality (R3).

    Plots nightly sleep duration plus a simple quality score (% of an
    8-hour target) over time.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js multi-line chart with sleep metrics
    """
    profile_id = session['profile_id']
    duration_data = get_sleep_duration_data(profile_id, days)
    quality_data = get_sleep_quality_data(profile_id, days)
    if duration_data['confidence'] == 'insufficient':
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Schlafdaten vorhanden"
            }
        }
    from db import get_db, get_cursor
    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT date, total_sleep_min
            FROM sleep_log
            WHERE profile_id=%s AND date >= %s
            ORDER BY date""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()
    if not rows:
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Schlafdaten"
            }
        }
    timeline = [r['date'].isoformat() for r in rows]
    hours_slept = [
        safe_float(r['total_sleep_min']) / 60 if r['total_sleep_min'] else None
        for r in rows
    ]
    # Quality proxy: percent of an 8-hour night (None nights stay None).
    quality_series = [(h / 8 * 100) if h else None for h in hours_slept]
    return {
        "chart_type": "line",
        "data": {
            "labels": timeline,
            "datasets": [
                {
                    "label": "Schlafdauer (h)",
                    "data": hours_slept,
                    "borderColor": "#3B82F6",
                    "backgroundColor": "rgba(59, 130, 246, 0.1)",
                    "borderWidth": 2,
                    "tension": 0.3,
                    "yAxisID": "y1",
                    "fill": True
                },
                {
                    "label": "Qualität (%)",
                    "data": quality_series,
                    "borderColor": "#1D9E75",
                    "backgroundColor": "rgba(29, 158, 117, 0.1)",
                    "borderWidth": 2,
                    "tension": 0.3,
                    "yAxisID": "y2",
                    "fill": False
                }
            ]
        },
        "metadata": serialize_dates({
            "confidence": duration_data['confidence'],
            "data_points": len(rows),
            "avg_duration_hours": round(duration_data['avg_duration_hours'], 1),
            "sleep_quality_score": quality_data.get('sleep_quality_score', 0)
        })
    }
@router.get("/sleep-debt")
def get_sleep_debt_chart(
    days: int = Query(default=28, ge=7, le=90),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Sleep debt accumulation (R4).

    Plots the running sum of nightly sleep deficits against an 8h/night
    target.

    Args:
        days: Analysis window (7-90 days, default 28)
        session: Auth session (injected)

    Returns:
        Chart.js line chart with sleep debt
    """
    profile_id = session['profile_id']
    current_debt = calculate_sleep_debt_hours(profile_id)
    if current_debt is None:
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Schlafdaten für Schulden-Berechnung"
            }
        }
    from db import get_db, get_cursor
    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        cur.execute(
            """SELECT date, total_sleep_min
            FROM sleep_log
            WHERE profile_id=%s AND date >= %s
            ORDER BY date""",
            (profile_id, cutoff)
        )
        rows = cur.fetchall()
    if not rows:
        return {
            "chart_type": "line",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Schlafdaten"
            }
        }
    timeline = [r['date'].isoformat() for r in rows]
    # Running total of nightly deficits relative to the 8h target; a
    # missing/zero total_sleep_min counts as a full 8h deficit.
    target_hours = 8.0
    running_debt = 0
    debt_series = []
    for r in rows:
        slept = safe_float(r['total_sleep_min']) / 60 if r['total_sleep_min'] else 0
        running_debt += target_hours - slept
        debt_series.append(running_debt)
    return {
        "chart_type": "line",
        "data": {
            "labels": timeline,
            "datasets": [{
                "label": "Schlafschuld (Stunden)",
                "data": debt_series,
                "borderColor": "#EF4444",
                "backgroundColor": "rgba(239, 68, 68, 0.1)",
                "borderWidth": 2,
                "tension": 0.3,
                "fill": True
            }]
        },
        "metadata": serialize_dates({
            "confidence": calculate_confidence(len(rows), days, "general"),
            "data_points": len(rows),
            "current_debt_hours": round(current_debt, 1),
            "final_debt_hours": round(running_debt, 1)
        })
    }
@router.get("/vital-signs-matrix")
def get_vital_signs_matrix_chart(
    days: int = Query(default=7, ge=7, le=30),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Vital signs matrix (R5).

    Collects the most recent vitals and blood-pressure readings into a
    horizontal bar chart; only values actually present are shown.

    Args:
        days: Max age of measurements (7-30 days, default 7)
        session: Auth session (injected)

    Returns:
        Chart.js horizontal bar chart with vital signs
    """
    profile_id = session['profile_id']
    from db import get_db, get_cursor
    with get_db() as conn:
        cur = get_cursor(conn)
        cutoff = (datetime.now() - timedelta(days=days)).strftime('%Y-%m-%d')
        # Most recent vitals row within the window.
        cur.execute(
            """SELECT resting_hr, hrv_ms, vo2_max, spo2, respiratory_rate
            FROM vitals_baseline
            WHERE profile_id=%s AND date >= %s
            ORDER BY date DESC
            LIMIT 1""",
            (profile_id, cutoff)
        )
        vitals_row = cur.fetchone()
        # Most recent blood-pressure reading within the window.
        cur.execute(
            """SELECT systolic, diastolic
            FROM blood_pressure_log
            WHERE profile_id=%s AND date >= %s
            ORDER BY date DESC, time DESC
            LIMIT 1""",
            (profile_id, cutoff)
        )
        bp_row = cur.fetchone()
    if not vitals_row and not bp_row:
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine aktuellen Vitalwerte"
            }
        }
    # Column -> display-label pairs, appended only when a value is present.
    labels = []
    values = []
    if vitals_row:
        for column, caption in (
            ('resting_hr', "Ruhepuls (bpm)"),
            ('hrv_ms', "HRV (ms)"),
            ('vo2_max', "VO2 Max"),
            ('spo2', "SpO2 (%)"),
            ('respiratory_rate', "Atemfrequenz"),
        ):
            if vitals_row[column]:
                labels.append(caption)
                values.append(safe_float(vitals_row[column]))
    if bp_row:
        for column, caption in (
            ('systolic', "Blutdruck sys (mmHg)"),
            ('diastolic', "Blutdruck dia (mmHg)"),
        ):
            if bp_row[column]:
                labels.append(caption)
                values.append(safe_float(bp_row[column]))
    if not labels:
        # Rows existed but every individual field was NULL/zero.
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Keine Vitalwerte verfügbar"
            }
        }
    return {
        "chart_type": "bar",
        "data": {
            "labels": labels,
            "datasets": [{
                "label": "Wert",
                "data": values,
                "backgroundColor": "#1D9E75",
                "borderColor": "#085041",
                "borderWidth": 1
            }]
        },
        "metadata": {
            "confidence": "medium",
            "data_points": len(values),
            "note": f"Latest measurements within last {days} days"
        }
    }
# ── Correlation Charts ──────────────────────────────────────────────────────
@router.get("/weight-energy-correlation")
def get_weight_energy_correlation_chart(
    max_lag: int = Query(default=14, ge=7, le=28),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Weight vs energy balance correlation (C1).

    Reports the lag correlation between energy intake and weight change,
    plotted as a single best-lag point.

    Args:
        max_lag: Maximum lag days to analyze (7-28, default 14)
        session: Auth session (injected)

    Returns:
        Chart.js scatter chart with correlation data
    """
    profile_id = session['profile_id']
    corr_data = calculate_lag_correlation(profile_id, "energy_balance", "weight", max_lag)
    has_result = bool(corr_data) and corr_data.get('correlation') is not None
    if not has_result:
        return {
            "chart_type": "scatter",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Nicht genug Daten für Korrelationsanalyse"
            }
        }
    # Single point: the best lag vs. its correlation coefficient.
    best_lag = corr_data.get('best_lag_days', 0)
    correlation = corr_data.get('correlation', 0)
    return {
        "chart_type": "scatter",
        "data": {
            "labels": [f"Lag {best_lag} Tage"],
            "datasets": [{
                "label": "Korrelation",
                "data": [{"x": best_lag, "y": correlation}],
                "backgroundColor": "#1D9E75",
                "borderColor": "#085041",
                "borderWidth": 2,
                "pointRadius": 8
            }]
        },
        "metadata": {
            "confidence": corr_data.get('confidence', 'low'),
            "correlation": round(correlation, 3),
            "best_lag_days": best_lag,
            "interpretation": corr_data.get('interpretation', ''),
            "data_points": corr_data.get('data_points', 0)
        }
    }
@router.get("/lbm-protein-correlation")
def get_lbm_protein_correlation_chart(
    max_lag: int = Query(default=14, ge=7, le=28),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Lean mass vs protein intake correlation (C2).

    Reports the lag correlation between protein intake and lean mass
    change, plotted as a single best-lag point.

    Args:
        max_lag: Maximum lag days to analyze (7-28, default 14)
        session: Auth session (injected)

    Returns:
        Chart.js scatter chart with correlation data
    """
    profile_id = session['profile_id']
    corr_data = calculate_lag_correlation(profile_id, "protein", "lbm", max_lag)
    has_result = bool(corr_data) and corr_data.get('correlation') is not None
    if not has_result:
        return {
            "chart_type": "scatter",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Nicht genug Daten für LBM-Protein Korrelation"
            }
        }
    best_lag = corr_data.get('best_lag_days', 0)
    correlation = corr_data.get('correlation', 0)
    return {
        "chart_type": "scatter",
        "data": {
            "labels": [f"Lag {best_lag} Tage"],
            "datasets": [{
                "label": "Korrelation",
                "data": [{"x": best_lag, "y": correlation}],
                "backgroundColor": "#3B82F6",
                "borderColor": "#1E40AF",
                "borderWidth": 2,
                "pointRadius": 8
            }]
        },
        "metadata": {
            "confidence": corr_data.get('confidence', 'low'),
            "correlation": round(correlation, 3),
            "best_lag_days": best_lag,
            "interpretation": corr_data.get('interpretation', ''),
            "data_points": corr_data.get('data_points', 0)
        }
    }
@router.get("/load-vitals-correlation")
def get_load_vitals_correlation_chart(
    max_lag: int = Query(default=14, ge=7, le=28),
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Training load vs vitals correlation (C3).

    Shows lag correlation between training load and HRV/RHR; the metric
    with the stronger absolute correlation is reported.

    Args:
        max_lag: Maximum lag days to analyze (7-28, default 14)
        session: Auth session (injected)

    Returns:
        Chart.js scatter chart with correlation data
    """
    profile_id = session['profile_id']
    corr_hrv = calculate_lag_correlation(profile_id, "load", "hrv", max_lag)
    corr_rhr = calculate_lag_correlation(profile_id, "load", "rhr", max_lag)
    # Treat results without a usable correlation value as missing. The
    # other correlation endpoints (C1/C2) guard against a None
    # 'correlation'; without this, abs(None) below would raise TypeError.
    if corr_hrv and corr_hrv.get('correlation') is None:
        corr_hrv = None
    if corr_rhr and corr_rhr.get('correlation') is None:
        corr_rhr = None
    # Pick the stronger metric. metric_name is assigned alongside the
    # data (the old `corr_data == corr_hrv` dict-equality check could
    # mislabel the metric when both results happened to compare equal).
    if corr_hrv and corr_rhr:
        if abs(corr_hrv['correlation']) > abs(corr_rhr['correlation']):
            corr_data, metric_name = corr_hrv, "HRV"
        else:
            corr_data, metric_name = corr_rhr, "RHR"
    elif corr_hrv:
        corr_data, metric_name = corr_hrv, "HRV"
    elif corr_rhr:
        corr_data, metric_name = corr_rhr, "RHR"
    else:
        return {
            "chart_type": "scatter",
            "data": {
                "labels": [],
                "datasets": []
            },
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Nicht genug Daten für Load-Vitals Korrelation"
            }
        }
    best_lag = corr_data.get('best_lag_days', 0)
    correlation = corr_data.get('correlation', 0)
    return {
        "chart_type": "scatter",
        "data": {
            "labels": [f"Load → {metric_name} (Lag {best_lag}d)"],
            "datasets": [
                {
                    "label": "Korrelation",
                    "data": [{"x": best_lag, "y": correlation}],
                    "backgroundColor": "#F59E0B",
                    "borderColor": "#D97706",
                    "borderWidth": 2,
                    "pointRadius": 8
                }
            ]
        },
        "metadata": {
            "confidence": corr_data.get('confidence', 'low'),
            "correlation": round(correlation, 3),
            "best_lag_days": best_lag,
            "metric": metric_name,
            "interpretation": corr_data.get('interpretation', ''),
            "data_points": corr_data.get('data_points', 0)
        }
    }
@router.get("/recovery-performance")
def get_recovery_performance_chart(
    session: dict = Depends(require_auth)
) -> Dict:
    """
    Recovery vs performance correlation (C4).

    Presents the top hindering (negative bars) and helpful (positive bars)
    factors from the driver analysis.

    Args:
        session: Auth session (injected)

    Returns:
        Chart.js bar chart with top drivers
    """
    profile_id = session['profile_id']
    drivers = calculate_top_drivers(profile_id)
    if not drivers:
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "insufficient",
                "data_points": 0,
                "message": "Nicht genug Daten für Driver-Analyse"
            }
        }
    # Bucket drivers by impact, then keep the top 3 of each.
    top_hindering = [d for d in drivers if d.get('impact', '') == 'hindering'][:3]
    top_helpful = [d for d in drivers if d.get('impact', '') == 'helpful'][:3]
    labels, values, colors = [], [], []
    # Hindering factors plot as negative bars, helpful as positive.
    for sign, color, bucket in (
        (-1, "#EF4444", top_hindering),
        (1, "#1D9E75", top_helpful),
    ):
        for d in bucket:
            labels.append(f"{d.get('factor', '')}")
            values.append(sign * abs(d.get('score', 0)))
            colors.append(color)
    if not labels:
        return {
            "chart_type": "bar",
            "data": {"labels": [], "datasets": []},
            "metadata": {
                "confidence": "low",
                "data_points": 0,
                "message": "Keine signifikanten Treiber gefunden"
            }
        }
    return {
        "chart_type": "bar",
        "data": {
            "labels": labels,
            "datasets": [{
                "label": "Impact Score",
                "data": values,
                "backgroundColor": colors,
                "borderColor": "#085041",
                "borderWidth": 1
            }]
        },
        "metadata": {
            "confidence": "medium",
            "hindering_count": len(top_hindering),
            "helpful_count": len(top_helpful),
            "total_factors": len(drivers)
        }
    }
# ── Health Endpoint ──────────────────────────────────────────────────────────
@router.get("/health")
def health_check() -> Dict:
    """
    Health check endpoint for charts API.

    Returns:
        {
            "status": "ok",
            "version": "1.0",
            "available_charts": [...]
        }
    """
    # Static catalog of every chart endpoint this router serves:
    # (category, endpoint, chart type, description).
    catalog = [
        ("body", "/charts/weight-trend", "line", "Weight trend over time"),
        ("body", "/charts/body-composition", "line", "Body fat % and lean mass"),
        ("body", "/charts/circumferences", "bar", "Latest circumference measurements"),
        ("nutrition", "/charts/energy-balance", "line", "Daily calorie intake vs. TDEE"),
        ("nutrition", "/charts/macro-distribution", "pie", "Protein/Carbs/Fat distribution"),
        ("nutrition", "/charts/protein-adequacy", "line", "Protein intake vs. target range"),
        ("nutrition", "/charts/nutrition-consistency", "bar", "Macro consistency score"),
        ("activity", "/charts/training-volume", "bar", "Weekly training minutes"),
        ("activity", "/charts/training-type-distribution", "pie", "Training category distribution"),
        ("activity", "/charts/quality-sessions", "bar", "Quality session rate"),
        ("activity", "/charts/load-monitoring", "line", "Acute vs chronic load + ACWR"),
        ("activity", "/charts/monotony-strain", "bar", "Training monotony and strain"),
        ("activity", "/charts/ability-balance", "radar", "Training balance across 5 abilities"),
        ("activity", "/charts/volume-by-ability", "bar", "Training volume per ability"),
        ("recovery", "/charts/recovery-score", "line", "Recovery score timeline"),
        ("recovery", "/charts/hrv-rhr-baseline", "line", "HRV and RHR vs baseline"),
        ("recovery", "/charts/sleep-duration-quality", "line", "Sleep duration and quality"),
        ("recovery", "/charts/sleep-debt", "line", "Cumulative sleep debt"),
        ("recovery", "/charts/vital-signs-matrix", "bar", "Latest vital signs overview"),
        ("correlations", "/charts/weight-energy-correlation", "scatter", "Weight vs energy balance (lag correlation)"),
        ("correlations", "/charts/lbm-protein-correlation", "scatter", "Lean mass vs protein intake (lag correlation)"),
        ("correlations", "/charts/load-vitals-correlation", "scatter", "Training load vs HRV/RHR (lag correlation)"),
        ("correlations", "/charts/recovery-performance", "bar", "Top drivers (hindering/helpful factors)"),
    ]
    return {
        "status": "ok",
        "version": "1.0",
        "phase": "0c",
        "available_charts": [
            {
                "category": category,
                "endpoint": endpoint,
                "type": chart_type,
                "description": description
            }
            for category, endpoint, chart_type, description in catalog
        ]
    }