v9d Phase 2d: Vitals Module Refactoring (Baseline + Blood Pressure) #22

Merged
Lars merged 29 commits from develop into main 2026-03-23 16:27:03 +01:00
28 changed files with 6152 additions and 67 deletions

View File

@ -135,12 +135,45 @@ frontend/src/
- `.claude/docs/functional/AI_PROMPTS.md` (erweitert um Fähigkeiten-Mapping)
- `.claude/docs/technical/CENTRAL_SUBSCRIPTION_SYSTEM.md`
### v9d Phase 2 🔲 (Nächster Schritt)
### v9d Phase 2 ✅ (Deployed to Dev 23.03.2026)
**Vitalwerte & Erholung:**
- 🔲 Ruhetage erfassen (rest_days Tabelle)
- 🔲 Ruhepuls erfassen (vitals_log Tabelle)
- 🔲 HF-Zonen + Erholungsstatus
- 🔲 Schlaf-Modul (Basis)
- ✅ **Schlaf-Modul (v9d Phase 2b):**
- Tabelle `sleep_log` mit JSONB sleep_segments
- Schlafphasen (Deep, REM, Light, Awake)
- Apple Health CSV Import
- Schlaf-Statistiken & Trends
- Schlafschuld-Berechnung
- ✅ **Ruhetage (v9d Phase 2a):**
- Tabelle `rest_days` (Multi-Dimensional Rest)
- 3 Typen: Kraft-Ruhe, Cardio-Ruhe, Entspannung
- Quick Mode Presets + Custom Entry
- Validierung gegen geplante Aktivitäten
- Dashboard Widget mit aktuellen Ruhetagen
- ✅ **Vitalwerte erweitert (v9d Phase 2d):**
- Ruhepuls + HRV (morgens)
- Blutdruck (Systolisch/Diastolisch + Puls)
- VO2 Max (Apple Watch)
- SpO2 (Blutsauerstoffsättigung)
- Atemfrequenz
- Unregelmäßiger Herzschlag & AFib-Warnungen
- CSV Import: Omron (Blutdruck) + Apple Health (alle Vitals)
- Trend-Analyse (7d/14d/30d)
- 🔲 **HF-Zonen + Erholungsstatus (v9d Phase 2e):**
- HF-Zonen-Verteilung pro Training
- Recovery Score basierend auf Ruhepuls + HRV + Schlaf
- Übertraining-Warnung
**Migrations:**
- Migration 010: sleep_log Tabelle (JSONB segments)
- Migration 011: rest_days Tabelle (Kraft, Cardio, Entspannung)
- Migration 012: Unique constraint rest_days (profile_id, date, rest_type)
- Migration 013: vitals_log Tabelle (Ruhepuls, HRV)
- Migration 014: Extended vitals (BP, VO2 Max, SpO2, respiratory_rate)
📚 Details: `.claude/docs/functional/TRAINING_TYPES.md`

View File

@ -0,0 +1,287 @@
"""
Training Type Profiles - Helper Functions
Utilities for loading parameters, profiles, and running evaluations.
Issue: #15
Date: 2026-03-23
"""
from typing import Dict, Optional, List
from decimal import Decimal
import logging
from db import get_cursor
from profile_evaluator import TrainingProfileEvaluator
logger = logging.getLogger(__name__)
def convert_decimals(obj):
    """
    Recursively convert Decimal objects to float for JSON serialization.

    PostgreSQL returns numeric values as Decimal, but psycopg2.Json() can't
    serialize them.

    Args:
        obj: Any value; dicts, lists and tuples are walked recursively.

    Returns:
        The same structure with every Decimal replaced by float. Tuples are
        normalized to lists (matching JSON array semantics) so a tuple
        containing Decimals no longer slips through unconverted.
    """
    if isinstance(obj, Decimal):
        return float(obj)
    if isinstance(obj, dict):
        return {k: convert_decimals(v) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return [convert_decimals(item) for item in obj]
    return obj
def load_parameters_registry(cur) -> Dict[str, Dict]:
    """
    Load the active training parameters registry from the database.

    Args:
        cur: Database cursor yielding dict-style rows.

    Returns:
        Dict mapping parameter_key -> parameter config.
    """
    cur.execute("""
        SELECT key, name_de, name_en, category, data_type, unit,
               description_de, source_field, validation_rules
        FROM training_parameters
        WHERE is_active = true
    """)
    # Keyed by the unique parameter key; one entry per active parameter.
    return {entry['key']: dict(entry) for entry in cur.fetchall()}
def load_training_type_profile(cur, training_type_id: int) -> Optional[Dict]:
    """
    Load the profile JSONB for a training type.

    Args:
        cur: Database cursor yielding dict-style rows.
        training_type_id: ID of the training type to look up.

    Returns:
        The profile JSONB, or None when the type is unknown or has no
        profile configured.
    """
    cur.execute(
        "SELECT profile FROM training_types WHERE id = %s",
        (training_type_id,)
    )
    record = cur.fetchone()
    # A missing row and a NULL profile column are treated the same way.
    if not record:
        return None
    return record['profile'] or None
def load_evaluation_context(
    cur,
    profile_id: str,
    activity_date: str,
    lookback_days: int = 30
) -> Dict:
    """
    Load context data for evaluation (user profile + recent activities).

    Args:
        cur: Database cursor yielding dict-style rows.
        profile_id: User profile ID.
        activity_date: ISO date string of the activity being evaluated.
        lookback_days: How many days of history to load.

    Returns:
        {
            "user_profile": {...},
            "recent_activities": [...],
            "historical_activities": [...]
        }
    """
    # User profile: hf_max and sleep goal are the fields the evaluators read.
    cur.execute(
        "SELECT hf_max, sleep_goal_minutes FROM profiles WHERE id = %s",
        (profile_id,)
    )
    user_row = cur.fetchone()
    user_profile = dict(user_row) if user_row else {}

    # Recent activities, strictly before the activity date.
    # FIX: the lookback is bound as "%s * INTERVAL '1 day'" instead of
    # placing the placeholder inside the interval string literal
    # (INTERVAL '%s days'), so the driver passes it as a real parameter
    # rather than relying on textual substitution inside a quoted literal.
    cur.execute("""
        SELECT id, date, training_type_id, duration_min, hr_avg, hr_max,
               distance_km, kcal_active, rpe
        FROM activity_log
        WHERE profile_id = %s
          AND date >= %s::date - (%s * INTERVAL '1 day')
          AND date < %s::date
        ORDER BY date DESC
        LIMIT 50
    """, (profile_id, activity_date, lookback_days, activity_date))
    recent_activities = [dict(r) for r in cur.fetchall()]

    return {
        "user_profile": user_profile,
        "recent_activities": recent_activities,
        # Historical window is identical to the recent one for the MVP;
        # both keys intentionally alias the same list.
        "historical_activities": recent_activities,
    }
def evaluate_and_save_activity(
    cur,
    activity_id: str,
    activity_data: Dict,
    training_type_id: int,
    profile_id: str
) -> Optional[Dict]:
    """
    Evaluate an activity and persist the result on activity_log.

    Args:
        cur: Database cursor.
        activity_id: Activity ID.
        activity_data: Activity data dict.
        training_type_id: Training type ID.
        profile_id: User profile ID.

    Returns:
        Evaluation result dict, or None if no profile is configured for
        the training type.
    """
    # Without a configured profile there is nothing to evaluate against.
    profile = load_training_type_profile(cur, training_type_id)
    if not profile:
        # Lazy %-style args: the message is only formatted if emitted.
        logger.info(
            "[EVALUATION] No profile for training_type %s, skipping",
            training_type_id
        )
        return None

    # Parameter registry + per-user context (profile + recent history).
    parameters = load_parameters_registry(cur)
    context = load_evaluation_context(
        cur,
        profile_id,
        activity_data.get("date"),
        lookback_days=30
    )

    # PostgreSQL numerics arrive as Decimal; normalize before evaluating
    # so downstream JSON serialization cannot fail.
    activity_data_clean = convert_decimals(activity_data)
    context_clean = convert_decimals(context)

    evaluator = TrainingProfileEvaluator(parameters)
    evaluation_result = evaluator.evaluate_activity(
        activity_data_clean,
        profile,
        context_clean
    )

    # Persist: full evaluation JSONB plus denormalized label/score columns
    # for quick filtering and sorting.
    from psycopg2.extras import Json

    evaluation_result_clean = convert_decimals(evaluation_result)
    cur.execute("""
        UPDATE activity_log
        SET evaluation = %s,
            quality_label = %s,
            overall_score = %s
        WHERE id = %s
    """, (
        Json(evaluation_result_clean),
        evaluation_result_clean.get("quality_label"),
        evaluation_result_clean.get("overall_score"),
        activity_id
    ))

    logger.info(
        "[EVALUATION] Activity %s: %s (score: %s)",
        activity_id,
        evaluation_result.get("quality_label"),
        evaluation_result.get("overall_score")
    )
    return evaluation_result
def batch_evaluate_activities(
    cur,
    profile_id: str,
    limit: Optional[int] = None
) -> Dict:
    """
    Re-evaluate all activities for a user.

    Useful for:
    - Initial setup after profiles are configured
    - Re-evaluation after profile changes

    Args:
        cur: Database cursor.
        profile_id: User profile ID.
        limit: Optional limit for testing.

    Returns:
        {
            "total": int,
            "evaluated": int,
            "skipped": int,
            "errors": int,
            "error_details": [...]   # only present when errors occurred
        }
    """
    query = """
        SELECT id, profile_id, date, training_type_id, duration_min,
               hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
               rpe, pace_min_per_km, cadence, elevation_gain
        FROM activity_log
        WHERE profile_id = %s
        ORDER BY date DESC
    """
    params = [profile_id]
    if limit:
        query += " LIMIT %s"
        params.append(limit)
    cur.execute(query, params)
    activities = cur.fetchall()

    stats = {
        "total": len(activities),
        "evaluated": 0,
        "skipped": 0,
        "errors": 0
    }
    error_details = []

    for activity in activities:
        activity_dict = dict(activity)
        try:
            result = evaluate_and_save_activity(
                cur,
                activity_dict["id"],
                activity_dict,
                activity_dict["training_type_id"],
                profile_id
            )
        except Exception as e:
            # One failing activity must not abort the batch.
            # logger.exception keeps the traceback (logger.error dropped it).
            logger.exception("[BATCH-EVAL] Error evaluating %s", activity_dict["id"])
            error_details.append({
                "activity_id": activity_dict["id"],
                "training_type_id": activity_dict.get("training_type_id"),
                "error": str(e)
            })
            stats["errors"] += 1
        else:
            # result is None when the type has no profile configured.
            if result:
                stats["evaluated"] += 1
            else:
                stats["skipped"] += 1

    # Expose at most the first 10 failures so the response stays small.
    if error_details:
        stats["error_details"] = error_details[:10]

    logger.info("[BATCH-EVAL] Completed: %s", stats)
    return stats

View File

@ -21,6 +21,8 @@ from routers import admin, stats, exportdata, importdata
from routers import subscription, coupons, features, tiers_mgmt, tier_limits
from routers import user_restrictions, access_grants, training_types, admin_training_types
from routers import admin_activity_mappings, sleep, rest_days
from routers import vitals_baseline, blood_pressure # v9d Phase 2d Refactored
from routers import evaluation # v9d/v9e Training Type Profiles (#15)
# ── App Configuration ─────────────────────────────────────────────────────────
DATA_DIR = Path(os.getenv("DATA_DIR", "./data"))
@ -86,12 +88,15 @@ app.include_router(tier_limits.router) # /api/tier-limits (admin)
app.include_router(user_restrictions.router) # /api/user-restrictions (admin)
app.include_router(access_grants.router) # /api/access-grants (admin)
# v9d Training Types & Sleep Module & Rest Days
# v9d Training Types & Sleep Module & Rest Days & Vitals
app.include_router(training_types.router) # /api/training-types/*
app.include_router(admin_training_types.router) # /api/admin/training-types/*
app.include_router(admin_activity_mappings.router) # /api/admin/activity-mappings/*
app.include_router(sleep.router) # /api/sleep/* (v9d Phase 2b)
app.include_router(rest_days.router) # /api/rest-days/* (v9d Phase 2a)
app.include_router(vitals_baseline.router) # /api/vitals/baseline/* (v9d Phase 2d Refactored)
app.include_router(blood_pressure.router) # /api/blood-pressure/* (v9d Phase 2d Refactored)
app.include_router(evaluation.router) # /api/evaluation/* (v9d/v9e Training Profiles #15)
# ── Health Check ──────────────────────────────────────────────────────────────
@app.get("/")

View File

@ -0,0 +1,145 @@
-- Migration 013: Training Parameters Registry
-- Training Type Profiles System - Foundation
-- Date: 2026-03-23
-- Issue: #15

-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- TRAINING PARAMETERS REGISTRY
-- Central definition of all measurable activity parameters
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
CREATE TABLE IF NOT EXISTS training_parameters (
    id SERIAL PRIMARY KEY,
    key VARCHAR(50) UNIQUE NOT NULL,
    name_de VARCHAR(100) NOT NULL,
    name_en VARCHAR(100) NOT NULL,
    category VARCHAR(50) NOT NULL,
    data_type VARCHAR(20) NOT NULL,
    unit VARCHAR(20),
    description_de TEXT,
    description_en TEXT,
    source_field VARCHAR(100),
    validation_rules JSONB DEFAULT '{}'::jsonb,
    is_active BOOLEAN DEFAULT true,
    created_at TIMESTAMP DEFAULT NOW(),
    CONSTRAINT chk_category CHECK (category IN (
        'physical', 'physiological', 'subjective', 'environmental', 'performance'
    )),
    CONSTRAINT chk_data_type CHECK (data_type IN (
        'integer', 'float', 'string', 'boolean'
    ))
);

-- FIX: IF NOT EXISTS makes the index creation re-runnable, consistent
-- with the CREATE TABLE IF NOT EXISTS above (a plain CREATE INDEX fails
-- on a second run of this migration).
CREATE INDEX IF NOT EXISTS idx_training_parameters_category ON training_parameters(category) WHERE is_active = true;
CREATE INDEX IF NOT EXISTS idx_training_parameters_key ON training_parameters(key) WHERE is_active = true;

COMMENT ON TABLE training_parameters IS 'Registry of all measurable activity parameters (Training Type Profiles System)';
COMMENT ON COLUMN training_parameters.key IS 'Unique identifier (e.g. "avg_hr", "duration_min")';
COMMENT ON COLUMN training_parameters.category IS 'Parameter category: physical, physiological, subjective, environmental, performance';
COMMENT ON COLUMN training_parameters.data_type IS 'Data type: integer, float, string, boolean';
COMMENT ON COLUMN training_parameters.source_field IS 'Mapping to activity_log column name';
COMMENT ON COLUMN training_parameters.validation_rules IS 'Min/Max/Enum for validation (JSONB)';

-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- STANDARD PARAMETERS
-- FIX: ON CONFLICT (key) DO NOTHING makes the seed idempotent, so a
-- re-run does not violate the UNIQUE constraint on key.
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
INSERT INTO training_parameters (key, name_de, name_en, category, data_type, unit, source_field, validation_rules, description_de, description_en) VALUES
-- Physical Parameters
('duration_min', 'Dauer', 'Duration', 'physical', 'integer', 'min', 'duration_min',
 '{"min": 0, "max": 600}'::jsonb,
 'Trainingsdauer in Minuten',
 'Training duration in minutes'),
('distance_km', 'Distanz', 'Distance', 'physical', 'float', 'km', 'distance_km',
 '{"min": 0, "max": 200}'::jsonb,
 'Zurückgelegte Distanz in Kilometern',
 'Distance covered in kilometers'),
('kcal_active', 'Aktive Kalorien', 'Active Calories', 'physical', 'integer', 'kcal', 'kcal_active',
 '{"min": 0, "max": 5000}'::jsonb,
 'Aktiver Kalorienverbrauch',
 'Active calorie burn'),
('kcal_resting', 'Ruhekalorien', 'Resting Calories', 'physical', 'integer', 'kcal', 'kcal_resting',
 '{"min": 0, "max": 2000}'::jsonb,
 'Ruheumsatz während Training',
 'Resting calorie burn during training'),
('elevation_gain', 'Höhenmeter', 'Elevation Gain', 'physical', 'integer', 'm', 'elevation_gain',
 '{"min": 0, "max": 5000}'::jsonb,
 'Überwundene Höhenmeter',
 'Elevation gain in meters'),
('pace_min_per_km', 'Pace', 'Pace', 'physical', 'float', 'min/km', 'pace_min_per_km',
 '{"min": 2, "max": 20}'::jsonb,
 'Durchschnittstempo in Minuten pro Kilometer',
 'Average pace in minutes per kilometer'),
('cadence', 'Trittfrequenz', 'Cadence', 'physical', 'integer', 'spm', 'cadence',
 '{"min": 0, "max": 220}'::jsonb,
 'Schrittfrequenz (Schritte pro Minute)',
 'Step frequency (steps per minute)'),
-- Physiological Parameters
('avg_hr', 'Durchschnittspuls', 'Average Heart Rate', 'physiological', 'integer', 'bpm', 'hr_avg',
 '{"min": 30, "max": 220}'::jsonb,
 'Durchschnittliche Herzfrequenz',
 'Average heart rate'),
('max_hr', 'Maximalpuls', 'Max Heart Rate', 'physiological', 'integer', 'bpm', 'hr_max',
 '{"min": 40, "max": 220}'::jsonb,
 'Maximale Herzfrequenz',
 'Maximum heart rate'),
('min_hr', 'Minimalpuls', 'Min Heart Rate', 'physiological', 'integer', 'bpm', 'hr_min',
 '{"min": 30, "max": 200}'::jsonb,
 'Minimale Herzfrequenz',
 'Minimum heart rate'),
('avg_power', 'Durchschnittsleistung', 'Average Power', 'physiological', 'integer', 'W', 'avg_power',
 '{"min": 0, "max": 1000}'::jsonb,
 'Durchschnittliche Leistung in Watt',
 'Average power output in watts'),
-- Subjective Parameters
('rpe', 'RPE (Anstrengung)', 'RPE (Perceived Exertion)', 'subjective', 'integer', 'scale', 'rpe',
 '{"min": 1, "max": 10}'::jsonb,
 'Subjektive Anstrengung (Rate of Perceived Exertion)',
 'Rate of Perceived Exertion'),
-- Environmental Parameters
('temperature_celsius', 'Temperatur', 'Temperature', 'environmental', 'float', '°C', 'temperature_celsius',
 '{"min": -30, "max": 50}'::jsonb,
 'Umgebungstemperatur in Celsius',
 'Ambient temperature in Celsius'),
('humidity_percent', 'Luftfeuchtigkeit', 'Humidity', 'environmental', 'integer', '%', 'humidity_percent',
 '{"min": 0, "max": 100}'::jsonb,
 'Relative Luftfeuchtigkeit in Prozent',
 'Relative humidity in percent'),
-- Performance Parameters (calculated)
('avg_hr_percent', '% Max-HF', '% Max HR', 'performance', 'float', '%', 'avg_hr_percent',
 '{"min": 0, "max": 100}'::jsonb,
 'Durchschnittspuls als Prozent der maximalen Herzfrequenz',
 'Average heart rate as percentage of max heart rate'),
('kcal_per_km', 'Kalorien pro km', 'Calories per km', 'performance', 'float', 'kcal/km', 'kcal_per_km',
 '{"min": 0, "max": 1000}'::jsonb,
 'Kalorienverbrauch pro Kilometer',
 'Calorie burn per kilometer')
ON CONFLICT (key) DO NOTHING;

-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- SUMMARY
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- Display inserted parameters
DO $$
BEGIN
    RAISE NOTICE '✓ Migration 013 completed';
    RAISE NOTICE '  - Created training_parameters table';
    RAISE NOTICE '  - Inserted % standard parameters', (SELECT COUNT(*) FROM training_parameters);
END $$;

View File

@ -0,0 +1,114 @@
-- Migration 014: Training Type Profiles & Activity Evaluation
-- Training Type Profiles System - Schema Extensions
-- Date: 2026-03-23
-- Issue: #15

-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- EXTEND TRAINING TYPES
-- Add profile column for comprehensive training type configuration
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ALTER TABLE training_types ADD COLUMN IF NOT EXISTS profile JSONB DEFAULT NULL;

-- FIX: IF NOT EXISTS on all indexes so re-running the migration does not
-- fail (the ALTER TABLE statements above are already idempotent).
CREATE INDEX IF NOT EXISTS idx_training_types_profile_enabled ON training_types
    ((profile->'rule_sets'->'minimum_requirements'->>'enabled'))
    WHERE profile IS NOT NULL;

COMMENT ON COLUMN training_types.profile IS 'Comprehensive training type profile with 7 dimensions (rule_sets, intensity_zones, training_effects, periodization, performance_indicators, safety, ai_context)';

-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- EXTEND ACTIVITY LOG
-- Add evaluation results and quality labels
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS evaluation JSONB DEFAULT NULL;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS quality_label VARCHAR(20);
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS overall_score FLOAT;

CREATE INDEX IF NOT EXISTS idx_activity_quality_label ON activity_log(quality_label)
    WHERE quality_label IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_activity_overall_score ON activity_log(overall_score DESC)
    WHERE overall_score IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_activity_evaluation_passed ON activity_log
    ((evaluation->'rule_set_results'->'minimum_requirements'->>'passed'))
    WHERE evaluation IS NOT NULL;

COMMENT ON COLUMN activity_log.evaluation IS 'Complete evaluation result (7 dimensions, scores, recommendations, warnings)';
COMMENT ON COLUMN activity_log.quality_label IS 'Quality label: excellent, good, acceptable, poor (for quick filtering)';
COMMENT ON COLUMN activity_log.overall_score IS 'Overall quality score 0.0-1.0 (for sorting)';

-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- ADD MISSING COLUMNS (if not already added by previous migrations)
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- Add HR columns if not exist (might be in Migration 008)
DO $$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM information_schema.columns
                   WHERE table_name='activity_log' AND column_name='hr_min') THEN
        ALTER TABLE activity_log ADD COLUMN hr_min INTEGER CHECK (hr_min > 0 AND hr_min < 200);
    END IF;
END $$;

-- Add performance columns for calculated values
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS avg_hr_percent FLOAT;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS kcal_per_km FLOAT;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS pace_min_per_km FLOAT;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS cadence INTEGER;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS avg_power INTEGER;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS elevation_gain INTEGER;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS temperature_celsius FLOAT;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS humidity_percent INTEGER;

COMMENT ON COLUMN activity_log.avg_hr_percent IS 'Average HR as percentage of user max HR (calculated)';
COMMENT ON COLUMN activity_log.kcal_per_km IS 'Calories burned per kilometer (calculated)';

-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- HELPER FUNCTION: Calculate avg_hr_percent
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
CREATE OR REPLACE FUNCTION calculate_avg_hr_percent()
RETURNS TRIGGER AS $$
DECLARE
    user_max_hr INTEGER;
BEGIN
    -- Get user's max HR from profile
    SELECT hf_max INTO user_max_hr
    FROM profiles
    WHERE id = NEW.profile_id;
    -- Calculate percentage if both values exist
    IF NEW.hr_avg IS NOT NULL AND user_max_hr IS NOT NULL AND user_max_hr > 0 THEN
        NEW.avg_hr_percent := (NEW.hr_avg::float / user_max_hr::float) * 100;
    END IF;
    -- Calculate kcal per km
    IF NEW.kcal_active IS NOT NULL AND NEW.distance_km IS NOT NULL AND NEW.distance_km > 0 THEN
        NEW.kcal_per_km := NEW.kcal_active::float / NEW.distance_km;
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Trigger for automatic calculation (DROP first keeps this re-runnable)
DROP TRIGGER IF EXISTS trg_calculate_performance_metrics ON activity_log;
CREATE TRIGGER trg_calculate_performance_metrics
    BEFORE INSERT OR UPDATE ON activity_log
    FOR EACH ROW
    EXECUTE FUNCTION calculate_avg_hr_percent();

-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- SUMMARY
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
DO $$
BEGIN
    RAISE NOTICE '✓ Migration 014 completed';
    RAISE NOTICE '  - Extended training_types with profile column';
    RAISE NOTICE '  - Extended activity_log with evaluation columns';
    RAISE NOTICE '  - Added performance metric calculations';
    RAISE NOTICE '  - Created indexes for fast queries';
END $$;

View File

@ -0,0 +1,29 @@
-- Migration 014: Extended Vitals (Blood Pressure, VO2 Max, SpO2, Respiratory Rate)
-- v9d Phase 2d: Complete vitals tracking
-- Date: 2026-03-23
-- NOTE(review): this file shares the number 014 with the training-type
-- profiles migration in the same release — confirm the migration runner's
-- ordering/naming before deploying.

-- Add new vitals fields (all idempotent via IF NOT EXISTS; CHECK ranges
-- bound each measurement to physiologically plausible values)
ALTER TABLE vitals_log
ADD COLUMN IF NOT EXISTS blood_pressure_systolic INTEGER CHECK (blood_pressure_systolic > 0 AND blood_pressure_systolic < 300),
ADD COLUMN IF NOT EXISTS blood_pressure_diastolic INTEGER CHECK (blood_pressure_diastolic > 0 AND blood_pressure_diastolic < 200),
ADD COLUMN IF NOT EXISTS pulse INTEGER CHECK (pulse > 0 AND pulse < 250),
ADD COLUMN IF NOT EXISTS vo2_max DECIMAL(4,1) CHECK (vo2_max > 0 AND vo2_max < 100),
ADD COLUMN IF NOT EXISTS spo2 INTEGER CHECK (spo2 >= 70 AND spo2 <= 100),
ADD COLUMN IF NOT EXISTS respiratory_rate DECIMAL(4,1) CHECK (respiratory_rate > 0 AND respiratory_rate < 60),
ADD COLUMN IF NOT EXISTS irregular_heartbeat BOOLEAN DEFAULT false,
ADD COLUMN IF NOT EXISTS possible_afib BOOLEAN DEFAULT false;

-- Update source check to include omron.
-- The DROP + ADD pair replaces the old allowed-values list; run inside a
-- single migration transaction so existing rows are never unconstrained.
ALTER TABLE vitals_log DROP CONSTRAINT IF EXISTS vitals_log_source_check;
ALTER TABLE vitals_log ADD CONSTRAINT vitals_log_source_check
    CHECK (source IN ('manual', 'apple_health', 'garmin', 'omron'));

-- Comments
COMMENT ON COLUMN vitals_log.blood_pressure_systolic IS 'Systolic blood pressure (mmHg) from Omron or manual entry';
COMMENT ON COLUMN vitals_log.blood_pressure_diastolic IS 'Diastolic blood pressure (mmHg) from Omron or manual entry';
COMMENT ON COLUMN vitals_log.pulse IS 'Pulse during blood pressure measurement (bpm)';
COMMENT ON COLUMN vitals_log.vo2_max IS 'VO2 Max from Apple Watch (ml/kg/min)';
COMMENT ON COLUMN vitals_log.spo2 IS 'Blood oxygen saturation (%) from Apple Watch';
COMMENT ON COLUMN vitals_log.respiratory_rate IS 'Respiratory rate (breaths/min) from Apple Watch';
COMMENT ON COLUMN vitals_log.irregular_heartbeat IS 'Irregular heartbeat detected (Omron)';
COMMENT ON COLUMN vitals_log.possible_afib IS 'Possible atrial fibrillation (Omron)';

View File

@ -0,0 +1,184 @@
-- Migration 015: Vitals Refactoring - separation of baseline vs. context-dependent vitals
-- v9d Phase 2d: architecture improvement for better data quality
-- Date: 2026-03-23

-- ══════════════════════════════════════════════════════════════════════════════
-- STEP 1: Create new tables
-- ══════════════════════════════════════════════════════════════════════════════

-- Baseline Vitals (slow-changing, once daily, morning measurement).
-- One row per profile per day, enforced by unique_baseline_per_day.
CREATE TABLE IF NOT EXISTS vitals_baseline (
    id SERIAL PRIMARY KEY,
    profile_id UUID NOT NULL REFERENCES profiles(id) ON DELETE CASCADE,
    date DATE NOT NULL,
    -- Core baseline vitals (CHECK ranges bound values to plausible limits)
    resting_hr INTEGER CHECK (resting_hr > 0 AND resting_hr < 120),
    hrv INTEGER CHECK (hrv > 0 AND hrv < 300),
    vo2_max DECIMAL(4,1) CHECK (vo2_max > 0 AND vo2_max < 100),
    spo2 INTEGER CHECK (spo2 >= 70 AND spo2 <= 100),
    respiratory_rate DECIMAL(4,1) CHECK (respiratory_rate > 0 AND respiratory_rate < 60),
    -- Future baseline vitals (prepared for expansion)
    body_temperature DECIMAL(3,1) CHECK (body_temperature > 30 AND body_temperature < 45),
    resting_metabolic_rate INTEGER CHECK (resting_metabolic_rate > 0),
    -- Metadata
    note TEXT,
    source VARCHAR(20) DEFAULT 'manual' CHECK (source IN ('manual', 'apple_health', 'garmin', 'withings')),
    created_at TIMESTAMP DEFAULT NOW(),
    updated_at TIMESTAMP DEFAULT NOW(),
    CONSTRAINT unique_baseline_per_day UNIQUE(profile_id, date)
);

CREATE INDEX idx_vitals_baseline_profile_date ON vitals_baseline(profile_id, date DESC);

COMMENT ON TABLE vitals_baseline IS 'v9d Phase 2d: Baseline vitals measured once daily (morning, fasted)';
COMMENT ON COLUMN vitals_baseline.resting_hr IS 'Resting heart rate (bpm) - measured in the morning before getting up';
COMMENT ON COLUMN vitals_baseline.hrv IS 'Heart rate variability (ms) - higher is better';
COMMENT ON COLUMN vitals_baseline.vo2_max IS 'VO2 Max (ml/kg/min) - estimated by Apple Watch or lab test';
COMMENT ON COLUMN vitals_baseline.spo2 IS 'Blood oxygen saturation (%) - baseline measurement';
COMMENT ON COLUMN vitals_baseline.respiratory_rate IS 'Respiratory rate (breaths/min) - baseline measurement';

-- ══════════════════════════════════════════════════════════════════════════════
-- Blood Pressure Log (context-dependent, multiple times per day).
-- Duplicate measurements at the same timestamp are rejected by
-- unique_bp_measurement.
CREATE TABLE IF NOT EXISTS blood_pressure_log (
    id SERIAL PRIMARY KEY,
    profile_id UUID NOT NULL REFERENCES profiles(id) ON DELETE CASCADE,
    measured_at TIMESTAMP NOT NULL,
    -- Blood pressure measurements
    systolic INTEGER NOT NULL CHECK (systolic > 0 AND systolic < 300),
    diastolic INTEGER NOT NULL CHECK (diastolic > 0 AND diastolic < 200),
    pulse INTEGER CHECK (pulse > 0 AND pulse < 250),
    -- Context tagging for correlation analysis
    context VARCHAR(30) CHECK (context IN (
        'morning_fasted',     -- morning, fasted
        'after_meal',         -- after a meal
        'before_training',    -- before training
        'after_training',     -- after training
        'evening',            -- in the evening
        'stress',             -- under stress
        'resting',            -- resting measurement
        'other'               -- anything else
    )),
    -- Warning flags (Omron)
    irregular_heartbeat BOOLEAN DEFAULT false,
    possible_afib BOOLEAN DEFAULT false,
    -- Metadata
    note TEXT,
    source VARCHAR(20) DEFAULT 'manual' CHECK (source IN ('manual', 'omron', 'apple_health', 'withings')),
    created_at TIMESTAMP DEFAULT NOW(),
    CONSTRAINT unique_bp_measurement UNIQUE(profile_id, measured_at)
);

CREATE INDEX idx_blood_pressure_profile_datetime ON blood_pressure_log(profile_id, measured_at DESC);
CREATE INDEX idx_blood_pressure_context ON blood_pressure_log(context) WHERE context IS NOT NULL;

COMMENT ON TABLE blood_pressure_log IS 'v9d Phase 2d: Blood pressure measurements (multiple per day, context-aware)';
COMMENT ON COLUMN blood_pressure_log.context IS 'Measurement context for correlation analysis';
COMMENT ON COLUMN blood_pressure_log.irregular_heartbeat IS 'Irregular heartbeat detected (Omron device)';
COMMENT ON COLUMN blood_pressure_log.possible_afib IS 'Possible atrial fibrillation (Omron device)';
-- ══════════════════════════════════════════════════════════════════════════════
-- STEP 2: Migrate existing data from vitals_log
-- NOTE(review): this migration is one-shot — STEP 3 below renames
-- vitals_log away, so a re-run will fail at the SELECT ... FROM vitals_log.
-- Confirm the migration runner records applied migrations.
-- ══════════════════════════════════════════════════════════════════════════════

-- Migrate baseline vitals (RHR, HRV, VO2 Max, SpO2, Respiratory Rate).
-- Rows are copied when at least one baseline value is present; ON CONFLICT
-- keeps the first row per (profile_id, date) if duplicates exist.
INSERT INTO vitals_baseline (
    profile_id, date,
    resting_hr, hrv, vo2_max, spo2, respiratory_rate,
    note, source, created_at, updated_at
)
SELECT
    profile_id, date,
    resting_hr, hrv, vo2_max, spo2, respiratory_rate,
    note, source, created_at, updated_at
FROM vitals_log
WHERE resting_hr IS NOT NULL
   OR hrv IS NOT NULL
   OR vo2_max IS NOT NULL
   OR spo2 IS NOT NULL
   OR respiratory_rate IS NOT NULL
ON CONFLICT (profile_id, date) DO NOTHING;

-- Migrate blood pressure measurements
-- Note: Use date + 08:00 as default timestamp (morning measurement)
INSERT INTO blood_pressure_log (
    profile_id, measured_at,
    systolic, diastolic, pulse,
    irregular_heartbeat, possible_afib,
    note, source, created_at
)
SELECT
    profile_id,
    (date + TIME '08:00:00')::timestamp AS measured_at,
    blood_pressure_systolic,
    blood_pressure_diastolic,
    pulse,
    irregular_heartbeat,
    possible_afib,
    note,
    -- Only 'manual' and 'omron' are carried over; every other legacy
    -- source (e.g. apple_health/garmin) is collapsed to 'manual'.
    CASE
        WHEN source = 'manual' THEN 'manual'
        WHEN source = 'omron' THEN 'omron'
        ELSE 'manual'
    END AS source,
    created_at
FROM vitals_log
WHERE blood_pressure_systolic IS NOT NULL
  AND blood_pressure_diastolic IS NOT NULL
ON CONFLICT (profile_id, measured_at) DO NOTHING;

-- ══════════════════════════════════════════════════════════════════════════════
-- STEP 3: Drop old vitals_log table (backup first)
-- ══════════════════════════════════════════════════════════════════════════════
-- Rename old table as backup (keep for safety, can be dropped later)
ALTER TABLE vitals_log RENAME TO vitals_log_backup_pre_015;

-- Drop old index (it's on the renamed table now)
DROP INDEX IF EXISTS idx_vitals_profile_date;

-- ══════════════════════════════════════════════════════════════════════════════
-- STEP 4: Prepared for future vitals types
-- ══════════════════════════════════════════════════════════════════════════════
-- Future tables (commented out, create when needed):

-- Glucose Log (for blood sugar tracking)
-- CREATE TABLE glucose_log (
--     id SERIAL PRIMARY KEY,
--     profile_id UUID NOT NULL REFERENCES profiles(id) ON DELETE CASCADE,
--     measured_at TIMESTAMP NOT NULL,
--     glucose_mg_dl INTEGER NOT NULL CHECK (glucose_mg_dl > 0 AND glucose_mg_dl < 500),
--     context VARCHAR(30) CHECK (context IN (
--         'fasted', 'before_meal', 'after_meal_1h', 'after_meal_2h', 'before_training', 'after_training', 'other'
--     )),
--     note TEXT,
--     source VARCHAR(20) DEFAULT 'manual',
--     created_at TIMESTAMP DEFAULT NOW(),
--     CONSTRAINT unique_glucose_measurement UNIQUE(profile_id, measured_at)
-- );

-- Temperature Log (for illness tracking)
-- CREATE TABLE temperature_log (
--     id SERIAL PRIMARY KEY,
--     profile_id UUID NOT NULL REFERENCES profiles(id) ON DELETE CASCADE,
--     measured_at TIMESTAMP NOT NULL,
--     temperature_celsius DECIMAL(3,1) NOT NULL CHECK (temperature_celsius > 30 AND temperature_celsius < 45),
--     measurement_location VARCHAR(20) CHECK (measurement_location IN ('oral', 'ear', 'forehead', 'armpit')),
--     note TEXT,
--     created_at TIMESTAMP DEFAULT NOW(),
--     CONSTRAINT unique_temperature_measurement UNIQUE(profile_id, measured_at)
-- );

-- ══════════════════════════════════════════════════════════════════════════════
-- Migration complete
-- ══════════════════════════════════════════════════════════════════════════════

View File

@ -0,0 +1,349 @@
"""
Training Type Profiles - Master Evaluator
Comprehensive activity evaluation across all 7 dimensions.
Issue: #15
Date: 2026-03-23
"""
from typing import Dict, Optional, List
from datetime import datetime
import logging
from rule_engine import RuleEvaluator, IntensityZoneEvaluator, TrainingEffectsEvaluator
logger = logging.getLogger(__name__)
class TrainingProfileEvaluator:
"""
Master class for comprehensive activity evaluation.
Evaluates an activity against a training type profile across 7 dimensions:
1. Minimum Requirements (Quality Gates)
2. Intensity Zones (HR zones)
3. Training Effects (Abilities)
4. Periodization (Frequency & Recovery)
5. Performance Indicators (KPIs)
6. Safety (Warnings)
7. AI Context
"""
def __init__(self, parameters_registry: Dict[str, Dict]):
    """
    Initialize the evaluator with the parameter registry.

    Args:
        parameters_registry: Dict mapping parameter_key -> config
            (as loaded from the training_parameters registry table).
    """
    self.parameters_registry = parameters_registry
    # Sub-evaluators for the rule-set dimensions delegated by this class:
    # generic rule sets, HR intensity zones, and training effects.
    self.rule_evaluator = RuleEvaluator()
    self.zone_evaluator = IntensityZoneEvaluator()
    self.effects_evaluator = TrainingEffectsEvaluator()
def evaluate_activity(
self,
activity: Dict,
training_type_profile: Optional[Dict],
context: Optional[Dict] = None
) -> Dict:
"""
Complete evaluation of an activity against its training type profile.
Args:
activity: Activity data dictionary
training_type_profile: Training type profile (JSONB)
context: {
"user_profile": {...},
"recent_activities": [...],
"historical_activities": [...]
}
Returns:
{
"evaluated_at": ISO timestamp,
"profile_version": str,
"rule_set_results": {
"minimum_requirements": {...},
"intensity_zones": {...},
"training_effects": {...},
"periodization": {...},
"performance_indicators": {...},
"safety": {...}
},
"overall_score": float (0-1),
"quality_label": str,
"recommendations": [str],
"warnings": [str]
}
"""
# No profile? Return unvalidated result
if not training_type_profile:
return self._create_unvalidated_result()
rule_sets = training_type_profile.get("rule_sets", {})
context = context or {}
results = {
"evaluated_at": datetime.now().isoformat(),
"profile_version": training_type_profile.get("version", "unknown"),
"rule_set_results": {}
}
# ━━━ 1. MINIMUM REQUIREMENTS ━━━
if "minimum_requirements" in rule_sets:
results["rule_set_results"]["minimum_requirements"] = \
self.rule_evaluator.evaluate_rule_set(
rule_sets["minimum_requirements"],
activity,
self.parameters_registry
)
# ━━━ 2. INTENSITY ZONES ━━━
if "intensity_zones" in rule_sets:
results["rule_set_results"]["intensity_zones"] = \
self.zone_evaluator.evaluate(
rule_sets["intensity_zones"],
activity,
context.get("user_profile", {})
)
# ━━━ 3. TRAINING EFFECTS ━━━
if "training_effects" in rule_sets:
results["rule_set_results"]["training_effects"] = \
self.effects_evaluator.evaluate(
rule_sets["training_effects"],
activity,
results["rule_set_results"].get("intensity_zones")
)
# ━━━ 4. PERIODIZATION ━━━
if "periodization" in rule_sets:
results["rule_set_results"]["periodization"] = \
self._evaluate_periodization(
rule_sets["periodization"],
activity,
context.get("recent_activities", [])
)
# ━━━ 5. PERFORMANCE INDICATORS ━━━
if "performance_indicators" in rule_sets:
results["rule_set_results"]["performance_indicators"] = \
self._evaluate_performance(
rule_sets["performance_indicators"],
activity,
context.get("historical_activities", [])
)
# ━━━ 6. SAFETY WARNINGS ━━━
if "safety" in rule_sets:
results["rule_set_results"]["safety"] = \
self._evaluate_safety(
rule_sets["safety"],
activity
)
# ━━━ OVERALL SCORE & QUALITY LABEL ━━━
overall_score = self._calculate_overall_score(results["rule_set_results"])
results["overall_score"] = overall_score
results["quality_label"] = self._get_quality_label(overall_score)
# ━━━ RECOMMENDATIONS & WARNINGS ━━━
results["recommendations"] = self._generate_recommendations(results)
results["warnings"] = self._collect_warnings(results)
return results
def _create_unvalidated_result(self) -> Dict:
"""Creates result for activities without profile."""
return {
"evaluated_at": datetime.now().isoformat(),
"profile_version": None,
"rule_set_results": {},
"overall_score": None,
"quality_label": None,
"recommendations": ["Kein Trainingsprofil konfiguriert"],
"warnings": []
}
def _evaluate_periodization(
self,
config: Dict,
activity: Dict,
recent_activities: List[Dict]
) -> Dict:
"""
Evaluates periodization compliance (frequency & recovery).
Simplified for MVP - full implementation later.
"""
if not config.get("enabled", False):
return {"enabled": False}
# Basic frequency check
training_type_id = activity.get("training_type_id")
same_type_this_week = sum(
1 for a in recent_activities
if a.get("training_type_id") == training_type_id
)
frequency_config = config.get("frequency", {})
optimal = frequency_config.get("per_week_optimal", 3)
return {
"enabled": True,
"weekly_count": same_type_this_week,
"optimal_count": optimal,
"frequency_status": "optimal" if same_type_this_week <= optimal else "over_optimal",
"recovery_adequate": True, # Simplified for MVP
"warning": None
}
def _evaluate_performance(
self,
config: Dict,
activity: Dict,
historical_activities: List[Dict]
) -> Dict:
"""
Evaluates performance development.
Simplified for MVP - full implementation later.
"""
if not config.get("enabled", False):
return {"enabled": False}
return {
"enabled": True,
"trend": "stable", # Simplified
"metrics_comparison": {},
"benchmark_level": "intermediate"
}
def _evaluate_safety(self, config: Dict, activity: Dict) -> Dict:
"""
Evaluates safety warnings.
"""
if not config.get("enabled", False):
return {"enabled": False, "warnings": []}
warnings_config = config.get("warnings", [])
triggered_warnings = []
for warning_rule in warnings_config:
param_key = warning_rule.get("parameter")
operator = warning_rule.get("operator")
threshold = warning_rule.get("value")
severity = warning_rule.get("severity", "medium")
message = warning_rule.get("message", "")
actual_value = activity.get(param_key)
if actual_value is not None:
operator_func = RuleEvaluator.OPERATORS.get(operator)
if operator_func and operator_func(actual_value, threshold):
triggered_warnings.append({
"severity": severity,
"message": message,
"parameter": param_key,
"actual_value": actual_value,
"threshold": threshold
})
return {
"enabled": True,
"warnings": triggered_warnings
}
def _calculate_overall_score(self, rule_set_results: Dict) -> float:
"""
Calculates weighted overall score.
Weights:
- Minimum Requirements: 40%
- Intensity Zones: 20%
- Periodization: 20%
- Performance: 10%
- Training Effects: 10%
"""
weights = {
"minimum_requirements": 0.4,
"intensity_zones": 0.2,
"periodization": 0.2,
"performance_indicators": 0.1,
"training_effects": 0.1
}
total_score = 0.0
total_weight = 0.0
for rule_set_name, weight in weights.items():
result = rule_set_results.get(rule_set_name)
if result and result.get("enabled"):
score = result.get("score", 0.5)
# Special handling for different result types
if rule_set_name == "intensity_zones":
score = result.get("duration_quality", 0.5)
elif rule_set_name == "periodization":
score = 1.0 if result.get("recovery_adequate", False) else 0.5
total_score += score * weight
total_weight += weight
return round(total_score / total_weight, 2) if total_weight > 0 else 0.5
def _get_quality_label(self, score: Optional[float]) -> Optional[str]:
"""Converts score to quality label."""
if score is None:
return None
if score >= 0.9:
return "excellent"
elif score >= 0.7:
return "good"
elif score >= 0.5:
return "acceptable"
else:
return "poor"
def _generate_recommendations(self, results: Dict) -> List[str]:
"""Generates actionable recommendations."""
recommendations = []
# Check minimum requirements
min_req = results["rule_set_results"].get("minimum_requirements", {})
if min_req.get("enabled") and not min_req.get("passed"):
for failed in min_req.get("failed_rules", []):
param = failed.get("parameter")
actual = failed.get("actual_value")
expected = failed.get("expected_value")
reason = failed.get("reason", "")
symbol = failed.get("operator_symbol", "")
recommendations.append(
f"{param}: {actual} {symbol} {expected} - {reason}"
)
# Check intensity zones
zone_result = results["rule_set_results"].get("intensity_zones", {})
if zone_result.get("enabled") and zone_result.get("recommendation"):
recommendations.append(zone_result["recommendation"])
# Default recommendation if excellent
if results.get("quality_label") == "excellent" and not recommendations:
recommendations.append("Hervorragendes Training! Weiter so.")
return recommendations
def _collect_warnings(self, results: Dict) -> List[str]:
"""Collects all warnings from safety checks."""
safety_result = results["rule_set_results"].get("safety", {})
if not safety_result.get("enabled"):
return []
warnings = []
for warning in safety_result.get("warnings", []):
severity_icon = "🔴" if warning["severity"] == "high" else "⚠️"
warnings.append(f"{severity_icon} {warning['message']}")
return warnings

View File

@ -0,0 +1,450 @@
"""
Training Type Profile Templates
Pre-configured profiles for common training types.
Issue: #15
Date: 2026-03-23
"""
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TEMPLATE: LAUFEN (Running) - Ausdauer-fokussiert
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Endurance-running profile, registered in TEMPLATES under key "running".
# Quality gates on duration / average HR / distance, four HR-percent zones,
# and two safety warnings (HR spike, very long session).
TEMPLATE_RUNNING = {
    "version": "1.0",
    "name": "Laufen (Standard)",
    "description": "Ausdauerlauf mit Herzfrequenz-Zonen",
    "rule_sets": {
        # Weighted quality gates; the combined weighted score must reach
        # pass_threshold (evaluation is done by the rule engine).
        "minimum_requirements": {
            "enabled": True,
            "pass_strategy": "weighted_score",
            "pass_threshold": 0.6,
            "rules": [
                {
                    "parameter": "duration_min",
                    "operator": "gte",
                    "value": 15,
                    "weight": 5,
                    "optional": False,
                    "reason": "Mindestens 15 Minuten für Trainingseffekt"
                },
                {
                    "parameter": "avg_hr",
                    "operator": "gte",
                    "value": 100,
                    "weight": 3,
                    "optional": False,
                    "reason": "Puls muss für Ausdauerreiz erhöht sein"
                },
                {
                    "parameter": "distance_km",
                    "operator": "gte",
                    "value": 1.0,
                    "weight": 2,
                    "optional": False,
                    "reason": "Mindestens 1 km Distanz"
                }
            ]
        },
        # HR zones; avg_hr_percent is presumably % of max HR — the zone
        # evaluator in rule_engine defines the exact semantics.
        "intensity_zones": {
            "enabled": True,
            "zones": [
                {
                    "id": "regeneration",
                    "name": "Regeneration",
                    "color": "#4CAF50",
                    "effect": "Aktive Erholung",
                    "target_duration_min": 30,
                    "rules": [
                        {
                            "parameter": "avg_hr_percent",
                            "operator": "between",
                            "value": [50, 60]
                        }
                    ]
                },
                {
                    "id": "grundlagenausdauer",
                    "name": "Grundlagenausdauer",
                    "color": "#2196F3",
                    "effect": "Fettverbrennung, aerobe Basis",
                    "target_duration_min": 45,
                    "rules": [
                        {
                            "parameter": "avg_hr_percent",
                            "operator": "between",
                            "value": [60, 70]
                        }
                    ]
                },
                {
                    "id": "entwicklungsbereich",
                    "name": "Entwicklungsbereich",
                    "color": "#FF9800",
                    "effect": "VO2max-Training, Laktattoleranz",
                    "target_duration_min": 30,
                    "rules": [
                        {
                            "parameter": "avg_hr_percent",
                            "operator": "between",
                            "value": [70, 80]
                        }
                    ]
                },
                {
                    "id": "schwellentraining",
                    "name": "Schwellentraining",
                    "color": "#F44336",
                    "effect": "Anaerobe Schwelle, Wettkampftempo",
                    "target_duration_min": 20,
                    "rules": [
                        {
                            "parameter": "avg_hr_percent",
                            "operator": "between",
                            "value": [80, 90]
                        }
                    ]
                }
            ]
        },
        # Default ability effects credited to a running session
        # (intensity scale appears to be 1-5).
        "training_effects": {
            "enabled": True,
            "default_effects": {
                "primary_abilities": [
                    {
                        "category": "konditionell",
                        "ability": "ausdauer",
                        "intensity": 5
                    }
                ],
                "secondary_abilities": [
                    {
                        "category": "konditionell",
                        "ability": "schnelligkeit",
                        "intensity": 2
                    },
                    {
                        "category": "koordinativ",
                        "ability": "rhythmus",
                        "intensity": 3
                    },
                    {
                        "category": "psychisch",
                        "ability": "willenskraft",
                        "intensity": 4
                    }
                ]
            },
            "metabolic_focus": ["aerobic", "fat_oxidation"],
            "muscle_groups": ["legs", "core", "cardiovascular"]
        },
        # Weekly frequency targets and minimum recovery gap (hours).
        "periodization": {
            "enabled": True,
            "frequency": {
                "per_week_optimal": 3,
                "per_week_max": 5
            },
            "recovery": {
                "min_hours_between": 24
            }
        },
        "performance_indicators": {
            "enabled": False
        },
        "safety": {
            "enabled": True,
            "warnings": [
                {
                    "parameter": "avg_hr_percent",
                    "operator": "gt",
                    "value": 95,
                    "severity": "high",
                    "message": "Herzfrequenz zu hoch - Überbelastungsrisiko"
                },
                {
                    "parameter": "duration_min",
                    "operator": "gt",
                    "value": 180,
                    "severity": "medium",
                    "message": "Sehr lange Einheit - achte auf Regeneration"
                }
            ]
        }
    }
}
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TEMPLATE: MEDITATION - Mental-fokussiert (≤ statt ≥ bei HR!)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Meditation profile, registered in TEMPLATES under key "meditation".
# Inverted HR logic compared to cardio templates: a LOW pulse (lte) is the
# quality signal, and a high pulse triggers the safety warning.
TEMPLATE_MEDITATION = {
    "version": "1.0",
    "name": "Meditation (Standard)",
    "description": "Mentales Training mit niedrigem Puls",
    "rule_sets": {
        "minimum_requirements": {
            "enabled": True,
            "pass_strategy": "weighted_score",
            "pass_threshold": 0.6,
            "rules": [
                {
                    "parameter": "duration_min",
                    "operator": "gte",
                    "value": 5,
                    "weight": 5,
                    "optional": False,
                    "reason": "Mindestens 5 Minuten für Entspannungseffekt"
                },
                {
                    # lte (not gte): relaxation is indicated by a low pulse.
                    "parameter": "avg_hr",
                    "operator": "lte",
                    "value": 80,
                    "weight": 4,
                    "optional": False,
                    "reason": "Niedriger Puls zeigt Entspannung an"
                }
            ]
        },
        "intensity_zones": {
            "enabled": True,
            "zones": [
                {
                    "id": "deep_relaxation",
                    "name": "Tiefenentspannung",
                    "color": "#4CAF50",
                    "effect": "Parasympathikus-Aktivierung",
                    "target_duration_min": 10,
                    "rules": [
                        {
                            "parameter": "avg_hr_percent",
                            "operator": "between",
                            "value": [35, 45]
                        }
                    ]
                },
                {
                    "id": "light_meditation",
                    "name": "Leichte Meditation",
                    "color": "#2196F3",
                    "effect": "Achtsamkeit, Fokus",
                    "target_duration_min": 15,
                    "rules": [
                        {
                            "parameter": "avg_hr_percent",
                            "operator": "between",
                            "value": [45, 55]
                        }
                    ]
                }
            ]
        },
        "training_effects": {
            "enabled": True,
            "default_effects": {
                "primary_abilities": [
                    {
                        "category": "kognitiv",
                        "ability": "konzentration",
                        "intensity": 5
                    },
                    {
                        "category": "psychisch",
                        "ability": "stressresistenz",
                        "intensity": 5
                    }
                ],
                "secondary_abilities": [
                    {
                        "category": "kognitiv",
                        "ability": "wahrnehmung",
                        "intensity": 4
                    },
                    {
                        "category": "psychisch",
                        "ability": "selbstvertrauen",
                        "intensity": 3
                    }
                ]
            },
            "metabolic_focus": ["parasympathetic_activation"],
            # No muscle groups: purely mental training.
            "muscle_groups": []
        },
        "periodization": {
            "enabled": True,
            "frequency": {
                "per_week_optimal": 5,
                "per_week_max": 7
            },
            "recovery": {
                # Meditation needs no recovery gap between sessions.
                "min_hours_between": 0
            }
        },
        "performance_indicators": {
            "enabled": False
        },
        "safety": {
            "enabled": True,
            "warnings": [
                {
                    "parameter": "avg_hr",
                    "operator": "gt",
                    "value": 100,
                    "severity": "medium",
                    "message": "Herzfrequenz zu hoch für Meditation - bist du wirklich entspannt?"
                }
            ]
        }
    }
}
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TEMPLATE: KRAFTTRAINING - Kraft-fokussiert
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Strength-training profile, registered in TEMPLATES under key "strength".
# No HR zones (intensity_zones disabled); gates on duration and an optional
# calorie minimum; 48h recovery gap between sessions.
TEMPLATE_STRENGTH = {
    "version": "1.0",
    "name": "Krafttraining (Standard)",
    "description": "Krafttraining mit moderater Herzfrequenz",
    "rule_sets": {
        "minimum_requirements": {
            "enabled": True,
            "pass_strategy": "weighted_score",
            # Lower threshold than the running template (0.5 vs 0.6).
            "pass_threshold": 0.5,
            "rules": [
                {
                    "parameter": "duration_min",
                    "operator": "gte",
                    "value": 20,
                    "weight": 5,
                    "optional": False,
                    "reason": "Mindestens 20 Minuten für Muskelreiz"
                },
                {
                    "parameter": "kcal_active",
                    "operator": "gte",
                    "value": 100,
                    "weight": 2,
                    "optional": True,
                    "reason": "Mindest-Kalorienverbrauch"
                }
            ]
        },
        "intensity_zones": {
            "enabled": False
        },
        "training_effects": {
            "enabled": True,
            "default_effects": {
                "primary_abilities": [
                    {
                        "category": "konditionell",
                        "ability": "kraft",
                        "intensity": 5
                    }
                ],
                "secondary_abilities": [
                    {
                        "category": "koordinativ",
                        "ability": "differenzierung",
                        "intensity": 3
                    },
                    {
                        "category": "psychisch",
                        "ability": "willenskraft",
                        "intensity": 4
                    }
                ]
            },
            "metabolic_focus": ["anaerobic", "muscle_growth"],
            "muscle_groups": ["full_body"]
        },
        "periodization": {
            "enabled": True,
            "frequency": {
                "per_week_optimal": 3,
                "per_week_max": 5
            },
            "recovery": {
                "min_hours_between": 48
            }
        },
        "performance_indicators": {
            "enabled": False
        },
        "safety": {
            "enabled": True,
            "warnings": []
        }
    }
}
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TEMPLATE REGISTRY
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Registry of all built-in profile templates, keyed by the identifier used
# by the admin API (GET /profiles/templates/{key}, apply-template). Each
# entry carries display metadata plus the template dict itself.
TEMPLATES = {
    "running": {
        "name_de": "Laufen",
        "name_en": "Running",
        "icon": "🏃",
        "categories": ["cardio", "running"],
        "template": TEMPLATE_RUNNING
    },
    "meditation": {
        "name_de": "Meditation",
        "name_en": "Meditation",
        "icon": "🧘",
        "categories": ["geist", "meditation"],
        "template": TEMPLATE_MEDITATION
    },
    "strength": {
        "name_de": "Krafttraining",
        "name_en": "Strength Training",
        "icon": "💪",
        "categories": ["kraft", "krafttraining"],
        "template": TEMPLATE_STRENGTH
    }
}
def get_template(template_key: str) -> "dict | None":
    """
    Return the profile template dict for *template_key*.

    Args:
        template_key: Registry key, e.g. "running", "meditation", "strength".

    Returns:
        The template dict, or None when the key is unknown.
        (Annotation fixed: the original claimed ``dict`` but returned
        None for unknown keys; the string form avoids needing a typing
        import in this module.)
    """
    template_info = TEMPLATES.get(template_key)
    if not template_info:
        return None
    return template_info["template"]
def list_templates() -> list:
    """
    Return display metadata for every registered template.

    Each entry contains key, German/English name, icon and categories —
    everything except the template dict itself.
    """
    summaries = []
    for template_key, meta in TEMPLATES.items():
        summaries.append({
            "key": template_key,
            "name_de": meta["name_de"],
            "name_en": meta["name_en"],
            "icon": meta["icon"],
            "categories": meta["categories"],
        })
    return summaries

View File

@ -8,3 +8,4 @@ pydantic==2.7.1
bcrypt==4.1.3
slowapi==0.1.9
psycopg2-binary==2.9.9
python-dateutil==2.9.0

View File

@ -17,6 +17,15 @@ from models import ActivityEntry
from routers.profiles import get_pid
from feature_logger import log_feature_usage
# Evaluation import with error handling (Phase 1.2)
try:
    from evaluation_helper import evaluate_and_save_activity
    EVALUATION_AVAILABLE = True
except Exception as e:
    # Bug fix: the module-level `logger` is only created further down the
    # file, so referencing it here raised NameError whenever the import
    # failed — defeating the graceful degradation this guard exists for.
    # Obtain the (same-named) logger locally instead.
    logging.getLogger(__name__).warning(f"[AUTO-EVAL] Evaluation system not available: {e}")
    EVALUATION_AVAILABLE = False
    evaluate_and_save_activity = None
router = APIRouter(prefix="/api/activity", tags=["activity"])
logger = logging.getLogger(__name__)
@ -64,6 +73,27 @@ def create_activity(e: ActivityEntry, x_profile_id: Optional[str]=Header(default
d['kcal_active'],d['kcal_resting'],d['hr_avg'],d['hr_max'],d['distance_km'],
d['rpe'],d['source'],d['notes']))
# Phase 1.2: Auto-evaluation after INSERT
if EVALUATION_AVAILABLE:
# Load the activity data to evaluate
cur.execute("""
SELECT id, profile_id, date, training_type_id, duration_min,
hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
rpe, pace_min_per_km, cadence, elevation_gain
FROM activity_log
WHERE id = %s
""", (eid,))
activity_row = cur.fetchone()
if activity_row:
activity_dict = dict(activity_row)
training_type_id = activity_dict.get("training_type_id")
if training_type_id:
try:
evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT")
except Exception as eval_error:
logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}")
# Phase 2: Increment usage counter (always for new entries)
increment_feature_usage(pid, 'activity_entries')
@ -79,6 +109,28 @@ def update_activity(eid: str, e: ActivityEntry, x_profile_id: Optional[str]=Head
cur = get_cursor(conn)
cur.execute(f"UPDATE activity_log SET {', '.join(f'{k}=%s' for k in d)} WHERE id=%s AND profile_id=%s",
list(d.values())+[eid,pid])
# Phase 1.2: Auto-evaluation after UPDATE
if EVALUATION_AVAILABLE:
# Load the updated activity data to evaluate
cur.execute("""
SELECT id, profile_id, date, training_type_id, duration_min,
hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
rpe, pace_min_per_km, cadence, elevation_gain
FROM activity_log
WHERE id = %s
""", (eid,))
activity_row = cur.fetchone()
if activity_row:
activity_dict = dict(activity_row)
training_type_id = activity_dict.get("training_type_id")
if training_type_id:
try:
evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE")
except Exception as eval_error:
logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}")
return {"id":eid}
@ -214,6 +266,31 @@ def bulk_categorize_activities(
""", (training_type_id, training_category, training_subcategory, pid, activity_type))
updated_count = cur.rowcount
# Phase 1.2: Auto-evaluation after bulk categorization
if EVALUATION_AVAILABLE:
# Load all activities that were just updated and evaluate them
cur.execute("""
SELECT id, profile_id, date, training_type_id, duration_min,
hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
rpe, pace_min_per_km, cadence, elevation_gain
FROM activity_log
WHERE profile_id = %s
AND activity_type = %s
AND training_type_id = %s
""", (pid, activity_type, training_type_id))
activities_to_evaluate = cur.fetchall()
evaluated_count = 0
for activity_row in activities_to_evaluate:
activity_dict = dict(activity_row)
try:
evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid)
evaluated_count += 1
except Exception as eval_error:
logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}")
logger.info(f"[AUTO-EVAL] Evaluated {evaluated_count}/{updated_count} bulk-categorized activities")
# Save mapping for future imports (upsert)
cur.execute("""
INSERT INTO activity_type_mappings (activity_type, training_type_id, profile_id, source, updated_at)
@ -275,6 +352,7 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional
if existing:
# Update existing entry (e.g., to add training type mapping)
existing_id = existing['id']
cur.execute("""
UPDATE activity_log
SET end_time = %s,
@ -297,22 +375,73 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional
tf(row.get('Max. Herzfrequenz (count/min)','')),
tf(row.get('Distanz (km)','')),
training_type_id, training_category, training_subcategory,
existing['id']
existing_id
))
skipped += 1 # Count as skipped (not newly inserted)
# Phase 1.2: Auto-evaluation after CSV import UPDATE
if EVALUATION_AVAILABLE and training_type_id:
try:
# Build activity dict for evaluation
activity_dict = {
"id": existing_id,
"profile_id": pid,
"date": date,
"training_type_id": training_type_id,
"duration_min": duration_min,
"hr_avg": tf(row.get('Durchschn. Herzfrequenz (count/min)','')),
"hr_max": tf(row.get('Max. Herzfrequenz (count/min)','')),
"distance_km": tf(row.get('Distanz (km)','')),
"kcal_active": kj(row.get('Aktive Energie (kJ)','')),
"kcal_resting": kj(row.get('Ruheeinträge (kJ)','')),
"rpe": None,
"pace_min_per_km": None,
"cadence": None,
"elevation_gain": None
}
evaluate_and_save_activity(cur, existing_id, activity_dict, training_type_id, pid)
logger.debug(f"[AUTO-EVAL] Re-evaluated updated activity {existing_id}")
except Exception as eval_error:
logger.warning(f"[AUTO-EVAL] Failed to re-evaluate updated activity {existing_id}: {eval_error}")
else:
# Insert new entry
new_id = str(uuid.uuid4())
cur.execute("""INSERT INTO activity_log
(id,profile_id,date,start_time,end_time,activity_type,duration_min,kcal_active,kcal_resting,
hr_avg,hr_max,distance_km,source,training_type_id,training_category,training_subcategory,created)
VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,'apple_health',%s,%s,%s,CURRENT_TIMESTAMP)""",
(str(uuid.uuid4()),pid,date,start,row.get('End',''),wtype,duration_min,
(new_id,pid,date,start,row.get('End',''),wtype,duration_min,
kj(row.get('Aktive Energie (kJ)','')),kj(row.get('Ruheeinträge (kJ)','')),
tf(row.get('Durchschn. Herzfrequenz (count/min)','')),
tf(row.get('Max. Herzfrequenz (count/min)','')),
tf(row.get('Distanz (km)','')),
training_type_id,training_category,training_subcategory))
inserted+=1
# Phase 1.2: Auto-evaluation after CSV import INSERT
if EVALUATION_AVAILABLE and training_type_id:
try:
# Build activity dict for evaluation
activity_dict = {
"id": new_id,
"profile_id": pid,
"date": date,
"training_type_id": training_type_id,
"duration_min": duration_min,
"hr_avg": tf(row.get('Durchschn. Herzfrequenz (count/min)','')),
"hr_max": tf(row.get('Max. Herzfrequenz (count/min)','')),
"distance_km": tf(row.get('Distanz (km)','')),
"kcal_active": kj(row.get('Aktive Energie (kJ)','')),
"kcal_resting": kj(row.get('Ruheeinträge (kJ)','')),
"rpe": None,
"pace_min_per_km": None,
"cadence": None,
"elevation_gain": None
}
evaluate_and_save_activity(cur, new_id, activity_dict, training_type_id, pid)
logger.debug(f"[AUTO-EVAL] Evaluated imported activity {new_id}")
except Exception as eval_error:
logger.warning(f"[AUTO-EVAL] Failed to evaluate imported activity {new_id}: {eval_error}")
except Exception as e:
logger.warning(f"Import row failed: {e}")
skipped+=1

View File

@ -11,6 +11,7 @@ from psycopg2.extras import Json
from db import get_db, get_cursor, r2d
from auth import require_auth, require_admin
from profile_templates import list_templates, get_template
router = APIRouter(prefix="/api/admin/training-types", tags=["admin", "training-types"])
logger = logging.getLogger(__name__)
@ -26,6 +27,7 @@ class TrainingTypeCreate(BaseModel):
description_en: Optional[str] = None
sort_order: int = 0
abilities: Optional[dict] = None
profile: Optional[dict] = None # Training Type Profile (Phase 2 #15)
class TrainingTypeUpdate(BaseModel):
@ -38,6 +40,7 @@ class TrainingTypeUpdate(BaseModel):
description_en: Optional[str] = None
sort_order: Optional[int] = None
abilities: Optional[dict] = None
profile: Optional[dict] = None # Training Type Profile (Phase 2 #15)
@router.get("")
@ -51,7 +54,7 @@ def list_training_types_admin(session: dict = Depends(require_admin)):
cur.execute("""
SELECT id, category, subcategory, name_de, name_en, icon,
description_de, description_en, sort_order, abilities,
created_at
profile, created_at
FROM training_types
ORDER BY sort_order, category, subcategory
""")
@ -68,7 +71,7 @@ def get_training_type(type_id: int, session: dict = Depends(require_admin)):
cur.execute("""
SELECT id, category, subcategory, name_de, name_en, icon,
description_de, description_en, sort_order, abilities,
created_at
profile, created_at
FROM training_types
WHERE id = %s
""", (type_id,))
@ -86,14 +89,15 @@ def create_training_type(data: TrainingTypeCreate, session: dict = Depends(requi
with get_db() as conn:
cur = get_cursor(conn)
# Convert abilities dict to JSONB
# Convert abilities and profile dict to JSONB
abilities_json = data.abilities if data.abilities else {}
profile_json = data.profile if data.profile else None
cur.execute("""
INSERT INTO training_types
(category, subcategory, name_de, name_en, icon,
description_de, description_en, sort_order, abilities)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)
description_de, description_en, sort_order, abilities, profile)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING id
""", (
data.category,
@ -104,7 +108,8 @@ def create_training_type(data: TrainingTypeCreate, session: dict = Depends(requi
data.description_de,
data.description_en,
data.sort_order,
Json(abilities_json)
Json(abilities_json),
Json(profile_json) if profile_json else None
))
new_id = cur.fetchone()['id']
@ -155,6 +160,9 @@ def update_training_type(
if data.abilities is not None:
updates.append("abilities = %s")
values.append(Json(data.abilities))
if data.profile is not None:
updates.append("profile = %s")
values.append(Json(data.profile))
if not updates:
raise HTTPException(400, "No fields to update")
@ -280,3 +288,122 @@ def get_abilities_taxonomy(session: dict = Depends(require_auth)):
}
return taxonomy
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# TRAINING TYPE PROFILES - Phase 2 (#15)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@router.get("/profiles/templates")
def list_profile_templates(session: dict = Depends(require_admin)):
    """
    Return all available profile templates (admin only).

    Covers the pre-built templates for common training types
    (Running, Meditation, Strength, ...).
    """
    templates = list_templates()
    return templates
@router.get("/profiles/templates/{template_key}")
def get_profile_template(template_key: str, session: dict = Depends(require_admin)):
    """
    Return a single profile template by key (admin only).

    Known keys: running, meditation, strength. Responds 404 for
    anything else.
    """
    template = get_template(template_key)
    if template:
        return template
    raise HTTPException(404, f"Template '{template_key}' not found")
@router.post("/{type_id}/profile/apply-template")
def apply_profile_template(
    type_id: int,
    data: dict,
    session: dict = Depends(require_admin)
):
    """
    Apply a profile template to a training type (admin only).

    Body: { "template_key": "running" }

    Responds 400 when template_key is missing, 404 when either the
    template or the training type does not exist.
    """
    template_key = data.get("template_key")
    if not template_key:
        raise HTTPException(400, "template_key required")

    selected_template = get_template(template_key)
    if not selected_template:
        raise HTTPException(404, f"Template '{template_key}' not found")

    with get_db() as conn:
        cur = get_cursor(conn)

        # Verify the target training type exists before writing.
        cur.execute("SELECT id, name_de FROM training_types WHERE id = %s", (type_id,))
        target = cur.fetchone()
        if not target:
            raise HTTPException(404, "Training type not found")

        # Persist the template as the type's profile (JSONB column).
        cur.execute("""
            UPDATE training_types
            SET profile = %s
            WHERE id = %s
        """, (Json(selected_template), type_id))

        logger.info(f"[ADMIN] Applied template '{template_key}' to training type {type_id} ({target['name_de']})")

        return {
            "message": f"Template '{template_key}' applied successfully",
            "training_type_id": type_id,
            "training_type_name": target['name_de'],
            "template_key": template_key
        }
@router.get("/profiles/stats")
def get_profile_stats(session: dict = Depends(require_admin)):
    """
    Return statistics about configured profiles (admin only).

    Provides the total/configured/unconfigured counts plus the concrete
    lists of training types with and without a profile.
    """
    with get_db() as conn:
        cur = get_cursor(conn)

        # Aggregate counts in a single pass over the table.
        cur.execute("""
            SELECT
                COUNT(*) as total,
                COUNT(profile) as configured,
                COUNT(*) - COUNT(profile) as unconfigured
            FROM training_types
        """)
        counts = cur.fetchone()

        # Types that already have a profile configured.
        cur.execute("""
            SELECT id, name_de, category, subcategory
            FROM training_types
            WHERE profile IS NOT NULL
            ORDER BY name_de
        """)
        with_profile = [r2d(row) for row in cur.fetchall()]

        # Types still lacking a profile.
        cur.execute("""
            SELECT id, name_de, category, subcategory
            FROM training_types
            WHERE profile IS NULL
            ORDER BY name_de
        """)
        without_profile = [r2d(row) for row in cur.fetchall()]

        return {
            "total": counts['total'],
            "configured": counts['configured'],
            "unconfigured": counts['unconfigured'],
            "configured_types": with_profile,
            "unconfigured_types": without_profile
        }

View File

@ -0,0 +1,393 @@
"""
Blood Pressure Router - v9d Phase 2d Refactored
Context-dependent blood pressure measurements (multiple times per day):
- Systolic/Diastolic Blood Pressure
- Pulse during measurement
- Context tagging (morning_fasted, after_meal, before_training, etc.)
- Warning flags (irregular heartbeat, AFib)
Endpoints:
- GET /api/blood-pressure List BP measurements
- GET /api/blood-pressure/by-date/{date} Get measurements for specific date
- POST /api/blood-pressure Create BP measurement
- PUT /api/blood-pressure/{id} Update BP measurement
- DELETE /api/blood-pressure/{id} Delete BP measurement
- GET /api/blood-pressure/stats Statistics and trends
- POST /api/blood-pressure/import/omron Import Omron CSV
"""
from fastapi import APIRouter, HTTPException, Depends, Header, UploadFile, File
from pydantic import BaseModel
from typing import Optional
from datetime import datetime, timedelta
import logging
import csv
import io
from db import get_db, get_cursor, r2d
from auth import require_auth
from routers.profiles import get_pid
router = APIRouter(prefix="/api/blood-pressure", tags=["blood_pressure"])
logger = logging.getLogger(__name__)
# German month mapping for Omron dates.
# Maps full month names plus common abbreviations (with and without a
# trailing dot) to two-digit month strings; "Mai" has no abbreviations.
GERMAN_MONTHS = {
    'Januar': '01', 'Jan.': '01', 'Jan': '01',
    'Februar': '02', 'Feb.': '02', 'Feb': '02',
    'März': '03', 'Mär.': '03', 'Mär': '03',
    'April': '04', 'Apr.': '04', 'Apr': '04',
    'Mai': '05',
    'Juni': '06', 'Jun.': '06', 'Jun': '06',
    'Juli': '07', 'Jul.': '07', 'Jul': '07',
    'August': '08', 'Aug.': '08', 'Aug': '08',
    'September': '09', 'Sep.': '09', 'Sep': '09',
    'Oktober': '10', 'Okt.': '10', 'Okt': '10',
    'November': '11', 'Nov.': '11', 'Nov': '11',
    'Dezember': '12', 'Dez.': '12', 'Dez': '12',
}
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Pydantic Models
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
class BPEntry(BaseModel):
    """Request body for creating or updating a blood pressure measurement."""

    measured_at: str  # ISO format datetime
    systolic: int  # upper reading (units not enforced here — presumably mmHg)
    diastolic: int  # lower reading
    pulse: Optional[int] = None  # pulse taken during the measurement
    context: Optional[str] = None  # morning_fasted, after_meal, etc.
    irregular_heartbeat: Optional[bool] = False  # device irregular-heartbeat flag
    possible_afib: Optional[bool] = False  # device AFib warning flag
    note: Optional[str] = None  # free-text note
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Helper Functions
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
def parse_omron_date(date_str: str, time_str: str) -> Optional[str]:
    """
    Parse Omron's German date/time format into an ISO datetime string.

    Args:
        date_str: e.g. "13 März 2026". A trailing dot after the day
                  ("13. März 2026") is tolerated as well.
        time_str: e.g. "08:30" or "08:30:45".

    Returns:
        "YYYY-MM-DD HH:MM:SS" on success, or None when the input cannot
        be parsed (wrong token count, unknown month name, invalid value).
        (Annotation fixed: the original claimed ``str`` but returned None
        on failure.)
    """
    try:
        parts = date_str.strip().split()
        if len(parts) != 3:
            return None

        day, month_name, year = parts
        # Some exports write the day as "13." — strip the dot before padding,
        # otherwise validation below rejects the date.
        day = day.rstrip(".")

        month = GERMAN_MONTHS.get(month_name)
        if not month:
            return None

        clock = time_str.strip()
        if clock.count(":") == 1:
            # "HH:MM" -> append seconds. "HH:MM:SS" now passes through
            # unchanged (the original always appended ":00", turning
            # seconds-bearing times into invalid "HH:MM:SS:00").
            clock = f"{clock}:00"

        iso_datetime = f"{year}-{month}-{day.zfill(2)} {clock}"

        # Validate by round-tripping through datetime; failures fall
        # through to the except branch and yield None.
        datetime.fromisoformat(iso_datetime)

        return iso_datetime
    except Exception as e:
        logger.error(f"Error parsing Omron date: {date_str} {time_str} - {e}")
        return None
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# CRUD Endpoints
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@router.get("")
def list_bp_measurements(
    limit: int = 90,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Return the most recent blood pressure measurements (newest first, at most *limit*)."""
    profile_id = get_pid(x_profile_id)
    with get_db() as connection:
        cursor = get_cursor(connection)
        cursor.execute("""
            SELECT * FROM blood_pressure_log
            WHERE profile_id = %s
            ORDER BY measured_at DESC
            LIMIT %s
        """, (profile_id, limit))
        rows = cursor.fetchall()
        return [r2d(row) for row in rows]
@router.get("/by-date/{date}")
def get_bp_by_date(
    date: str,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Return every BP measurement taken on the given date, oldest first."""
    pid = get_pid(x_profile_id)
    with get_db() as conn:
        cursor = get_cursor(conn)
        cursor.execute("""
            SELECT * FROM blood_pressure_log
            WHERE profile_id = %s
            AND DATE(measured_at) = %s
            ORDER BY measured_at ASC
        """, (pid, date))
        rows = cursor.fetchall()
    return [r2d(row) for row in rows]
@router.post("")
def create_bp_measurement(
    entry: BPEntry,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Insert a manually entered BP measurement and return the stored row."""
    pid = get_pid(x_profile_id)
    params = (
        pid, entry.measured_at,
        entry.systolic, entry.diastolic, entry.pulse,
        entry.context, entry.irregular_heartbeat, entry.possible_afib,
        entry.note
    )
    with get_db() as conn:
        cursor = get_cursor(conn)
        cursor.execute("""
            INSERT INTO blood_pressure_log (
                profile_id, measured_at,
                systolic, diastolic, pulse,
                context, irregular_heartbeat, possible_afib,
                note, source
            ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, 'manual')
            RETURNING *
        """, params)
        return r2d(cursor.fetchone())
@router.put("/{entry_id}")
def update_bp_measurement(
    entry_id: int,
    entry: BPEntry,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Overwrite an existing BP measurement (ownership-checked) and return it."""
    pid = get_pid(x_profile_id)
    params = (
        entry.measured_at,
        entry.systolic, entry.diastolic, entry.pulse,
        entry.context, entry.irregular_heartbeat, entry.possible_afib,
        entry.note,
        entry_id, pid
    )
    with get_db() as conn:
        cursor = get_cursor(conn)
        cursor.execute("""
            UPDATE blood_pressure_log
            SET measured_at = %s,
                systolic = %s,
                diastolic = %s,
                pulse = %s,
                context = %s,
                irregular_heartbeat = %s,
                possible_afib = %s,
                note = %s
            WHERE id = %s AND profile_id = %s
            RETURNING *
        """, params)
        updated_row = cursor.fetchone()
    if updated_row is None:
        raise HTTPException(404, "Entry not found")
    return r2d(updated_row)
@router.delete("/{entry_id}")
def delete_bp_measurement(
    entry_id: int,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Delete one BP measurement; 404 when it does not exist or is foreign-owned."""
    pid = get_pid(x_profile_id)
    with get_db() as conn:
        cursor = get_cursor(conn)
        cursor.execute("""
            DELETE FROM blood_pressure_log
            WHERE id = %s AND profile_id = %s
        """, (entry_id, pid))
        # rowcount doubles as the existence/ownership check
        if cursor.rowcount == 0:
            raise HTTPException(404, "Entry not found")
    return {"ok": True}
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Statistics & Trends
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@router.get("/stats")
def get_bp_stats(
    days: int = 30,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Aggregate blood pressure statistics over the last ``days`` days."""
    pid = get_pid(x_profile_id)
    cutoff_date = datetime.now() - timedelta(days=days)
    with get_db() as conn:
        cursor = get_cursor(conn)
        cursor.execute("""
            SELECT
                COUNT(*) as total_measurements,
                -- Overall averages
                AVG(systolic) as avg_systolic,
                AVG(diastolic) as avg_diastolic,
                AVG(pulse) FILTER (WHERE pulse IS NOT NULL) as avg_pulse,
                -- 7-day averages
                AVG(systolic) FILTER (WHERE measured_at >= NOW() - INTERVAL '7 days') as avg_systolic_7d,
                AVG(diastolic) FILTER (WHERE measured_at >= NOW() - INTERVAL '7 days') as avg_diastolic_7d,
                -- Context-specific averages
                AVG(systolic) FILTER (WHERE context = 'morning_fasted') as avg_systolic_morning,
                AVG(diastolic) FILTER (WHERE context = 'morning_fasted') as avg_diastolic_morning,
                AVG(systolic) FILTER (WHERE context = 'evening') as avg_systolic_evening,
                AVG(diastolic) FILTER (WHERE context = 'evening') as avg_diastolic_evening,
                -- Warning flags
                COUNT(*) FILTER (WHERE irregular_heartbeat = true) as irregular_count,
                COUNT(*) FILTER (WHERE possible_afib = true) as afib_count
            FROM blood_pressure_log
            WHERE profile_id = %s AND measured_at >= %s
        """, (pid, cutoff_date))
        stats = r2d(cursor.fetchone())
    # Classify the average reading (WHO/ISH grading). The first band where
    # BOTH component averages are below the limits wins, so the worse of the
    # two components determines the category.
    avg_sys = stats['avg_systolic']
    avg_dia = stats['avg_diastolic']
    if avg_sys and avg_dia:
        bands = (
            (120, 80, 'optimal'),
            (130, 85, 'normal'),
            (140, 90, 'high_normal'),
            (160, 100, 'grade_1_hypertension'),
            (180, 110, 'grade_2_hypertension'),
        )
        for sys_limit, dia_limit, label in bands:
            if avg_sys < sys_limit and avg_dia < dia_limit:
                stats['bp_category'] = label
                break
        else:
            stats['bp_category'] = 'grade_3_hypertension'
    else:
        stats['bp_category'] = None
    return stats
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Import: Omron CSV
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@router.post("/import/omron")
async def import_omron_csv(
    file: UploadFile = File(...),
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """
    Import blood pressure measurements from an Omron CSV export.

    Rows are upserted on (profile_id, measured_at). Manually entered
    measurements are never overwritten: the upsert's WHERE clause suppresses
    the update and the row is counted as skipped.

    Returns counts of inserted/updated/skipped rows and parse errors.
    """
    pid = get_pid(x_profile_id)
    content = await file.read()
    decoded = content.decode('utf-8')
    reader = csv.DictReader(io.StringIO(decoded))
    inserted = 0
    updated = 0
    skipped = 0
    errors = 0
    with get_db() as conn:
        cur = get_cursor(conn)
        for row in reader:
            try:
                # Omron exports use German column headers; fall back to English
                date_str = row.get('Datum', row.get('Date'))
                time_str = row.get('Zeit', row.get('Time', '08:00'))
                if not date_str:
                    skipped += 1
                    continue
                measured_at = parse_omron_date(date_str, time_str)
                if not measured_at:
                    errors += 1
                    continue
                # Extract measurements (still strings at this point)
                systolic = row.get('Systolisch', row.get('Systolic'))
                diastolic = row.get('Diastolisch', row.get('Diastolic'))
                pulse = row.get('Puls', row.get('Pulse'))
                if not systolic or not diastolic:
                    skipped += 1
                    continue
                # Device warning flags ("Ja"/"Yes"-style columns)
                irregular = row.get('Unregelmäßiger Herzschlag', row.get('Irregular Heartbeat', ''))
                afib = row.get('Vorhofflimmern', row.get('AFib', ''))
                irregular_heartbeat = irregular.lower() in ['ja', 'yes', 'true', '1']
                possible_afib = afib.lower() in ['ja', 'yes', 'true', '1']
                # Derive measurement context from the time of day
                hour = int(time_str.split(':')[0])
                if 5 <= hour < 10:
                    context = 'morning_fasted'
                elif 18 <= hour < 23:
                    context = 'evening'
                else:
                    context = 'other'
                # Upsert; WHERE protects manual entries, in which case no row
                # is returned. (xmax = 0) distinguishes insert from update.
                cur.execute("""
                    INSERT INTO blood_pressure_log (
                        profile_id, measured_at,
                        systolic, diastolic, pulse,
                        context, irregular_heartbeat, possible_afib,
                        source
                    ) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, 'omron')
                    ON CONFLICT (profile_id, measured_at)
                    DO UPDATE SET
                        systolic = EXCLUDED.systolic,
                        diastolic = EXCLUDED.diastolic,
                        pulse = EXCLUDED.pulse,
                        context = EXCLUDED.context,
                        irregular_heartbeat = EXCLUDED.irregular_heartbeat,
                        possible_afib = EXCLUDED.possible_afib
                    WHERE blood_pressure_log.source != 'manual'
                    RETURNING (xmax = 0) AS inserted
                """, (
                    pid, measured_at,
                    int(systolic), int(diastolic),
                    int(pulse) if pulse else None,
                    context, irregular_heartbeat, possible_afib
                ))
                result = cur.fetchone()
                if result is None:
                    # Conflict with a manual entry: protected, nothing written.
                    # (Previously miscounted as "updated".)
                    skipped += 1
                elif result['inserted']:
                    inserted += 1
                else:
                    updated += 1
            except Exception as e:
                logger.error(f"Error importing Omron row: {e}")
                errors += 1
    # NOTE(review): no explicit conn.commit() here (matches the other
    # endpoints in this file) — assumes get_db() commits on context exit;
    # verify against db.get_db.
    return {
        "inserted": inserted,
        "updated": updated,
        "skipped": skipped,
        "errors": errors
    }

View File

@ -0,0 +1,146 @@
"""
Evaluation Endpoints - Training Type Profiles
Endpoints for activity evaluation and re-evaluation.
Issue: #15
Date: 2026-03-23
"""
import logging
from typing import Optional
from fastapi import APIRouter, HTTPException, Depends
from db import get_db, get_cursor, r2d
from auth import require_auth, require_admin
from evaluation_helper import (
evaluate_and_save_activity,
batch_evaluate_activities,
load_parameters_registry
)
router = APIRouter(prefix="/api/evaluation", tags=["evaluation"])
logger = logging.getLogger(__name__)
@router.get("/parameters")
def list_parameters(session: dict = Depends(require_auth)):
    """Return every training parameter known to the registry, plus a count."""
    with get_db() as conn:
        registry = load_parameters_registry(get_cursor(conn))
    entries = list(registry.values())
    return {"parameters": entries, "count": len(entries)}
@router.post("/activity/{activity_id}")
def evaluate_activity(
    activity_id: str,
    session: dict = Depends(require_auth)
):
    """
    Evaluate (or re-evaluate) a single activity owned by the caller.

    Returns the evaluation result, or ``evaluation: None`` when the
    activity's training type has no profile configured.
    """
    profile_id = session['profile_id']
    with get_db() as conn:
        cur = get_cursor(conn)
        # Load the activity, scoped to the caller's profile
        cur.execute("""
            SELECT id, profile_id, date, training_type_id, duration_min,
                   hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
                   rpe, pace_min_per_km, cadence, elevation_gain
            FROM activity_log
            WHERE id = %s AND profile_id = %s
        """, (activity_id, profile_id))
        record = cur.fetchone()
        if record is None:
            raise HTTPException(404, "Activity not found")
        activity_dict = dict(record)
        result = evaluate_and_save_activity(
            cur,
            activity_dict["id"],
            activity_dict,
            activity_dict["training_type_id"],
            profile_id
        )
    if not result:
        return {
            "message": "No profile configured for this training type",
            "evaluation": None
        }
    return {
        "message": "Activity evaluated",
        "evaluation": result
    }
@router.post("/batch")
def batch_evaluate(
    limit: Optional[int] = None,
    session: dict = Depends(require_auth)
):
    """
    Re-evaluate all activities of the current user.

    The optional ``limit`` restricts the number of processed activities
    (useful for testing).
    """
    profile_id = session['profile_id']
    with get_db() as conn:
        stats = batch_evaluate_activities(get_cursor(conn), profile_id, limit)
    return {"message": "Batch evaluation completed", "stats": stats}
@router.post("/batch/all")
def batch_evaluate_all(session: dict = Depends(require_admin)):
    """
    Admin-only: re-evaluate all activities for every profile.

    Use with caution on large databases!
    """
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute("SELECT id FROM profiles")
        profiles = cur.fetchall()
        total_stats = {
            "profiles": len(profiles),
            "total": 0,
            "evaluated": 0,
            "skipped": 0,
            "errors": 0
        }
        # Accumulate per-profile results into the shared totals
        for profile in profiles:
            stats = batch_evaluate_activities(cur, profile['id'])
            for key in ("total", "evaluated", "skipped", "errors"):
                total_stats[key] += stats[key]
    return {
        "message": "Batch evaluation for all users completed",
        "stats": total_stats
    }

View File

@ -77,13 +77,27 @@ def _get_profile_data(pid: str):
nutrition = [r2d(r) for r in cur.fetchall()]
cur.execute("SELECT * FROM activity_log WHERE profile_id=%s ORDER BY date DESC LIMIT 90", (pid,))
activity = [r2d(r) for r in cur.fetchall()]
# v9d Phase 2: Sleep, Rest Days, Vitals
cur.execute("SELECT * FROM sleep_log WHERE profile_id=%s ORDER BY date DESC LIMIT 30", (pid,))
sleep = [r2d(r) for r in cur.fetchall()]
cur.execute("SELECT * FROM rest_days WHERE profile_id=%s ORDER BY date DESC LIMIT 30", (pid,))
rest_days = [r2d(r) for r in cur.fetchall()]
# v9d Phase 2d Refactored: separate baseline and BP tables
cur.execute("SELECT * FROM vitals_baseline WHERE profile_id=%s ORDER BY date DESC LIMIT 30", (pid,))
vitals_baseline = [r2d(r) for r in cur.fetchall()]
cur.execute("SELECT * FROM blood_pressure_log WHERE profile_id=%s ORDER BY measured_at DESC LIMIT 90", (pid,))
blood_pressure = [r2d(r) for r in cur.fetchall()]
return {
"profile": prof,
"weight": weight,
"circumference": circ,
"caliper": caliper,
"nutrition": nutrition,
"activity": activity
"activity": activity,
"sleep": sleep,
"rest_days": rest_days,
"vitals_baseline": vitals_baseline,
"blood_pressure": blood_pressure
}
@ -103,6 +117,10 @@ def _prepare_template_vars(data: dict) -> dict:
caliper = data['caliper']
nutrition = data['nutrition']
activity = data['activity']
sleep = data.get('sleep', [])
rest_days = data.get('rest_days', [])
vitals_baseline = data.get('vitals_baseline', [])
blood_pressure = data.get('blood_pressure', [])
vars = {
"name": prof.get('name', 'Nutzer'),
@ -192,6 +210,83 @@ def _prepare_template_vars(data: dict) -> dict:
vars['activity_detail'] = "keine Daten"
vars['activity_kcal_summary'] = "keine Daten"
# Sleep summary (v9d Phase 2b)
if sleep:
n = len(sleep)
avg_duration = sum(float(s.get('duration_minutes',0) or 0) for s in sleep) / n
avg_quality = sum(int(s.get('quality',0) or 0) for s in sleep if s.get('quality')) / max(sum(1 for s in sleep if s.get('quality')), 1)
deep_data = [s for s in sleep if s.get('deep_minutes')]
avg_deep = sum(float(s.get('deep_minutes',0)) for s in deep_data) / len(deep_data) if deep_data else 0
vars['sleep_summary'] = f"{n} Nächte, Ø {avg_duration/60:.1f}h Schlafdauer, Qualität {avg_quality:.1f}/5"
vars['sleep_detail'] = f"Ø {avg_duration:.0f}min gesamt, {avg_deep:.0f}min Tiefschlaf"
vars['sleep_avg_duration'] = round(avg_duration)
vars['sleep_avg_quality'] = round(avg_quality, 1)
vars['sleep_nights'] = n
else:
vars['sleep_summary'] = "keine Daten"
vars['sleep_detail'] = "keine Daten"
vars['sleep_avg_duration'] = 0
vars['sleep_avg_quality'] = 0
vars['sleep_nights'] = 0
# Rest Days summary (v9d Phase 2a)
if rest_days:
n = len(rest_days)
types = {}
for rd in rest_days:
rt = rd.get('rest_type', 'unknown')
types[rt] = types.get(rt, 0) + 1
type_summary = ", ".join([f"{k}: {v}x" for k, v in types.items()])
vars['rest_days_summary'] = f"{n} Ruhetage (letzte 30d): {type_summary}"
vars['rest_days_count'] = n
vars['rest_days_types'] = type_summary
else:
vars['rest_days_summary'] = "keine Daten"
vars['rest_days_count'] = 0
vars['rest_days_types'] = "keine"
# Vitals Baseline summary (v9d Phase 2d Refactored)
if vitals_baseline:
n = len(vitals_baseline)
hr_data = [v for v in vitals_baseline if v.get('resting_hr')]
hrv_data = [v for v in vitals_baseline if v.get('hrv')]
vo2_data = [v for v in vitals_baseline if v.get('vo2_max')]
avg_hr = sum(int(v.get('resting_hr')) for v in hr_data) / len(hr_data) if hr_data else 0
avg_hrv = sum(int(v.get('hrv')) for v in hrv_data) / len(hrv_data) if hrv_data else 0
latest_vo2 = float(vo2_data[0].get('vo2_max')) if vo2_data else 0
parts = []
if avg_hr: parts.append(f"Ruhepuls Ø {avg_hr:.0f}bpm")
if avg_hrv: parts.append(f"HRV Ø {avg_hrv:.0f}ms")
if latest_vo2: parts.append(f"VO2 Max {latest_vo2:.1f}")
vars['vitals_summary'] = f"{n} Messungen: " + ", ".join(parts) if parts else "keine verwertbaren Daten"
vars['vitals_detail'] = vars['vitals_summary']
vars['vitals_avg_hr'] = round(avg_hr)
vars['vitals_avg_hrv'] = round(avg_hrv)
vars['vitals_vo2_max'] = round(latest_vo2, 1) if latest_vo2 else "k.A."
else:
vars['vitals_summary'] = "keine Daten"
vars['vitals_detail'] = "keine Daten"
vars['vitals_avg_hr'] = 0
vars['vitals_avg_hrv'] = 0
vars['vitals_vo2_max'] = "k.A."
# Blood Pressure summary (v9d Phase 2d Refactored)
if blood_pressure:
n = len(blood_pressure)
bp_data = [bp for bp in blood_pressure if bp.get('systolic') and bp.get('diastolic')]
avg_bp_sys = sum(int(bp.get('systolic')) for bp in bp_data) / len(bp_data) if bp_data else 0
avg_bp_dia = sum(int(bp.get('diastolic')) for bp in bp_data) / len(bp_data) if bp_data else 0
vars['vitals_avg_bp'] = f"{round(avg_bp_sys)}/{round(avg_bp_dia)}" if avg_bp_sys else "k.A."
vars['bp_summary'] = f"{n} Messungen, Ø {avg_bp_sys:.0f}/{avg_bp_dia:.0f} mmHg" if avg_bp_sys else "keine Daten"
else:
vars['vitals_avg_bp'] = "k.A."
vars['bp_summary'] = "keine Daten"
return vars

681
backend/routers/vitals.py Normal file
View File

@ -0,0 +1,681 @@
"""
Vitals Router - Resting HR + HRV Tracking
v9d Phase 2: Vitals Module
Endpoints:
- GET /api/vitals List vitals (with limit)
- GET /api/vitals/by-date/{date} Get vitals for specific date
- POST /api/vitals Create/update vitals (upsert)
- PUT /api/vitals/{id} Update vitals
- DELETE /api/vitals/{id} Delete vitals
- GET /api/vitals/stats Get vitals statistics
- POST /api/vitals/import/omron Import Omron CSV
- POST /api/vitals/import/apple-health Import Apple Health CSV
"""
from fastapi import APIRouter, HTTPException, Depends, Header, UploadFile, File
from pydantic import BaseModel
from typing import Optional
from datetime import datetime, timedelta
import logging
import csv
import io
from dateutil import parser as date_parser
from db import get_db, get_cursor, r2d
from auth import require_auth
router = APIRouter(prefix="/api/vitals", tags=["vitals"])
logger = logging.getLogger(__name__)
# German month mapping for Omron dates.
# Covers the full names plus 3-letter abbreviations with and without a
# trailing dot (e.g. "Mär." and "Mär"), matching the mapping used by the
# blood pressure router — the previous version was missing 'Mär.', 'Jun.',
# 'Jul.' and all dotless abbreviations, making parse_omron_date raise on
# otherwise valid Omron dates.
GERMAN_MONTHS = {
    'Januar': '01', 'Jan.': '01', 'Jan': '01',
    'Februar': '02', 'Feb.': '02', 'Feb': '02',
    'März': '03', 'Mär.': '03', 'Mär': '03',
    'April': '04', 'Apr.': '04', 'Apr': '04',
    'Mai': '05',
    'Juni': '06', 'Jun.': '06', 'Jun': '06',
    'Juli': '07', 'Jul.': '07', 'Jul': '07',
    'August': '08', 'Aug.': '08', 'Aug': '08',
    'September': '09', 'Sep.': '09', 'Sep': '09',
    'Oktober': '10', 'Okt.': '10', 'Okt': '10',
    'November': '11', 'Nov.': '11', 'Nov': '11',
    'Dezember': '12', 'Dez.': '12', 'Dez': '12',
}
class VitalsEntry(BaseModel):
    """Request body for the daily vitals upsert; all measurement fields optional."""
    date: str  # ISO date (YYYY-MM-DD); upsert key together with profile_id
    resting_hr: Optional[int] = None  # resting heart rate in bpm
    hrv: Optional[int] = None  # heart rate variability in ms
    blood_pressure_systolic: Optional[int] = None  # mmHg
    blood_pressure_diastolic: Optional[int] = None  # mmHg
    pulse: Optional[int] = None  # pulse in bpm (e.g. taken with a BP reading)
    vo2_max: Optional[float] = None  # ml/(kg·min), e.g. from Apple Watch
    spo2: Optional[int] = None  # blood oxygen saturation in %
    respiratory_rate: Optional[float] = None  # breaths per minute
    irregular_heartbeat: Optional[bool] = None  # device warning flag
    possible_afib: Optional[bool] = None  # device warning flag (atrial fibrillation)
    note: Optional[str] = None  # free-text note
class VitalsUpdate(BaseModel):
    """Partial-update body for PUT /api/vitals/{id}; None fields are left unchanged."""
    date: Optional[str] = None  # ISO date (YYYY-MM-DD)
    resting_hr: Optional[int] = None  # resting heart rate in bpm
    hrv: Optional[int] = None  # heart rate variability in ms
    blood_pressure_systolic: Optional[int] = None  # mmHg
    blood_pressure_diastolic: Optional[int] = None  # mmHg
    pulse: Optional[int] = None  # pulse in bpm
    vo2_max: Optional[float] = None  # ml/(kg·min)
    spo2: Optional[int] = None  # blood oxygen saturation in %
    respiratory_rate: Optional[float] = None  # breaths per minute
    irregular_heartbeat: Optional[bool] = None  # device warning flag
    possible_afib: Optional[bool] = None  # device warning flag (atrial fibrillation)
    note: Optional[str] = None  # free-text note
def get_pid(x_profile_id: Optional[str], session: dict) -> str:
    """Resolve the effective profile id.

    The ``x_profile_id`` header is intentionally ignored: the authenticated
    session is the only trusted source of the profile id.
    """
    return session["profile_id"]
@router.get("")
def list_vitals(
    limit: int = 90,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Return the newest vitals entries for the current profile (at most ``limit``)."""
    pid = get_pid(x_profile_id, session)
    with get_db() as conn:
        cursor = get_cursor(conn)
        cursor.execute(
            """
            SELECT id, profile_id, date, resting_hr, hrv,
                   blood_pressure_systolic, blood_pressure_diastolic, pulse,
                   vo2_max, spo2, respiratory_rate,
                   irregular_heartbeat, possible_afib,
                   note, source, created_at, updated_at
            FROM vitals_log
            WHERE profile_id = %s
            ORDER BY date DESC
            LIMIT %s
            """,
            (pid, limit)
        )
        rows = cursor.fetchall()
    return [r2d(row) for row in rows]
@router.get("/by-date/{date}")
def get_vitals_by_date(
    date: str,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Return the single vitals entry stored for ``date``; 404 when none exists."""
    pid = get_pid(x_profile_id, session)
    with get_db() as conn:
        cursor = get_cursor(conn)
        cursor.execute(
            """
            SELECT id, profile_id, date, resting_hr, hrv,
                   blood_pressure_systolic, blood_pressure_diastolic, pulse,
                   vo2_max, spo2, respiratory_rate,
                   irregular_heartbeat, possible_afib,
                   note, source, created_at, updated_at
            FROM vitals_log
            WHERE profile_id = %s AND date = %s
            """,
            (pid, date)
        )
        found = cursor.fetchone()
    if found is None:
        raise HTTPException(404, "Keine Vitalwerte für dieses Datum gefunden")
    return r2d(found)
@router.post("")
def create_vitals(
    entry: VitalsEntry,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """
    Create or update the vitals entry for a date (upsert).

    On a (profile_id, date) conflict, submitted non-None fields overwrite the
    stored values; omitted fields keep their previous values (COALESCE).

    Raises:
        HTTPException 400: if no vital value is provided at all.
    """
    pid = get_pid(x_profile_id, session)
    # Validation: at least one vital must be explicitly provided. Checking
    # `is not None` rather than truthiness, so an explicit 0 counts as data.
    # NOTE(review): `pulse` alone does not satisfy the check — presumably it
    # only accompanies a blood pressure reading; confirm intent.
    provided = (
        entry.resting_hr, entry.hrv, entry.blood_pressure_systolic,
        entry.blood_pressure_diastolic, entry.vo2_max, entry.spo2,
        entry.respiratory_rate
    )
    if all(value is None for value in provided):
        raise HTTPException(400, "Mindestens ein Vitalwert muss angegeben werden")
    with get_db() as conn:
        cur = get_cursor(conn)
        # Upsert: insert, or merge into the existing row for this date
        cur.execute(
            """
            INSERT INTO vitals_log (
                profile_id, date, resting_hr, hrv,
                blood_pressure_systolic, blood_pressure_diastolic, pulse,
                vo2_max, spo2, respiratory_rate,
                irregular_heartbeat, possible_afib,
                note, source
            )
            VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, 'manual')
            ON CONFLICT (profile_id, date)
            DO UPDATE SET
                resting_hr = COALESCE(EXCLUDED.resting_hr, vitals_log.resting_hr),
                hrv = COALESCE(EXCLUDED.hrv, vitals_log.hrv),
                blood_pressure_systolic = COALESCE(EXCLUDED.blood_pressure_systolic, vitals_log.blood_pressure_systolic),
                blood_pressure_diastolic = COALESCE(EXCLUDED.blood_pressure_diastolic, vitals_log.blood_pressure_diastolic),
                pulse = COALESCE(EXCLUDED.pulse, vitals_log.pulse),
                vo2_max = COALESCE(EXCLUDED.vo2_max, vitals_log.vo2_max),
                spo2 = COALESCE(EXCLUDED.spo2, vitals_log.spo2),
                respiratory_rate = COALESCE(EXCLUDED.respiratory_rate, vitals_log.respiratory_rate),
                irregular_heartbeat = COALESCE(EXCLUDED.irregular_heartbeat, vitals_log.irregular_heartbeat),
                possible_afib = COALESCE(EXCLUDED.possible_afib, vitals_log.possible_afib),
                note = COALESCE(EXCLUDED.note, vitals_log.note),
                updated_at = CURRENT_TIMESTAMP
            RETURNING id, profile_id, date, resting_hr, hrv,
                      blood_pressure_systolic, blood_pressure_diastolic, pulse,
                      vo2_max, spo2, respiratory_rate,
                      irregular_heartbeat, possible_afib,
                      note, source, created_at, updated_at
            """,
            (pid, entry.date, entry.resting_hr, entry.hrv,
             entry.blood_pressure_systolic, entry.blood_pressure_diastolic, entry.pulse,
             entry.vo2_max, entry.spo2, entry.respiratory_rate,
             entry.irregular_heartbeat, entry.possible_afib,
             entry.note)
        )
        row = cur.fetchone()
        conn.commit()
        logger.info(f"[VITALS] Upserted vitals for {pid} on {entry.date}")
        return r2d(row)
@router.put("/{vitals_id}")
def update_vitals(
    vitals_id: int,
    updates: VitalsUpdate,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """
    Partially update an existing vitals entry.

    Only fields that are not None in ``updates`` are written; every other
    column keeps its stored value.

    Raises:
        HTTPException 404: entry does not exist or belongs to another profile.
        HTTPException 400: no updatable field was provided.
    """
    pid = get_pid(x_profile_id, session)
    with get_db() as conn:
        cur = get_cursor(conn)
        # Check ownership
        cur.execute(
            "SELECT id FROM vitals_log WHERE id = %s AND profile_id = %s",
            (vitals_id, pid)
        )
        if not cur.fetchone():
            raise HTTPException(404, "Eintrag nicht gefunden")
        # Build the SET clause from the provided (non-None) fields. Column
        # names come from this fixed whitelist, never from user input.
        candidates = [
            ("date", updates.date),
            ("resting_hr", updates.resting_hr),
            ("hrv", updates.hrv),
            ("blood_pressure_systolic", updates.blood_pressure_systolic),
            ("blood_pressure_diastolic", updates.blood_pressure_diastolic),
            ("pulse", updates.pulse),
            ("vo2_max", updates.vo2_max),
            ("spo2", updates.spo2),
            ("respiratory_rate", updates.respiratory_rate),
            ("irregular_heartbeat", updates.irregular_heartbeat),
            ("possible_afib", updates.possible_afib),
            ("note", updates.note),
        ]
        fields = []
        values = []
        for column, value in candidates:
            if value is not None:
                fields.append(f"{column} = %s")
                values.append(value)
        if not fields:
            raise HTTPException(400, "Keine Änderungen angegeben")
        fields.append("updated_at = CURRENT_TIMESTAMP")
        values.append(vitals_id)
        query = f"""
            UPDATE vitals_log
            SET {', '.join(fields)}
            WHERE id = %s
            RETURNING id, profile_id, date, resting_hr, hrv,
                      blood_pressure_systolic, blood_pressure_diastolic, pulse,
                      vo2_max, spo2, respiratory_rate,
                      irregular_heartbeat, possible_afib,
                      note, source, created_at, updated_at
        """
        cur.execute(query, values)
        row = cur.fetchone()
        conn.commit()
        return r2d(row)
@router.delete("/{vitals_id}")
def delete_vitals(
    vitals_id: int,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Delete a vitals entry; 404 if it does not exist or is owned by another profile."""
    pid = get_pid(x_profile_id, session)
    with get_db() as conn:
        cursor = get_cursor(conn)
        # RETURNING id doubles as the existence/ownership check
        cursor.execute(
            "DELETE FROM vitals_log WHERE id = %s AND profile_id = %s RETURNING id",
            (vitals_id, pid)
        )
        if cursor.fetchone() is None:
            raise HTTPException(404, "Eintrag nicht gefunden")
        conn.commit()
        logger.info(f"[VITALS] Deleted vitals {vitals_id} for {pid}")
        return {"message": "Eintrag gelöscht"}
@router.get("/stats")
def get_vitals_stats(
    days: int = 30,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """
    Get vitals statistics over the last N days.

    Returns:
    - latest entry (date, resting_hr, hrv) within the period
    - 7d/30d averages for resting HR, HRV, blood pressure and SpO2
    - most recent VO2 Max on record (not limited to the period)
    - simple 14-day trends ("increasing"/"decreasing"/"stable") for resting
      HR and HRV
    """
    pid = get_pid(x_profile_id, session)
    with get_db() as conn:
        cur = get_cursor(conn)
        # Get latest entry
        # NOTE(review): the "%s" inside INTERVAL '%s days' relies on the
        # driver substituting the integer client-side (psycopg2-style);
        # confirm against the db module.
        cur.execute(
            """
            SELECT date, resting_hr, hrv
            FROM vitals_log
            WHERE profile_id = %s AND date >= CURRENT_DATE - INTERVAL '%s days'
            ORDER BY date DESC
            LIMIT 1
            """,
            (pid, days)
        )
        latest = cur.fetchone()
        # Get averages (7d and 30d) in a single scan; the window is widened
        # to at least 30 days so the 30d averages are always complete.
        cur.execute(
            """
            SELECT
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '7 days' THEN resting_hr END) as avg_hr_7d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '30 days' THEN resting_hr END) as avg_hr_30d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '7 days' THEN hrv END) as avg_hrv_7d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '30 days' THEN hrv END) as avg_hrv_30d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '7 days' THEN blood_pressure_systolic END) as avg_bp_sys_7d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '30 days' THEN blood_pressure_systolic END) as avg_bp_sys_30d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '7 days' THEN blood_pressure_diastolic END) as avg_bp_dia_7d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '30 days' THEN blood_pressure_diastolic END) as avg_bp_dia_30d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '7 days' THEN spo2 END) as avg_spo2_7d,
                AVG(CASE WHEN date >= CURRENT_DATE - INTERVAL '30 days' THEN spo2 END) as avg_spo2_30d,
                COUNT(*) as total_entries
            FROM vitals_log
            WHERE profile_id = %s AND date >= CURRENT_DATE - INTERVAL '%s days'
            """,
            (pid, max(days, 30))
        )
        stats_row = cur.fetchone()
        # Get latest VO2 Max (unbounded lookback: VO2 Max readings are rare)
        cur.execute(
            """
            SELECT vo2_max
            FROM vitals_log
            WHERE profile_id = %s AND vo2_max IS NOT NULL
            ORDER BY date DESC
            LIMIT 1
            """,
            (pid,)
        )
        vo2_row = cur.fetchone()
        latest_vo2 = vo2_row['vo2_max'] if vo2_row else None
        # Get entries for trend calculation (last 14 days)
        cur.execute(
            """
            SELECT date, resting_hr, hrv
            FROM vitals_log
            WHERE profile_id = %s AND date >= CURRENT_DATE - INTERVAL '14 days'
            ORDER BY date ASC
            """,
            (pid,)
        )
        entries = [r2d(r) for r in cur.fetchall()]
        # Simple trend calculation (compare first half vs second half of the
        # 14-day window); requires at least 4 entries to say anything.
        trend_hr = "stable"
        trend_hrv = "stable"
        if len(entries) >= 4:
            mid = len(entries) // 2
            first_half_hr = [e['resting_hr'] for e in entries[:mid] if e['resting_hr']]
            second_half_hr = [e['resting_hr'] for e in entries[mid:] if e['resting_hr']]
            if first_half_hr and second_half_hr:
                avg_first = sum(first_half_hr) / len(first_half_hr)
                avg_second = sum(second_half_hr) / len(second_half_hr)
                diff = avg_second - avg_first
                # a shift of more than ±2 bpm counts as a trend
                if diff > 2:
                    trend_hr = "increasing"
                elif diff < -2:
                    trend_hr = "decreasing"
            first_half_hrv = [e['hrv'] for e in entries[:mid] if e['hrv']]
            second_half_hrv = [e['hrv'] for e in entries[mid:] if e['hrv']]
            if first_half_hrv and second_half_hrv:
                avg_first_hrv = sum(first_half_hrv) / len(first_half_hrv)
                avg_second_hrv = sum(second_half_hrv) / len(second_half_hrv)
                diff_hrv = avg_second_hrv - avg_first_hrv
                # a shift of more than ±5 ms counts as a trend
                if diff_hrv > 5:
                    trend_hrv = "increasing"
                elif diff_hrv < -5:
                    trend_hrv = "decreasing"
        return {
            "latest": r2d(latest) if latest else None,
            "avg_resting_hr_7d": round(stats_row['avg_hr_7d'], 1) if stats_row['avg_hr_7d'] else None,
            "avg_resting_hr_30d": round(stats_row['avg_hr_30d'], 1) if stats_row['avg_hr_30d'] else None,
            "avg_hrv_7d": round(stats_row['avg_hrv_7d'], 1) if stats_row['avg_hrv_7d'] else None,
            "avg_hrv_30d": round(stats_row['avg_hrv_30d'], 1) if stats_row['avg_hrv_30d'] else None,
            "avg_bp_systolic_7d": round(stats_row['avg_bp_sys_7d'], 1) if stats_row['avg_bp_sys_7d'] else None,
            "avg_bp_systolic_30d": round(stats_row['avg_bp_sys_30d'], 1) if stats_row['avg_bp_sys_30d'] else None,
            "avg_bp_diastolic_7d": round(stats_row['avg_bp_dia_7d'], 1) if stats_row['avg_bp_dia_7d'] else None,
            "avg_bp_diastolic_30d": round(stats_row['avg_bp_dia_30d'], 1) if stats_row['avg_bp_dia_30d'] else None,
            "avg_spo2_7d": round(stats_row['avg_spo2_7d'], 1) if stats_row['avg_spo2_7d'] else None,
            "avg_spo2_30d": round(stats_row['avg_spo2_30d'], 1) if stats_row['avg_spo2_30d'] else None,
            "latest_vo2_max": float(latest_vo2) if latest_vo2 else None,
            "total_entries": stats_row['total_entries'],
            "trend_resting_hr": trend_hr,
            "trend_hrv": trend_hrv,
            "period_days": days
        }
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Import Endpoints
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
def parse_omron_date(date_str: str) -> str:
    """
    Convert an Omron-style German date into ISO format (YYYY-MM-DD).

    Examples:
    - "13 März 2026" -> "2026-03-13"
    - "28 Feb. 2026" -> "2026-02-28"

    Raises ValueError when the string does not consist of exactly three
    tokens or the month name is unknown.
    """
    tokens = date_str.strip().split()
    if len(tokens) != 3:
        raise ValueError(f"Invalid date format: {date_str}")
    day, month_str, year = tokens
    month = GERMAN_MONTHS.get(month_str)
    if month is None:
        raise ValueError(f"Unknown month: {month_str}")
    return f"{year}-{month}-{day.zfill(2)}"
@router.post("/import/omron")
async def import_omron_csv(
    file: UploadFile = File(...),
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """
    Import Omron blood pressure CSV export into vitals_log.

    Expected format (German headers):
    Datum,Zeit,Systolisch (mmHg),Diastolisch (mmHg),Puls (bpm),...

    Rows are upserted on (profile_id, date); manual entries keep their
    'manual' source marker. Returns insert/update/skip counts plus the
    first 10 row-level error messages.
    """
    pid = get_pid(x_profile_id, session)
    # Read file
    content = await file.read()
    content_str = content.decode('utf-8')
    # Parse CSV
    reader = csv.DictReader(io.StringIO(content_str))
    inserted = 0
    updated = 0
    skipped = 0
    errors = []
    with get_db() as conn:
        cur = get_cursor(conn)
        # start=2: row 1 is the CSV header, so error messages match the file
        for row_num, row in enumerate(reader, start=2):
            try:
                # Parse date; a missing 'Datum' column or bad format raises
                # and is recorded as a row error below
                date_str = parse_omron_date(row['Datum'])
                # Parse values; Omron uses '-' as an empty-value sentinel
                systolic = int(row['Systolisch (mmHg)']) if row['Systolisch (mmHg)'] and row['Systolisch (mmHg)'] != '-' else None
                diastolic = int(row['Diastolisch (mmHg)']) if row['Diastolisch (mmHg)'] and row['Diastolisch (mmHg)'] != '-' else None
                pulse = int(row['Puls (bpm)']) if row['Puls (bpm)'] and row['Puls (bpm)'] != '-' else None
                # Skip if no data
                if not systolic and not diastolic and not pulse:
                    skipped += 1
                    continue
                # Parse flags (optional columns): any marker other than
                # empty/'-'/' ' is treated as "flag set"
                irregular = row.get('Unregelmäßiger Herzschlag festgestellt', '').strip() not in ('', '-', ' ')
                afib = row.get('Mögliches AFib', '').strip() not in ('', '-', ' ')
                # Upsert; COALESCE keeps existing values where the CSV has
                # none, the source CASE preserves the 'manual' marker, and
                # (xmax = 0) distinguishes insert from update
                cur.execute(
                    """
                    INSERT INTO vitals_log (
                        profile_id, date, blood_pressure_systolic, blood_pressure_diastolic,
                        pulse, irregular_heartbeat, possible_afib, source
                    )
                    VALUES (%s, %s, %s, %s, %s, %s, %s, 'omron')
                    ON CONFLICT (profile_id, date)
                    DO UPDATE SET
                        blood_pressure_systolic = COALESCE(EXCLUDED.blood_pressure_systolic, vitals_log.blood_pressure_systolic),
                        blood_pressure_diastolic = COALESCE(EXCLUDED.blood_pressure_diastolic, vitals_log.blood_pressure_diastolic),
                        pulse = COALESCE(EXCLUDED.pulse, vitals_log.pulse),
                        irregular_heartbeat = COALESCE(EXCLUDED.irregular_heartbeat, vitals_log.irregular_heartbeat),
                        possible_afib = COALESCE(EXCLUDED.possible_afib, vitals_log.possible_afib),
                        source = CASE WHEN vitals_log.source = 'manual' THEN vitals_log.source ELSE 'omron' END,
                        updated_at = CURRENT_TIMESTAMP
                    RETURNING (xmax = 0) AS inserted
                    """,
                    (pid, date_str, systolic, diastolic, pulse, irregular, afib)
                )
                result = cur.fetchone()
                if result['inserted']:
                    inserted += 1
                else:
                    updated += 1
            except Exception as e:
                # Per-row failures are collected, not fatal for the import
                errors.append(f"Zeile {row_num}: {str(e)}")
                logger.error(f"[OMRON-IMPORT] Error at row {row_num}: {e}")
                continue
        conn.commit()
    logger.info(f"[OMRON-IMPORT] {pid}: {inserted} inserted, {updated} updated, {skipped} skipped, {len(errors)} errors")
    return {
        "message": "Omron CSV Import abgeschlossen",
        "inserted": inserted,
        "updated": updated,
        "skipped": skipped,
        "errors": errors[:10]  # Limit to first 10 errors
    }
@router.post("/import/apple-health")
async def import_apple_health_csv(
    file: UploadFile = File(...),
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """
    Import Apple Health vitals CSV export.

    Expected columns:
    - Datum/Uhrzeit
    - Ruhepuls (count/min)
    - Herzfrequenzvariabilität (ms)
    - VO2 max (ml/(kg·min))
    - Blutsauerstoffsättigung (%)
    - Atemfrequenz (count/min)

    Rows without a date or without any vitals value are counted as skipped;
    rows that fail to parse are collected as errors (first 10 returned).
    Manual entries keep their 'manual' source marker on conflict.
    """
    pid = get_pid(x_profile_id, session)

    # Read file
    content = await file.read()
    content_str = content.decode('utf-8')

    # Parse CSV
    reader = csv.DictReader(io.StringIO(content_str))

    inserted = 0
    updated = 0
    skipped = 0
    errors = []

    def _num(row: dict, key: str, cast) -> Optional[float]:
        """Parse an optional numeric column; None when missing/empty.

        Apple Health exports may contain decimal strings for integer
        fields, so integers are parsed via cast(float(...)).
        """
        raw = row.get(key)
        if not raw:
            return None
        return cast(float(raw))

    with get_db() as conn:
        cur = get_cursor(conn)
        for row_num, row in enumerate(reader, start=2):
            try:
                # Parse date (format: "2026-02-21 00:00:00").
                # Guard against empty cells: ''.split() yields [] and
                # indexing it would raise IndexError (miscounted as error).
                date_parts = row.get('Datum/Uhrzeit', '').split()
                date_str = date_parts[0] if date_parts else None
                if not date_str:
                    skipped += 1
                    continue

                # Parse values (columns might be empty or absent)
                resting_hr = _num(row, 'Ruhepuls (count/min)', int)
                hrv = _num(row, 'Herzfrequenzvariabilität (ms)', int)
                vo2_max = _num(row, 'VO2 max (ml/(kg·min))', float)
                spo2 = _num(row, 'Blutsauerstoffsättigung (%)', int)
                respiratory_rate = _num(row, 'Atemfrequenz (count/min)', float)

                # Skip if no vitals data at all
                if not any([resting_hr, hrv, vo2_max, spo2, respiratory_rate]):
                    skipped += 1
                    continue

                # Upsert; COALESCE keeps existing values for NULL columns.
                cur.execute(
                    """
                    INSERT INTO vitals_log (
                        profile_id, date, resting_hr, hrv, vo2_max, spo2,
                        respiratory_rate, source
                    )
                    VALUES (%s, %s, %s, %s, %s, %s, %s, 'apple_health')
                    ON CONFLICT (profile_id, date)
                    DO UPDATE SET
                        resting_hr = COALESCE(EXCLUDED.resting_hr, vitals_log.resting_hr),
                        hrv = COALESCE(EXCLUDED.hrv, vitals_log.hrv),
                        vo2_max = COALESCE(EXCLUDED.vo2_max, vitals_log.vo2_max),
                        spo2 = COALESCE(EXCLUDED.spo2, vitals_log.spo2),
                        respiratory_rate = COALESCE(EXCLUDED.respiratory_rate, vitals_log.respiratory_rate),
                        source = CASE WHEN vitals_log.source = 'manual' THEN vitals_log.source ELSE 'apple_health' END,
                        updated_at = CURRENT_TIMESTAMP
                    RETURNING (xmax = 0) AS inserted
                    """,
                    (pid, date_str, resting_hr, hrv, vo2_max, spo2, respiratory_rate)
                )
                result = cur.fetchone()
                # xmax = 0 means the row was freshly inserted, not updated.
                if result['inserted']:
                    inserted += 1
                else:
                    updated += 1
            except Exception as e:
                errors.append(f"Zeile {row_num}: {str(e)}")
                logger.error(f"[APPLE-HEALTH-IMPORT] Error at row {row_num}: {e}")
                continue
        conn.commit()

    logger.info(f"[APPLE-HEALTH-IMPORT] {pid}: {inserted} inserted, {updated} updated, {skipped} skipped, {len(errors)} errors")
    return {
        "message": "Apple Health CSV Import abgeschlossen",
        "inserted": inserted,
        "updated": updated,
        "skipped": skipped,
        "errors": errors[:10]  # Limit to first 10 errors
    }

View File

@ -0,0 +1,374 @@
"""
Vitals Baseline Router - v9d Phase 2d Refactored
Baseline vitals measured once daily (morning, fasted):
- Resting Heart Rate (RHR)
- Heart Rate Variability (HRV)
- VO2 Max
- SpO2 (Blood Oxygen Saturation)
- Respiratory Rate
Endpoints:
- GET /api/vitals/baseline List baseline vitals
- GET /api/vitals/baseline/by-date/{date} Get entry for specific date
- POST /api/vitals/baseline Create/update baseline entry (upsert)
- PUT /api/vitals/baseline/{id} Update baseline entry
- DELETE /api/vitals/baseline/{id} Delete baseline entry
- GET /api/vitals/baseline/stats Statistics and trends
- POST /api/vitals/baseline/import/apple-health Import Apple Health CSV
"""
from fastapi import APIRouter, HTTPException, Depends, Header, UploadFile, File
from pydantic import BaseModel
from typing import Optional
from datetime import datetime, timedelta
import logging
import csv
import io
from db import get_db, get_cursor, r2d
from auth import require_auth
from routers.profiles import get_pid
router = APIRouter(prefix="/api/vitals/baseline", tags=["vitals_baseline"])
logger = logging.getLogger(__name__)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Pydantic Models
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
class BaselineEntry(BaseModel):
    """Request body for a daily baseline vitals entry.

    All vitals are optional; the endpoints enforce that at least one is
    provided. One entry per profile and date (upserted on conflict).
    """
    date: str  # ISO date (YYYY-MM-DD); upsert key together with profile
    resting_hr: Optional[int] = None  # resting heart rate, bpm
    hrv: Optional[int] = None  # heart rate variability, ms
    vo2_max: Optional[float] = None  # ml/(kg·min), e.g. from Apple Watch
    spo2: Optional[int] = None  # blood oxygen saturation, percent
    respiratory_rate: Optional[float] = None  # breaths per minute
    body_temperature: Optional[float] = None  # unit not shown here — presumably °C; TODO confirm
    resting_metabolic_rate: Optional[int] = None  # presumably kcal/day; verify against schema
    note: Optional[str] = None  # free-text note (ignored when empty)
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# CRUD Endpoints
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@router.get("")
def list_baseline_vitals(
    limit: int = 90,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Return the most recent baseline vitals entries, newest first."""
    pid = get_pid(x_profile_id)
    query = """
            SELECT * FROM vitals_baseline
            WHERE profile_id = %s
            ORDER BY date DESC
            LIMIT %s
        """
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute(query, (pid, limit))
        rows = cur.fetchall()
        return [r2d(row) for row in rows]
@router.get("/by-date/{date}")
def get_baseline_by_date(
    date: str,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Return the baseline entry for one date, or None when absent."""
    pid = get_pid(x_profile_id)
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute("""
            SELECT * FROM vitals_baseline
            WHERE profile_id = %s AND date = %s
        """, (pid, date))
        record = cur.fetchone()
        if record is None:
            return None
        return r2d(record)
@router.post("")
def create_or_update_baseline(
    entry: BaselineEntry,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Create or update a baseline entry (upsert on profile_id + date).

    Only fields present in the request are written; existing values for
    omitted columns are preserved via COALESCE in the DO UPDATE clause.

    Raises:
        HTTPException 400: when no baseline vital is provided.
    """
    pid = get_pid(x_profile_id)

    # Collect (column, value) pairs for the fields actually provided.
    candidates = [
        ("resting_hr", entry.resting_hr),
        ("hrv", entry.hrv),
        ("vo2_max", entry.vo2_max),
        ("spo2", entry.spo2),
        ("respiratory_rate", entry.respiratory_rate),
        ("body_temperature", entry.body_temperature),
        ("resting_metabolic_rate", entry.resting_metabolic_rate),
    ]
    provided = [(col, val) for col, val in candidates if val is not None]
    if entry.note:  # empty-string notes are deliberately ignored
        provided.append(("note", entry.note))

    # At least one field must be provided
    if not provided:
        raise HTTPException(400, "At least one baseline vital must be provided")

    columns = [col for col, _ in provided]
    values = [pid, entry.date] + [val for _, val in provided]
    update_clauses = [
        f"{col} = COALESCE(EXCLUDED.{col}, vitals_baseline.{col})" for col in columns
    ]

    with get_db() as conn:
        cur = get_cursor(conn)
        # NOTE: psycopg-style %s placeholders, consistent with the rest of
        # this module. (The previous draft used $1/$2 asyncpg syntax, which
        # this cursor cannot bind.) Column names come from the fixed
        # candidate list above, never from user input.
        query = f"""
            INSERT INTO vitals_baseline (profile_id, date, {', '.join(columns)})
            VALUES ({', '.join(['%s'] * len(values))})
            ON CONFLICT (profile_id, date)
            DO UPDATE SET {', '.join(update_clauses)}, updated_at = NOW()
            RETURNING *
        """
        cur.execute(query, values)
        # Commit is assumed to be handled by the get_db() context manager,
        # matching the other CRUD endpoints in this file — TODO confirm.
        return r2d(cur.fetchone())
@router.put("/{entry_id}")
def update_baseline(
    entry_id: int,
    entry: BaselineEntry,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Update an existing baseline entry (partial update).

    Only fields present in the request are changed. Supports the full
    BaselineEntry field set, including body_temperature and
    resting_metabolic_rate (previously missing here although accepted
    by the POST endpoint).

    Raises:
        HTTPException 400: when no field is provided.
        HTTPException 404: when the entry does not exist for this profile.
    """
    pid = get_pid(x_profile_id)

    # Build SET clause dynamically from the provided fields.
    updates = []
    values = []
    for col, val in [
        ("resting_hr", entry.resting_hr),
        ("hrv", entry.hrv),
        ("vo2_max", entry.vo2_max),
        ("spo2", entry.spo2),
        ("respiratory_rate", entry.respiratory_rate),
        ("body_temperature", entry.body_temperature),
        ("resting_metabolic_rate", entry.resting_metabolic_rate),
    ]:
        if val is not None:
            updates.append(f"{col} = %s")
            values.append(val)
    if entry.note:  # empty-string notes are deliberately ignored
        updates.append("note = %s")
        values.append(entry.note)

    if not updates:
        raise HTTPException(400, "No fields to update")

    updates.append("updated_at = NOW()")
    values.extend([entry_id, pid])

    with get_db() as conn:
        cur = get_cursor(conn)
        # psycopg-style %s placeholders throughout (the previous draft's
        # $n style is asyncpg syntax and would not bind with this cursor).
        query = f"""
            UPDATE vitals_baseline
            SET {', '.join(updates)}
            WHERE id = %s AND profile_id = %s
            RETURNING *
        """
        cur.execute(query, values)
        row = cur.fetchone()
        if not row:
            raise HTTPException(404, "Entry not found")
        return r2d(row)
@router.delete("/{entry_id}")
def delete_baseline(
    entry_id: int,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Delete a baseline entry owned by the active profile."""
    pid = get_pid(x_profile_id)
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute("""
            DELETE FROM vitals_baseline
            WHERE id = %s AND profile_id = %s
        """, (entry_id, pid))
        deleted = cur.rowcount
        # rowcount == 0 means either the id is unknown or it belongs to
        # a different profile — both surface as 404.
        if deleted == 0:
            raise HTTPException(404, "Entry not found")
    return {"ok": True}
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Statistics & Trends
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@router.get("/stats")
def get_baseline_stats(
    days: int = 30,
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Baseline vitals statistics with 7d/30d averages and trend labels.

    Trend heuristics (7d average vs. 30d average):
    - resting HR: > 2 bpm below -> 'decreasing' (good),
      > 2 bpm above -> 'increasing' (warning), else 'stable'.
    - HRV: > 5 ms above -> 'increasing' (good),
      > 5 ms below -> 'decreasing' (warning), else 'stable'.

    Note: all aggregates are limited to the requested `days` window, so
    the 30d averages are clipped when days < 30.
    """
    pid = get_pid(x_profile_id)

    today = datetime.now().date()
    cutoff_date = today - timedelta(days=days)
    # Fixed rolling windows anchored on *today*. The previous version
    # anchored them on cutoff_date (e.g. "date >= cutoff - 7 days"), which
    # made the "7d" average actually span the last days+7 days.
    cutoff_7d = today - timedelta(days=7)
    cutoff_30d = today - timedelta(days=30)

    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute("""
            SELECT
                COUNT(*) as total_entries,
                -- Resting HR
                AVG(resting_hr) FILTER (WHERE date >= %s) as avg_rhr_7d,
                AVG(resting_hr) FILTER (WHERE date >= %s) as avg_rhr_30d,
                -- HRV
                AVG(hrv) FILTER (WHERE date >= %s) as avg_hrv_7d,
                AVG(hrv) FILTER (WHERE date >= %s) as avg_hrv_30d,
                -- Latest values
                (SELECT vo2_max FROM vitals_baseline WHERE profile_id = %s AND vo2_max IS NOT NULL ORDER BY date DESC LIMIT 1) as latest_vo2_max,
                AVG(spo2) FILTER (WHERE date >= %s) as avg_spo2_7d
            FROM vitals_baseline
            WHERE profile_id = %s AND date >= %s
        """, (cutoff_7d, cutoff_30d, cutoff_7d, cutoff_30d, pid, cutoff_7d, pid, cutoff_date))
        stats = r2d(cur.fetchone())

    # Calculate trends (7d vs 30d). Use `is not None` so a legitimate
    # zero average would not be mistaken for missing data.
    rhr_7d, rhr_30d = stats['avg_rhr_7d'], stats['avg_rhr_30d']
    if rhr_7d is not None and rhr_30d is not None:
        if rhr_7d < rhr_30d - 2:
            stats['trend_rhr'] = 'decreasing'  # lower resting HR = good
        elif rhr_7d > rhr_30d + 2:
            stats['trend_rhr'] = 'increasing'  # possible fatigue warning
        else:
            stats['trend_rhr'] = 'stable'
    else:
        stats['trend_rhr'] = None

    hrv_7d, hrv_30d = stats['avg_hrv_7d'], stats['avg_hrv_30d']
    if hrv_7d is not None and hrv_30d is not None:
        if hrv_7d > hrv_30d + 5:
            stats['trend_hrv'] = 'increasing'  # higher HRV = good
        elif hrv_7d < hrv_30d - 5:
            stats['trend_hrv'] = 'decreasing'  # warning
        else:
            stats['trend_hrv'] = 'stable'
    else:
        stats['trend_hrv'] = None

    return stats
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
# Import: Apple Health CSV
# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
@router.post("/import/apple-health")
async def import_apple_health_baseline(
    file: UploadFile = File(...),
    x_profile_id: Optional[str] = Header(default=None),
    session: dict = Depends(require_auth)
):
    """Import baseline vitals from Apple Health CSV export.

    Rows with no date or no vitals values are skipped. Existing entries
    with source 'manual' are never overwritten; such rows are counted as
    skipped (previously they were miscounted as updated because the
    conditional DO UPDATE returns no row). Integer columns may contain
    decimal strings (e.g. "62.5"), so they are parsed via int(float(...)).
    """
    pid = get_pid(x_profile_id)

    content = await file.read()
    decoded = content.decode('utf-8')
    reader = csv.DictReader(io.StringIO(decoded))

    inserted = 0
    updated = 0
    skipped = 0
    errors = 0

    with get_db() as conn:
        cur = get_cursor(conn)
        for row in reader:
            try:
                start = row.get('Start')
                date = start[:10] if start else None  # "YYYY-MM-DD hh:mm:ss" -> date part
                if not date:
                    skipped += 1
                    continue

                # Extract baseline vitals from Apple Health export
                rhr = row.get('Resting Heart Rate')
                hrv = row.get('Heart Rate Variability')
                vo2 = row.get('VO2 Max')
                spo2 = row.get('Oxygen Saturation')
                resp_rate = row.get('Respiratory Rate')

                # Skip if no baseline vitals
                if not any([rhr, hrv, vo2, spo2, resp_rate]):
                    skipped += 1
                    continue

                # Upsert; manual entries are protected by the WHERE clause.
                cur.execute("""
                    INSERT INTO vitals_baseline (
                        profile_id, date,
                        resting_hr, hrv, vo2_max, spo2, respiratory_rate,
                        source
                    ) VALUES (%s, %s, %s, %s, %s, %s, %s, 'apple_health')
                    ON CONFLICT (profile_id, date)
                    DO UPDATE SET
                        resting_hr = COALESCE(EXCLUDED.resting_hr, vitals_baseline.resting_hr),
                        hrv = COALESCE(EXCLUDED.hrv, vitals_baseline.hrv),
                        vo2_max = COALESCE(EXCLUDED.vo2_max, vitals_baseline.vo2_max),
                        spo2 = COALESCE(EXCLUDED.spo2, vitals_baseline.spo2),
                        respiratory_rate = COALESCE(EXCLUDED.respiratory_rate, vitals_baseline.respiratory_rate),
                        updated_at = NOW()
                    WHERE vitals_baseline.source != 'manual'
                    RETURNING (xmax = 0) AS inserted
                """, (
                    pid, date,
                    # int(float(...)): Apple Health may export decimals
                    int(float(rhr)) if rhr else None,
                    int(float(hrv)) if hrv else None,
                    float(vo2) if vo2 else None,
                    int(float(spo2)) if spo2 else None,
                    float(resp_rate) if resp_rate else None
                ))
                result = cur.fetchone()
                if result is None:
                    # Conflict with a 'manual' row: nothing was written.
                    skipped += 1
                elif result['inserted']:
                    inserted += 1
                else:
                    updated += 1
            except Exception as e:
                logger.error(f"Error importing row: {e}")
                errors += 1
        # Explicit commit, consistent with the other CSV import endpoints.
        conn.commit()

    return {
        "inserted": inserted,
        "updated": updated,
        "skipped": skipped,
        "errors": errors
    }

427
backend/rule_engine.py Normal file
View File

@ -0,0 +1,427 @@
"""
Training Type Profiles - Rule Engine
Flexible rule evaluation system for activity quality assessment.
Issue: #15
Date: 2026-03-23
"""
from typing import Any, Dict, List, Optional, Callable
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class RuleEvaluator:
    """Generic rule evaluator for arbitrary activity parameters.

    A rule pairs a parameter key with an operator and an expected value;
    a rule set aggregates rules under a pass strategy. Supported operators:

    - gte, lte, gt, lt: comparisons (None actual values fail)
    - eq, neq: equality
    - between: inclusive range check against a 2-element value
    - in, not_in: set membership
    """

    # Operator name -> predicate(actual, expected). Comparison and range
    # operators treat a missing (None) actual value as a failure.
    OPERATORS: Dict[str, Callable[[Any, Any], bool]] = {
        "gte": lambda actual, expected: actual is not None and actual >= expected,
        "lte": lambda actual, expected: actual is not None and actual <= expected,
        "gt": lambda actual, expected: actual is not None and actual > expected,
        "lt": lambda actual, expected: actual is not None and actual < expected,
        "eq": lambda actual, expected: actual == expected,
        "neq": lambda actual, expected: actual != expected,
        "between": lambda actual, expected: actual is not None and expected[0] <= actual <= expected[1],
        "in": lambda actual, expected: actual in expected,
        "not_in": lambda actual, expected: actual not in expected,
    }

    # Display symbols for operators.
    # NOTE(review): several entries are empty strings — this looks like
    # lost Unicode glyphs (≥, ≤, ≠, …) from an export; confirm against the
    # original source before relying on these for display.
    OPERATOR_SYMBOLS = {
        "gte": "",
        "lte": "",
        "gt": ">",
        "lt": "<",
        "eq": "=",
        "neq": "",
        "between": "",
        "in": "",
        "not_in": "",
    }

    @classmethod
    def evaluate_rule(
        cls,
        rule: Dict,
        activity: Dict,
        parameters_registry: Dict[str, Dict]
    ) -> Dict:
        """Evaluate one rule against one activity.

        Args:
            rule: {"parameter", "operator", "value", "weight", "optional", "reason"}
            activity: activity data dictionary
            parameters_registry: parameter_key -> config (may define
                "source_field" to map the key onto an activity field)

        Returns:
            Result dict with "passed", "actual_value", "expected_value",
            "parameter", "operator", "operator_symbol", "reason", "weight";
            plus "skipped" for optional-and-absent parameters, or "error"
            for unknown parameters/operators and evaluation failures.
        """
        param_key = rule.get("parameter")
        operator = rule.get("operator")
        expected = rule.get("value")
        weight = rule.get("weight", 1)
        reason = rule.get("reason", "")

        # Unknown parameter -> error result (excluded from scoring upstream).
        config = parameters_registry.get(param_key)
        if not config:
            return {
                "passed": False,
                "parameter": param_key,
                "error": f"Unknown parameter: {param_key}"
            }

        # Resolve the activity field that backs this parameter.
        actual = activity.get(config.get("source_field", param_key))
        symbol = cls.OPERATOR_SYMBOLS.get(operator, operator)

        if actual is None:
            if rule.get("optional", False):
                # Optional and absent -> passes, but marked skipped so the
                # rule-set scorer excludes it from the weight totals.
                return {
                    "passed": True,
                    "actual_value": None,
                    "expected_value": expected,
                    "parameter": param_key,
                    "operator": operator,
                    "operator_symbol": symbol,
                    "reason": "Optional parameter not provided",
                    "weight": weight,
                    "skipped": True
                }
            # Required and absent -> hard fail.
            return {
                "passed": False,
                "actual_value": None,
                "expected_value": expected,
                "parameter": param_key,
                "operator": operator,
                "operator_symbol": symbol,
                "reason": reason or "Required parameter missing",
                "weight": weight
            }

        predicate = cls.OPERATORS.get(operator)
        if not predicate:
            return {
                "passed": False,
                "parameter": param_key,
                "error": f"Unknown operator: {operator}"
            }

        try:
            outcome = predicate(actual, expected)
        except Exception as e:
            logger.error(f"[RULE-ENGINE] Error evaluating rule {param_key}: {e}")
            return {
                "passed": False,
                "parameter": param_key,
                "error": f"Evaluation error: {str(e)}"
            }

        return {
            "passed": outcome,
            "actual_value": actual,
            "expected_value": expected,
            "parameter": param_key,
            "operator": operator,
            "operator_symbol": symbol,
            "reason": reason,
            "weight": weight
        }

    @classmethod
    def evaluate_rule_set(
        cls,
        rule_set: Dict,
        activity: Dict,
        parameters_registry: Dict[str, Dict]
    ) -> Dict:
        """Evaluate a complete rule set (e.g. minimum_requirements).

        Args:
            rule_set: {"enabled", "pass_strategy", "pass_threshold", "rules"}
            activity: activity data
            parameters_registry: parameter configurations

        Returns:
            {"enabled", "passed", "score" (0-1), "rule_results",
             "failed_rules", "pass_strategy", "pass_threshold"}.
            A disabled rule set trivially passes with score 1.0.
        """
        if not rule_set.get("enabled", False):
            return {
                "enabled": False,
                "passed": True,
                "score": 1.0,
                "rule_results": [],
                "failed_rules": []
            }

        strategy = rule_set.get("pass_strategy", "weighted_score")
        threshold = rule_set.get("pass_threshold", 0.6)

        results = []
        failed = []
        weight_total = 0
        weight_passed = 0

        for single_rule in rule_set.get("rules", []):
            outcome = cls.evaluate_rule(single_rule, activity, parameters_registry)
            results.append(outcome)
            # Skipped (optional/absent) and errored rules do not count
            # toward the weighted score.
            if outcome.get("skipped"):
                continue
            if outcome.get("error"):
                logger.warning(f"[RULE-ENGINE] Rule error: {outcome['error']}")
                continue
            w = outcome.get("weight", 1)
            weight_total += w
            if outcome["passed"]:
                weight_passed += w
            else:
                failed.append(outcome)

        # No countable rules -> vacuously perfect score.
        score = weight_passed / weight_total if weight_total > 0 else 1.0

        if strategy == "all_must_pass":
            overall = all(
                r["passed"] for r in results
                if not r.get("skipped") and not r.get("error")
            )
        elif strategy == "weighted_score":
            overall = score >= threshold
        elif strategy == "at_least_n":
            required = rule_set.get("at_least_n", 1)
            overall = sum(
                1 for r in results
                if r["passed"] and not r.get("skipped")
            ) >= required
        else:
            overall = False
            logger.warning(f"[RULE-ENGINE] Unknown pass strategy: {strategy}")

        return {
            "enabled": True,
            "passed": overall,
            "score": round(score, 2),
            "rule_results": results,
            "failed_rules": failed,
            "pass_strategy": strategy,
            "pass_threshold": threshold
        }
class IntensityZoneEvaluator:
    """Classifies an activity's average heart rate into a configured zone."""

    @staticmethod
    def evaluate(
        zone_config: Dict,
        activity: Dict,
        user_profile: Dict
    ) -> Dict:
        """Determine the dominant HR zone for an activity.

        Args:
            zone_config: intensity_zones configuration (zones carry rules
                on "avg_hr_percent" with a [min, max] range)
            activity: activity data (reads "hr_avg" and "duration_min")
            user_profile: user profile (reads "hf_max", default 180)

        Returns:
            {"enabled": False} when disabled; otherwise a dict with
            "dominant_zone", "avg_hr_percent", zone metadata,
            "duration_quality" (0-1 vs. the zone's target duration) and a
            German-language "recommendation".
        """
        if not zone_config.get("enabled", False):
            return {"enabled": False}

        hr_average = activity.get("hr_avg")
        max_hr = user_profile.get("hf_max", 180)  # fallback when profile lacks hf_max

        # Without both values no zone classification is possible.
        if not hr_average or not max_hr:
            return {
                "enabled": True,
                "dominant_zone": "unknown",
                "avg_hr_percent": None,
                "recommendation": "Herzfrequenz-Daten fehlen"
            }

        hr_percent = (hr_average / max_hr) * 100

        # First zone whose avg_hr_percent range contains the value wins.
        matched = None
        for zone in zone_config.get("zones", []):
            for zone_rule in zone.get("rules", []):
                if zone_rule["parameter"] != "avg_hr_percent":
                    continue
                low, high = zone_rule["value"]
                if low <= hr_percent <= high:
                    matched = zone
                    break
            if matched:
                break

        if not matched:
            return {
                "enabled": True,
                "dominant_zone": "out_of_range",
                "avg_hr_percent": round(hr_percent, 1),
                "recommendation": "Herzfrequenz außerhalb definierter Zonen"
            }

        # Quality = fraction of the zone's target duration achieved, capped at 1.
        actual_min = activity.get("duration_min", 0)
        target_min = matched.get("target_duration_min", 30)
        quality = min(actual_min / target_min, 1.0) if target_min > 0 else 1.0

        advice = f"Training in Zone '{matched['name']}' (Effekt: {matched['effect']})."
        if actual_min < target_min:
            advice += f" Für optimale Wirkung: {target_min}min empfohlen."

        return {
            "enabled": True,
            "dominant_zone": matched.get("id"),
            "dominant_zone_name": matched.get("name"),
            "avg_hr_percent": round(hr_percent, 1),
            "zone_color": matched.get("color"),
            "zone_effect": matched.get("effect"),
            "duration_quality": round(quality, 2),
            "target_duration_min": target_min,
            "actual_duration_min": actual_min,
            "recommendation": advice
        }
class TrainingEffectsEvaluator:
    """Derives which abilities an activity trains and the resulting load."""

    @staticmethod
    def evaluate(
        effects_config: Dict,
        activity: Dict,
        intensity_zone_result: Optional[Dict] = None
    ) -> Dict:
        """Evaluate training effects (abilities trained).

        Args:
            effects_config: training_effects configuration; reads
                "default_effects" with "primary_abilities" and
                "secondary_abilities" lists
            activity: activity data (currently unused in the calculation)
            intensity_zone_result: zone evaluation output (currently unused)

        Returns:
            {"enabled": False} when disabled; otherwise a dict with
            "abilities_trained" (each carrying intensity, quality and
            contribution), "total_training_load", "metabolic_focus" and
            "muscle_groups".
        """
        if not effects_config.get("enabled", False):
            return {"enabled": False}

        defaults = effects_config.get("default_effects", {})
        # Quality factor is a flat 1.0 for now (no conditional matching yet).
        quality = 1.0

        def _entry(ability: Dict, factor: float, kind: str) -> Dict:
            """Build one trained-ability record with its load contribution."""
            return {
                "category": ability["category"],
                "ability": ability["ability"],
                "intensity": ability["intensity"],
                "quality": factor,
                "contribution": ability["intensity"] * factor,
                "type": kind
            }

        trained = [
            _entry(a, quality, "primary")
            for a in defaults.get("primary_abilities", [])
        ]
        # Secondary abilities contribute at 70% of the quality factor.
        trained += [
            _entry(a, quality * 0.7, "secondary")
            for a in defaults.get("secondary_abilities", [])
        ]

        load = sum(item["contribution"] for item in trained)

        return {
            "enabled": True,
            "abilities_trained": trained,
            "total_training_load": round(load, 2),
            "metabolic_focus": effects_config.get("metabolic_focus", []),
            "muscle_groups": effects_config.get("muscle_groups", [])
        }

View File

@ -29,9 +29,11 @@ import AdminCouponsPage from './pages/AdminCouponsPage'
import AdminUserRestrictionsPage from './pages/AdminUserRestrictionsPage'
import AdminTrainingTypesPage from './pages/AdminTrainingTypesPage'
import AdminActivityMappingsPage from './pages/AdminActivityMappingsPage'
import AdminTrainingProfiles from './pages/AdminTrainingProfiles'
import SubscriptionPage from './pages/SubscriptionPage'
import SleepPage from './pages/SleepPage'
import RestDaysPage from './pages/RestDaysPage'
import VitalsPage from './pages/VitalsPage'
import './app.css'
function Nav() {
@ -168,6 +170,7 @@ function AppShell() {
<Route path="/history" element={<History/>}/>
<Route path="/sleep" element={<SleepPage/>}/>
<Route path="/rest-days" element={<RestDaysPage/>}/>
<Route path="/vitals" element={<VitalsPage/>}/>
<Route path="/nutrition" element={<NutritionPage/>}/>
<Route path="/activity" element={<ActivityPage/>}/>
<Route path="/analysis" element={<Analysis/>}/>
@ -180,6 +183,7 @@ function AppShell() {
<Route path="/admin/user-restrictions" element={<AdminUserRestrictionsPage/>}/>
<Route path="/admin/training-types" element={<AdminTrainingTypesPage/>}/>
<Route path="/admin/activity-mappings" element={<AdminActivityMappingsPage/>}/>
<Route path="/admin/training-profiles" element={<AdminTrainingProfiles/>}/>
<Route path="/subscription" element={<SubscriptionPage/>}/>
</Routes>
</main>

View File

@ -0,0 +1,456 @@
import { useState, useEffect } from 'react'
import { Trash2, Plus, ChevronDown, ChevronUp } from 'lucide-react'
import '../app.css'
// Comparison operators offered in the rule builder UI. `value` must match
// the backend rule engine's operator keys; `types` restricts which
// parameter data types an operator may be applied to.
const OPERATORS = [
  { value: 'gte', label: '≥ Größer gleich', types: ['integer', 'float'] },
  { value: 'lte', label: '≤ Kleiner gleich', types: ['integer', 'float'] },
  { value: 'gt', label: '> Größer', types: ['integer', 'float'] },
  { value: 'lt', label: '< Kleiner', types: ['integer', 'float'] },
  { value: 'eq', label: '= Gleich', types: ['integer', 'float', 'string'] },
  { value: 'neq', label: '≠ Ungleich', types: ['integer', 'float', 'string'] },
  { value: 'between', label: '⟷ Zwischen', types: ['integer', 'float'] },
]
// Strategies for deciding whether a rule set passes overall; values must
// match the backend's pass_strategy keys.
const PASS_STRATEGIES = [
  { value: 'weighted_score', label: 'Gewichteter Score' },
  { value: 'all_must_pass', label: 'Alle müssen erfüllt sein' },
  { value: 'at_least_n', label: 'Mindestens N Regeln' },
]
export default function ProfileBuilder({ trainingType, onSave, onCancel, parameters }) {
const [profile, setProfile] = useState(null)
const [loading, setLoading] = useState(false)
const [expandedSections, setExpandedSections] = useState({ minReq: true })
const [successMessage, setSuccessMessage] = useState(null)
useEffect(() => {
// Initialize or load existing profile
if (trainingType.profile) {
setProfile(trainingType.profile)
} else {
// Create empty profile structure
setProfile({
version: '1.0',
name: `${trainingType.name_de} (Profil)`,
description: '',
rule_sets: {
minimum_requirements: {
enabled: true,
pass_strategy: 'weighted_score',
pass_threshold: 0.6,
rules: []
},
intensity_zones: {
enabled: false,
zones: []
},
training_effects: {
enabled: false,
default_effects: {
primary_abilities: [],
secondary_abilities: []
}
},
periodization: {
enabled: false,
frequency: {
per_week_optimal: 3,
per_week_max: 5
},
recovery: {
min_hours_between: 24
}
},
performance_indicators: {
enabled: false
},
safety: {
enabled: false,
warnings: []
}
}
})
}
}, [trainingType])
const toggleSection = (section) => {
setExpandedSections(prev => ({ ...prev, [section]: !prev[section] }))
}
const updateRuleSet = (key, updates) => {
setProfile(prev => ({
...prev,
rule_sets: {
...prev.rule_sets,
[key]: {
...prev.rule_sets[key],
...updates
}
}
}))
}
const addRule = () => {
const newRule = {
parameter: parameters[0]?.key || 'duration_min',
operator: 'gte',
value: 0,
weight: 3,
optional: false,
reason: ''
}
setProfile(prev => ({
...prev,
rule_sets: {
...prev.rule_sets,
minimum_requirements: {
...prev.rule_sets.minimum_requirements,
rules: [...prev.rule_sets.minimum_requirements.rules, newRule]
}
}
}))
}
const updateRule = (index, updates) => {
setProfile(prev => {
const rules = [...prev.rule_sets.minimum_requirements.rules]
rules[index] = { ...rules[index], ...updates }
return {
...prev,
rule_sets: {
...prev.rule_sets,
minimum_requirements: {
...prev.rule_sets.minimum_requirements,
rules
}
}
}
})
}
const removeRule = (index) => {
setProfile(prev => ({
...prev,
rule_sets: {
...prev.rule_sets,
minimum_requirements: {
...prev.rule_sets.minimum_requirements,
rules: prev.rule_sets.minimum_requirements.rules.filter((_, i) => i !== index)
}
}
}))
}
const handleSave = async () => {
setLoading(true)
try {
await onSave(profile)
setSuccessMessage('✓ Profil gespeichert!')
setTimeout(() => {
setSuccessMessage(null)
}, 2000)
} catch (err) {
// Error is already handled by parent
} finally {
setLoading(false)
}
}
if (!profile) return <div className="spinner" />
const minReq = profile.rule_sets.minimum_requirements
return (
<div style={{ background: 'var(--surface)', borderRadius: '12px', padding: '20px' }}>
<div style={{ marginBottom: '20px' }}>
<h3 style={{ marginBottom: '8px' }}>
{trainingType.icon} {trainingType.name_de} - Profil konfigurieren
</h3>
<p style={{ fontSize: '13px', color: 'var(--text2)' }}>
Definiere Mindestanforderungen und Bewertungskriterien für diesen Trainingstyp
</p>
</div>
{/* Success Message */}
{successMessage && (
<div style={{
padding: '12px',
background: 'var(--accent)',
color: 'white',
borderRadius: '8px',
marginBottom: '16px',
fontSize: '14px',
fontWeight: '600'
}}>
{successMessage}
</div>
)}
{/* Minimum Requirements */}
<div className="card" style={{ marginBottom: '16px' }}>
<div
style={{
display: 'flex',
alignItems: 'center',
justifyContent: 'space-between',
cursor: 'pointer'
}}
onClick={() => toggleSection('minReq')}
>
<div style={{ display: 'flex', alignItems: 'center', gap: '8px' }}>
<h4 style={{ margin: 0 }}>Mindestanforderungen</h4>
<label style={{ display: 'flex', alignItems: 'center', gap: '4px', fontSize: '13px' }}>
<input
type="checkbox"
checked={minReq.enabled}
onChange={(e) => {
e.stopPropagation()
updateRuleSet('minimum_requirements', { enabled: e.target.checked })
}}
/>
Aktiviert
</label>
</div>
{expandedSections.minReq ? <ChevronUp size={18} /> : <ChevronDown size={18} />}
</div>
{expandedSections.minReq && minReq.enabled && (
<div style={{ marginTop: '16px' }}>
{/* Strategy */}
<div style={{ marginBottom: '16px', paddingBottom: '16px', borderBottom: '1px solid var(--border)' }}>
<div className="form-row" style={{ marginBottom: '8px' }}>
<label className="form-label">Pass-Strategie</label>
<select
className="form-select"
value={minReq.pass_strategy}
onChange={(e) => updateRuleSet('minimum_requirements', { pass_strategy: e.target.value })}
>
{PASS_STRATEGIES.map(s => (
<option key={s.value} value={s.value}>{s.label}</option>
))}
</select>
</div>
{minReq.pass_strategy === 'weighted_score' && (
<div className="form-row">
<label className="form-label">Threshold (Score 0-1)</label>
<input
type="number"
className="form-input"
step="0.1"
min="0"
max="1"
value={minReq.pass_threshold}
onChange={(e) => updateRuleSet('minimum_requirements', { pass_threshold: parseFloat(e.target.value) })}
/>
<span className="form-unit">{(minReq.pass_threshold * 100).toFixed(0)}%</span>
</div>
)}
</div>
{/* Rules */}
<div style={{ marginBottom: '16px' }}>
<div style={{ fontWeight: '600', fontSize: '14px', marginBottom: '12px' }}>
Regeln ({minReq.rules.length})
</div>
{minReq.rules.map((rule, idx) => {
const param = parameters.find(p => p.key === rule.parameter)
const availableOps = OPERATORS.filter(op =>
param ? op.types.includes(param.data_type) : true
)
const useWeights = minReq.pass_strategy === 'weighted_score'
return (
<div
key={idx}
style={{
background: 'var(--surface2)',
padding: '12px',
borderRadius: '8px',
marginBottom: '8px'
}}
>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'flex-start', marginBottom: '12px' }}>
<div style={{ fontSize: '12px', fontWeight: '600', color: 'var(--text2)' }}>
Regel {idx + 1}
</div>
<button
className="btn"
onClick={() => removeRule(idx)}
style={{ padding: '4px 8px', minWidth: 'auto', fontSize: '11px' }}
title="Regel löschen"
>
<Trash2 size={12} /> Löschen
</button>
</div>
{/* Parameter */}
<div style={{ marginBottom: '8px' }}>
<label style={{ fontSize: '11px', color: 'var(--text3)', fontWeight: '600', display: 'block', marginBottom: '4px' }}>
WAS soll geprüft werden?
</label>
<select
className="form-select"
value={rule.parameter}
onChange={(e) => updateRule(idx, { parameter: e.target.value })}
style={{ fontSize: '13px', width: '100%' }}
>
{parameters.map(p => (
<option key={p.key} value={p.key}>
{p.name_de} ({p.unit || p.data_type})
</option>
))}
</select>
</div>
{/* Operator + Value */}
<div style={{ marginBottom: '8px' }}>
<label style={{ fontSize: '11px', color: 'var(--text3)', fontWeight: '600', display: 'block', marginBottom: '4px' }}>
BEDINGUNG
</label>
<div style={{ display: 'flex', gap: '8px', alignItems: 'flex-start' }}>
<select
className="form-select"
value={rule.operator}
onChange={(e) => updateRule(idx, { operator: e.target.value })}
style={{ fontSize: '13px', flex: rule.operator === 'between' ? '0 0 80px' : '0 0 100px' }}
>
{availableOps.map(op => (
<option key={op.value} value={op.value}>{op.label}</option>
))}
</select>
{rule.operator === 'between' ? (
<div style={{ flex: 1, display: 'flex', gap: '4px' }}>
<input
type="number"
className="form-input"
placeholder="Min"
value={Array.isArray(rule.value) ? rule.value[0] : 0}
onChange={(e) => updateRule(idx, {
value: [parseFloat(e.target.value) || 0, Array.isArray(rule.value) ? rule.value[1] : 0]
})}
style={{ fontSize: '13px', flex: 1 }}
/>
<input
type="number"
className="form-input"
placeholder="Max"
value={Array.isArray(rule.value) ? rule.value[1] : 0}
onChange={(e) => updateRule(idx, {
value: [Array.isArray(rule.value) ? rule.value[0] : 0, parseFloat(e.target.value) || 0]
})}
style={{ fontSize: '13px', flex: 1 }}
/>
</div>
) : (
<input
type="number"
className="form-input"
placeholder="z.B. 90"
value={rule.value}
onChange={(e) => updateRule(idx, { value: parseFloat(e.target.value) || 0 })}
style={{ fontSize: '13px', flex: 1 }}
/>
)}
</div>
</div>
{/* Weight - nur bei weighted_score */}
{useWeights && (
<div style={{ marginBottom: '8px', paddingTop: '8px', borderTop: '1px dashed var(--border)' }}>
<label style={{ fontSize: '11px', color: 'var(--text3)', fontWeight: '600', display: 'block', marginBottom: '4px' }}>
WICHTIGKEIT dieser Regel (1 = unwichtig, 10 = sehr wichtig)
</label>
<input
type="number"
className="form-input"
placeholder="1-10"
min="1"
max="10"
value={rule.weight}
onChange={(e) => updateRule(idx, { weight: parseInt(e.target.value) || 1 })}
style={{ fontSize: '13px', width: '80px' }}
/>
</div>
)}
{/* Reason */}
<div style={{ marginBottom: '8px' }}>
<label style={{ fontSize: '11px', color: 'var(--text3)', fontWeight: '600', display: 'block', marginBottom: '4px' }}>
Begründung (optional)
</label>
<input
type="text"
className="form-input"
placeholder="z.B. 'Techniktraining sollte ruhig sein'"
value={rule.reason}
onChange={(e) => updateRule(idx, { reason: e.target.value })}
style={{ fontSize: '12px', width: '100%' }}
/>
</div>
{/* Optional Checkbox */}
<label style={{ display: 'flex', alignItems: 'center', gap: '6px', fontSize: '12px', color: 'var(--text2)' }}>
<input
type="checkbox"
checked={rule.optional}
onChange={(e) => updateRule(idx, { optional: e.target.checked })}
/>
Optional (Regel wird übersprungen wenn Parameter fehlt)
</label>
</div>
)
})}
<button
className="btn btn-secondary btn-full"
onClick={addRule}
style={{ marginTop: '8px' }}
>
<Plus size={14} /> Regel hinzufügen
</button>
</div>
</div>
)}
</div>
{/* Other Rule Sets - Future Features */}
<div className="card" style={{ marginBottom: '16px', padding: '12px', background: 'var(--surface2)' }}>
<div style={{ fontSize: '13px', color: 'var(--text2)' }}>
<strong>Weitere Dimensionen</strong> (Analyse & Entwicklung, folgen später):
<br />
<span style={{ fontSize: '12px', lineHeight: '1.6', display: 'block', marginTop: '4px' }}>
<strong>Intensitätszonen:</strong> HF-Zonen-Verteilung analysieren<br />
<strong>Training Effects:</strong> Fähigkeitenentwicklung tracken<br />
<strong>Periodization:</strong> Frequenz & Recovery planen<br />
<strong>Performance:</strong> Fortschritt messen (Benchmarks, Trends)<br />
<strong>Safety:</strong> Warnungen bei Überbelastung
</span>
</div>
</div>
{/* Actions */}
<div style={{ display: 'flex', gap: '12px', marginTop: '20px' }}>
<button
className="btn btn-primary"
onClick={handleSave}
disabled={loading}
style={{ flex: 1 }}
>
💾 {loading ? 'Speichern...' : 'Profil speichern'}
</button>
<button
className="btn btn-secondary"
onClick={onCancel}
style={{ flex: 1 }}
>
Abbrechen
</button>
</div>
</div>
)
}

View File

@ -354,6 +354,66 @@ export default function ActivityPage() {
<div style={{display:'flex',justifyContent:'space-between',alignItems:'flex-start'}}>
<div style={{flex:1}}>
<div style={{display:'flex',alignItems:'center',gap:6,marginBottom:2}}>
{/* Evaluation Status Indicator */}
{e.quality_label ? (
<div
style={{
display:'inline-flex',
alignItems:'center',
justifyContent:'center',
width:18,
height:18,
borderRadius:9,
background: e.quality_label === 'excellent' || e.quality_label === 'good' ? '#1D9E75' :
e.quality_label === 'acceptable' ? '#EF9F27' : '#D85A30',
color:'white',
fontSize:10,
fontWeight:700,
flexShrink:0
}}
title={`Evaluation: ${e.quality_label} (Score: ${e.overall_score || 'n/a'})`}
>
</div>
) : e.training_type_id ? (
<div
style={{
display:'inline-flex',
alignItems:'center',
justifyContent:'center',
width:18,
height:18,
borderRadius:9,
background:'#EF9F27',
color:'white',
fontSize:10,
fontWeight:700,
flexShrink:0
}}
title="Trainingstyp zugeordnet, aber nicht evaluiert (kein Profil konfiguriert)"
>
</div>
) : (
<div
style={{
display:'inline-flex',
alignItems:'center',
justifyContent:'center',
width:18,
height:18,
borderRadius:9,
background:'#888780',
color:'white',
fontSize:10,
fontWeight:700,
flexShrink:0
}}
title="Kein Trainingstyp zugeordnet"
>
</div>
)}
<div style={{fontSize:14,fontWeight:600}}>{e.activity_type}</div>
{e.training_category && categories[e.training_category] && (
<div style={{

View File

@ -444,6 +444,11 @@ export default function AdminPanel() {
🔗 Activity-Mappings (lernendes System)
</button>
</Link>
<Link to="/admin/training-profiles">
<button className="btn btn-secondary btn-full">
Training Type Profiles (#15)
</button>
</Link>
</div>
</div>
</div>

View File

@ -0,0 +1,307 @@
import { useState, useEffect } from 'react'
import { api } from '../utils/api'
import '../app.css'
export default function AdminTrainingProfiles() {
const [stats, setStats] = useState(null)
const [trainingTypes, setTrainingTypes] = useState([])
const [templates, setTemplates] = useState([])
const [selectedType, setSelectedType] = useState(null)
const [editingProfile, setEditingProfile] = useState(null)
const [profileJson, setProfileJson] = useState('')
const [loading, setLoading] = useState(true)
const [error, setError] = useState('')
const [success, setSuccess] = useState('')
useEffect(() => {
load()
}, [])
const load = async () => {
try {
setLoading(true)
const [typesData, statsData, templatesData] = await Promise.all([
api.adminListTrainingTypes(),
api.getProfileStats(),
api.getProfileTemplates()
])
setTrainingTypes(typesData)
setStats(statsData)
setTemplates(templatesData)
} catch (e) {
setError(e.message)
} finally {
setLoading(false)
}
}
const openEditor = (type) => {
setSelectedType(type)
setEditingProfile(type.profile || null)
setProfileJson(JSON.stringify(type.profile || {}, null, 2))
setError('')
setSuccess('')
}
const closeEditor = () => {
setSelectedType(null)
setEditingProfile(null)
setProfileJson('')
}
const saveProfile = async () => {
try {
// Validate JSON
const profile = JSON.parse(profileJson)
// Update training type
await api.adminUpdateTrainingType(selectedType.id, { profile })
setSuccess(`Profil für "${selectedType.name_de}" gespeichert`)
closeEditor()
load()
} catch (e) {
setError(e.message || 'Ungültiges JSON')
}
}
const applyTemplate = async (typeId, templateKey) => {
if (!confirm(`Template "${templateKey}" auf diesen Trainingstyp anwenden?`)) return
try {
await api.applyProfileTemplate(typeId, templateKey)
setSuccess('Template erfolgreich angewendet')
load()
} catch (e) {
setError(e.message)
}
}
const batchReEvaluate = async () => {
if (!confirm('Alle Aktivitäten neu evaluieren? Das kann einige Sekunden dauern.')) return
try {
const result = await api.batchEvaluateActivities()
let message = `Batch-Evaluation abgeschlossen: ${result.stats.evaluated} evaluiert, ` +
`${result.stats.skipped} übersprungen, ${result.stats.errors} Fehler`
// Show error details if available
if (result.stats.error_details && result.stats.error_details.length > 0) {
message += '\n\nErste Fehler:\n' + result.stats.error_details.map(err =>
`- Aktivität ${err.activity_id} (Typ: ${err.training_type_id || 'keine'}): ${err.error}`
).join('\n')
}
if (result.stats.errors > 0) {
setError(message)
} else {
setSuccess(message)
}
} catch (e) {
setError(e.message)
}
}
if (loading) return <div className="spinner" />
return (
<div style={{ padding: '20px', maxWidth: '1200px', margin: '0 auto', paddingBottom: '100px' }}>
<h1>Training Type Profiles</h1>
<p style={{ color: 'var(--text2)', marginBottom: '24px' }}>
Konfiguriere Bewertungsprofile für Trainingstypen
</p>
{error && (
<div style={{
padding: '12px',
background: 'var(--danger)',
color: 'white',
borderRadius: '8px',
marginBottom: '16px'
}}>
{error}
</div>
)}
{success && (
<div style={{
padding: '12px',
background: 'var(--accent)',
color: 'white',
borderRadius: '8px',
marginBottom: '16px'
}}>
{success}
</div>
)}
{/* Statistics */}
{stats && (
<div className="card" style={{ marginBottom: '24px' }}>
<h3>Übersicht</h3>
<div style={{ display: 'grid', gridTemplateColumns: 'repeat(auto-fit, minmax(200px, 1fr))', gap: '16px', marginTop: '16px' }}>
<div>
<div style={{ fontSize: '32px', fontWeight: '600', color: 'var(--accent)' }}>{stats.total}</div>
<div style={{ color: 'var(--text2)', fontSize: '14px' }}>Trainingstypen gesamt</div>
</div>
<div>
<div style={{ fontSize: '32px', fontWeight: '600', color: 'var(--accent)' }}>{stats.configured}</div>
<div style={{ color: 'var(--text2)', fontSize: '14px' }}>Profile konfiguriert</div>
</div>
<div>
<div style={{ fontSize: '32px', fontWeight: '600', color: 'var(--text3)' }}>{stats.unconfigured}</div>
<div style={{ color: 'var(--text2)', fontSize: '14px' }}>Noch keine Profile</div>
</div>
</div>
<button
onClick={batchReEvaluate}
className="btn btn-primary"
style={{ marginTop: '16px', width: '100%' }}
>
🔄 Alle Aktivitäten neu evaluieren
</button>
</div>
)}
{/* Training Types List */}
<div className="card">
<h3>Trainingstypen</h3>
<div style={{ marginTop: '16px' }}>
{trainingTypes.map(type => (
<div
key={type.id}
style={{
padding: '16px',
borderBottom: '1px solid var(--border)',
display: 'flex',
alignItems: 'center',
gap: '16px'
}}
>
<div style={{ fontSize: '24px' }}>{type.icon || '📊'}</div>
<div style={{ flex: 1 }}>
<div style={{ fontWeight: '500', marginBottom: '4px' }}>
{type.name_de}
{type.profile && (
<span style={{
marginLeft: '8px',
padding: '2px 8px',
background: 'var(--accent)',
color: 'white',
borderRadius: '4px',
fontSize: '12px'
}}>
Profil
</span>
)}
</div>
<div style={{ fontSize: '14px', color: 'var(--text2)' }}>
{type.category} {type.subcategory && `· ${type.subcategory}`}
</div>
</div>
<div style={{ display: 'flex', gap: '8px', flexWrap: 'wrap' }}>
{/* Template Buttons */}
{templates.map(template => (
<button
key={template.key}
onClick={() => applyTemplate(type.id, template.key)}
className="btn"
style={{ padding: '6px 12px', fontSize: '13px' }}
title={`Template "${template.name_de}" anwenden`}
>
{template.icon} {template.name_de}
</button>
))}
<button
onClick={() => openEditor(type)}
className="btn btn-primary"
style={{ padding: '6px 16px' }}
>
{type.profile ? '✏️ Bearbeiten' : ' Profil erstellen'}
</button>
</div>
</div>
))}
</div>
</div>
{/* Profile Editor Modal */}
{selectedType && (
<div style={{
position: 'fixed',
top: 0,
left: 0,
right: 0,
bottom: 0,
background: 'rgba(0,0,0,0.7)',
display: 'flex',
alignItems: 'center',
justifyContent: 'center',
zIndex: 1000,
padding: '20px'
}}>
<div style={{
background: 'var(--bg)',
borderRadius: '12px',
maxWidth: '900px',
width: '100%',
maxHeight: '90vh',
overflow: 'auto',
padding: '24px'
}}>
<div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginBottom: '20px' }}>
<h2>{selectedType.icon} {selectedType.name_de} - Profil bearbeiten</h2>
<button onClick={closeEditor} className="btn" style={{ padding: '8px 16px' }}> Schließen</button>
</div>
<p style={{ color: 'var(--text2)', marginBottom: '16px' }}>
JSON-basierter Editor. Siehe Dokumentation für vollständige Struktur.
</p>
<textarea
value={profileJson}
onChange={(e) => setProfileJson(e.target.value)}
style={{
width: '100%',
minHeight: '400px',
padding: '12px',
fontFamily: 'monospace',
fontSize: '13px',
border: '1px solid var(--border)',
borderRadius: '8px',
background: 'var(--surface)',
color: 'var(--text1)',
resize: 'vertical'
}}
/>
<div style={{ display: 'flex', gap: '12px', marginTop: '16px' }}>
<button onClick={saveProfile} className="btn btn-primary" style={{ flex: 1 }}>
💾 Profil speichern
</button>
<button onClick={closeEditor} className="btn" style={{ flex: 1 }}>
Abbrechen
</button>
</div>
{selectedType.profile && (
<div style={{ marginTop: '16px', padding: '12px', background: 'var(--surface2)', borderRadius: '8px' }}>
<strong>Aktuelles Profil:</strong>
<div style={{ fontSize: '13px', color: 'var(--text2)', marginTop: '8px' }}>
Version: {selectedType.profile.version || 'n/a'}
<br />
Regel-Sets: {selectedType.profile.rule_sets ? Object.keys(selectedType.profile.rule_sets).join(', ') : 'keine'}
</div>
</div>
)}
</div>
</div>
)}
</div>
)
}

View File

@ -1,7 +1,8 @@
import { useState, useEffect } from 'react'
import { useNavigate } from 'react-router-dom'
import { Pencil, Trash2, Plus, Save, X, ArrowLeft } from 'lucide-react'
import { Pencil, Trash2, Plus, Save, X, ArrowLeft, Settings } from 'lucide-react'
import { api } from '../utils/api'
import ProfileBuilder from '../components/ProfileBuilder'
/**
* AdminTrainingTypesPage - CRUD for training types
@ -16,6 +17,8 @@ export default function AdminTrainingTypesPage() {
const [formData, setFormData] = useState(null)
const [error, setError] = useState(null)
const [saving, setSaving] = useState(false)
const [editingProfileId, setEditingProfileId] = useState(null)
const [parameters, setParameters] = useState([])
useEffect(() => {
load()
@ -25,10 +28,12 @@ export default function AdminTrainingTypesPage() {
setLoading(true)
Promise.all([
api.adminListTrainingTypes(),
api.getTrainingCategories()
]).then(([typesData, catsData]) => {
api.getTrainingCategories(),
api.getTrainingParameters()
]).then(([typesData, catsData, paramsData]) => {
setTypes(typesData)
setCategories(catsData)
setParameters(paramsData.parameters || [])
setLoading(false)
}).catch(err => {
console.error('Failed to load training types:', err)
@ -109,6 +114,31 @@ export default function AdminTrainingTypesPage() {
}
}
const startEditProfile = (typeId) => {
setEditingProfileId(typeId)
setEditingId(null) // Close type editor if open
setFormData(null)
}
const cancelEditProfile = () => {
setEditingProfileId(null)
}
const handleSaveProfile = async (profile) => {
try {
await api.adminUpdateTrainingType(editingProfileId, { profile })
await load()
// Success message is shown by ProfileBuilder component
// Don't close editor immediately - let user see success message
setTimeout(() => {
setEditingProfileId(null)
}, 2000)
} catch (err) {
// Error will be thrown to ProfileBuilder
throw err
}
}
// Group by category
const grouped = {}
types.forEach(type => {
@ -162,8 +192,8 @@ export default function AdminTrainingTypesPage() {
</button>
)}
{/* Edit form */}
{editingId && formData && (
{/* Edit form (only for new type creation, at top) */}
{editingId === 'new' && formData && (
<div className="card" style={{ padding: 16, marginBottom: 16 }}>
<div style={{ fontWeight: 600, marginBottom: 12 }}>
{editingId === 'new' ? ' Neuer Trainingstyp' : '✏️ Trainingstyp bearbeiten'}
@ -321,54 +351,182 @@ export default function AdminTrainingTypesPage() {
<div style={{ display: 'flex', flexDirection: 'column', gap: 8 }}>
{catTypes.sort((a, b) => a.sort_order - b.sort_order).map(type => (
<div
key={type.id}
style={{
display: 'flex',
alignItems: 'center',
gap: 8,
padding: 8,
background: 'var(--surface)',
borderRadius: 6
}}
>
<div style={{ fontSize: 18 }}>{type.icon}</div>
<div style={{ flex: 1 }}>
<div style={{ fontSize: 13, fontWeight: 500 }}>
{type.name_de} <span style={{ color: 'var(--text3)' }}>/ {type.name_en}</span>
</div>
{type.subcategory && (
<div style={{ fontSize: 11, color: 'var(--text3)' }}>
Subkategorie: {type.subcategory}
<div key={type.id}>
{/* Type Row */}
<div
style={{
display: 'flex',
alignItems: 'center',
gap: 8,
padding: 8,
background: 'var(--surface)',
borderRadius: 6
}}
>
<div style={{ fontSize: 18 }}>{type.icon}</div>
<div style={{ flex: 1 }}>
<div style={{ fontSize: 13, fontWeight: 500 }}>
{type.name_de} <span style={{ color: 'var(--text3)' }}>/ {type.name_en}</span>
{type.profile && (
<span style={{
marginLeft: 8,
padding: '2px 6px',
background: 'var(--accent)',
color: 'white',
borderRadius: 4,
fontSize: 10
}}>
Profil
</span>
)}
</div>
)}
{type.subcategory && (
<div style={{ fontSize: 11, color: 'var(--text3)' }}>
Subkategorie: {type.subcategory}
</div>
)}
</div>
<button
onClick={() => startEditProfile(type.id)}
style={{
background: 'none',
border: 'none',
cursor: 'pointer',
padding: 6,
color: 'var(--accent)'
}}
title="Profil konfigurieren"
>
<Settings size={16} />
</button>
<button
onClick={() => startEdit(type)}
style={{
background: 'none',
border: 'none',
cursor: 'pointer',
padding: 6,
color: 'var(--accent)'
}}
title="Bearbeiten"
>
<Pencil size={16} />
</button>
<button
onClick={() => handleDelete(type.id, type.name_de)}
style={{
background: 'none',
border: 'none',
cursor: 'pointer',
padding: 6,
color: '#D85A30'
}}
title="Löschen"
>
<Trash2 size={16} />
</button>
</div>
<button
onClick={() => startEdit(type)}
style={{
background: 'none',
border: 'none',
cursor: 'pointer',
padding: 6,
color: 'var(--accent)'
}}
title="Bearbeiten"
>
<Pencil size={16} />
</button>
<button
onClick={() => handleDelete(type.id, type.name_de)}
style={{
background: 'none',
border: 'none',
cursor: 'pointer',
padding: 6,
color: '#D85A30'
}}
title="Löschen"
>
<Trash2 size={16} />
</button>
{/* Inline Profile Builder */}
{editingProfileId === type.id && (
<div style={{ marginTop: 8 }}>
<ProfileBuilder
trainingType={type}
parameters={parameters}
onSave={handleSaveProfile}
onCancel={cancelEditProfile}
/>
</div>
)}
{/* Inline Type Editor */}
{editingId === type.id && formData && (
<div style={{ marginTop: 8, padding: 16, background: 'var(--surface2)', borderRadius: 8 }}>
<div style={{ fontWeight: 600, marginBottom: 12 }}> Trainingstyp bearbeiten</div>
<div style={{ display: 'flex', flexDirection: 'column', gap: 16 }}>
<div>
<div className="form-label">Kategorie *</div>
<select
className="form-input"
value={formData.category}
onChange={e => setFormData({ ...formData, category: e.target.value })}
style={{ width: '100%' }}
>
{Object.keys(categories).map(cat => (
<option key={cat} value={cat}>
{categories[cat].icon} {categories[cat].name_de}
</option>
))}
</select>
</div>
<div>
<div className="form-label">Subkategorie</div>
<input
className="form-input"
value={formData.subcategory}
onChange={e => setFormData({ ...formData, subcategory: e.target.value })}
placeholder="z.B. running, hypertrophy, meditation"
style={{ width: '100%' }}
/>
</div>
<div>
<div className="form-label">Name (Deutsch) *</div>
<input
className="form-input"
value={formData.name_de}
onChange={e => setFormData({ ...formData, name_de: e.target.value })}
placeholder="z.B. Laufen"
style={{ width: '100%' }}
/>
</div>
<div>
<div className="form-label">Name (English) *</div>
<input
className="form-input"
value={formData.name_en}
onChange={e => setFormData({ ...formData, name_en: e.target.value })}
placeholder="e.g. Running"
style={{ width: '100%' }}
/>
</div>
<div>
<div className="form-label">Icon (Emoji)</div>
<input
className="form-input"
value={formData.icon}
onChange={e => setFormData({ ...formData, icon: e.target.value })}
placeholder="🏃"
maxLength={10}
style={{ width: '100%' }}
/>
</div>
<div>
<div className="form-label">Sortierung</div>
<input
type="number"
className="form-input"
value={formData.sort_order}
onChange={e => setFormData({ ...formData, sort_order: parseInt(e.target.value) })}
style={{ width: '100%' }}
/>
</div>
<div style={{ display: 'flex', gap: 8 }}>
<button onClick={handleSave} disabled={saving} className="btn btn-primary" style={{ flex: 1 }}>
{saving ? <><div className="spinner" style={{ width: 14, height: 14 }} /> Speichere...</> : <><Save size={16} /> Speichern</>}
</button>
<button onClick={cancelEdit} disabled={saving} className="btn btn-secondary" style={{ flex: 1 }}>
<X size={16} /> Abbrechen
</button>
</div>
</div>
</div>
)}
</div>
))}
</div>

View File

@ -57,7 +57,11 @@ function PromptEditor({ prompt, onSave, onCancel }) {
'{{weight_trend}}','{{weight_aktuell}}','{{kf_aktuell}}','{{caliper_summary}}',
'{{circ_summary}}','{{nutrition_summary}}','{{nutrition_detail}}',
'{{protein_ziel_low}}','{{protein_ziel_high}}','{{activity_summary}}',
'{{activity_kcal_summary}}','{{activity_detail}}']
'{{activity_kcal_summary}}','{{activity_detail}}',
'{{sleep_summary}}','{{sleep_detail}}','{{sleep_avg_duration}}','{{sleep_avg_quality}}',
'{{rest_days_summary}}','{{rest_days_count}}','{{rest_days_types}}',
'{{vitals_summary}}','{{vitals_detail}}','{{vitals_avg_hr}}','{{vitals_avg_hrv}}',
'{{vitals_avg_bp}}','{{vitals_vo2_max}}','{{bp_summary}}']
return (
<div className="card section-gap">

View File

@ -59,6 +59,13 @@ const ENTRIES = [
to: '/rest-days',
color: '#9B59B6',
},
{
icon: '❤️',
label: 'Vitalwerte',
sub: 'Ruhepuls und HRV morgens erfassen',
to: '/vitals',
color: '#E74C3C',
},
{
icon: '📖',
label: 'Messanleitung',

File diff suppressed because it is too large Load Diff

View File

@ -214,6 +214,14 @@ export const api = {
adminDeleteTrainingType: (id) => req(`/admin/training-types/${id}`, {method:'DELETE'}),
getAbilitiesTaxonomy: () => req('/admin/training-types/taxonomy/abilities'),
// Admin: Training Type Profiles (Phase 2 #15)
getProfileStats: () => req('/admin/training-types/profiles/stats'),
getProfileTemplates: () => req('/admin/training-types/profiles/templates'),
getProfileTemplate: (key) => req(`/admin/training-types/profiles/templates/${key}`),
applyProfileTemplate: (id,templateKey) => req(`/admin/training-types/${id}/profile/apply-template`, json({template_key: templateKey})),
getTrainingParameters: () => req('/evaluation/parameters'),
batchEvaluateActivities: () => req('/evaluation/batch', {method:'POST'}),
// Admin: Activity Type Mappings (v9d Phase 1b - Learnable System)
adminListActivityMappings: (profileId, globalOnly) => req(`/admin/activity-mappings${profileId?'?profile_id='+profileId:''}${globalOnly?'?global_only=true':''}`),
adminGetActivityMapping: (id) => req(`/admin/activity-mappings/${id}`),
@ -248,4 +256,30 @@ export const api = {
deleteRestDay: (id) => req(`/rest-days/${id}`, {method:'DELETE'}),
getRestDaysStats: (weeks=4) => req(`/rest-days/stats?weeks=${weeks}`),
validateActivity: (date, activityType) => req('/rest-days/validate-activity', json({date, activity_type: activityType})),
// Vitals Baseline (v9d Phase 2d Refactored - once daily, morning)
listBaseline: (l=90) => req(`/vitals/baseline?limit=${l}`),
getBaselineByDate: (date) => req(`/vitals/baseline/by-date/${date}`),
createBaseline: (d) => req('/vitals/baseline', json(d)),
updateBaseline: (id,d) => req(`/vitals/baseline/${id}`, jput(d)),
deleteBaseline: (id) => req(`/vitals/baseline/${id}`, {method:'DELETE'}),
getBaselineStats: (days=30) => req(`/vitals/baseline/stats?days=${days}`),
importBaselineAppleHealth: (file) => {
const fd = new FormData()
fd.append('file', file)
return req('/vitals/baseline/import/apple-health', {method:'POST', body:fd})
},
// Blood Pressure (v9d Phase 2d Refactored - multiple daily, context-aware)
listBloodPressure: (l=90) => req(`/blood-pressure?limit=${l}`),
getBPByDate: (date) => req(`/blood-pressure/by-date/${date}`),
createBloodPressure:(d) => req('/blood-pressure', json(d)),
updateBloodPressure:(id,d) => req(`/blood-pressure/${id}`, jput(d)),
deleteBloodPressure:(id) => req(`/blood-pressure/${id}`, {method:'DELETE'}),
getBPStats: (days=30) => req(`/blood-pressure/stats?days=${days}`),
importBPOmron: (file) => {
const fd = new FormData()
fd.append('file', file)
return req('/blood-pressure/import/omron', {method:'POST', body:fd})
},
}