From 1b9cd6d5e6897d221936dc68aa492df51ab1adc9 Mon Sep 17 00:00:00 2001 From: Lars Date: Mon, 23 Mar 2026 10:49:26 +0100 Subject: [PATCH 01/29] feat: Training Type Profiles - Phase 1.1 Foundation (#15) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Implemented ### DB-Schema (Migrations) - Migration 013: training_parameters table (16 standard parameters) - Migration 014: training_types.profile + activity_log.evaluation columns - Performance metric calculations (avg_hr_percent, kcal_per_km) ### Backend - Rule Engine - RuleEvaluator: Generic rule evaluation with 9 operators - gte, lte, gt, lt, eq, neq, between, in, not_in - Weighted scoring system - Pass strategies: all_must_pass, weighted_score, at_least_n - IntensityZoneEvaluator: HR zone analysis - TrainingEffectsEvaluator: Abilities development ### Backend - Master Evaluator - TrainingProfileEvaluator: 7-dimensional evaluation 1. Minimum Requirements (Quality Gates) 2. Intensity Zones (HR zones) 3. Training Effects (Abilities) 4. Periodization (Frequency & Recovery) 5. Performance Indicators (KPIs) 6. Safety (Warnings) 7. 
AI Context (simplified for MVP) - evaluation_helper.py: Utilities for loading + saving - routers/evaluation.py: API endpoints - POST /api/evaluation/activity/{id} - POST /api/evaluation/batch - GET /api/evaluation/parameters ### Integration - main.py: Router registration ## TODO (Phase 1.2) - Auto-evaluation on activity INSERT/UPDATE - Admin-UI for profile editing - User-UI for results display ## Testing - βœ… Syntax checks passed - πŸ”² Runtime testing pending (after auto-evaluation) Part of Issue #15 - Training Type Profiles System --- backend/evaluation_helper.py | 252 +++++++++++ backend/main.py | 2 + .../migrations/013_training_parameters.sql | 144 ++++++ backend/migrations/014_training_profiles.sql | 114 +++++ backend/profile_evaluator.py | 349 ++++++++++++++ backend/routers/evaluation.py | 146 ++++++ backend/rule_engine.py | 427 ++++++++++++++++++ 7 files changed, 1434 insertions(+) create mode 100644 backend/evaluation_helper.py create mode 100644 backend/migrations/013_training_parameters.sql create mode 100644 backend/migrations/014_training_profiles.sql create mode 100644 backend/profile_evaluator.py create mode 100644 backend/routers/evaluation.py create mode 100644 backend/rule_engine.py diff --git a/backend/evaluation_helper.py b/backend/evaluation_helper.py new file mode 100644 index 0000000..f622ca2 --- /dev/null +++ b/backend/evaluation_helper.py @@ -0,0 +1,252 @@ +""" +Training Type Profiles - Helper Functions +Utilities for loading parameters, profiles, and running evaluations. + +Issue: #15 +Date: 2026-03-23 +""" +from typing import Dict, Optional, List +import logging + +from db import get_cursor +from profile_evaluator import TrainingProfileEvaluator + +logger = logging.getLogger(__name__) + + +def load_parameters_registry(cur) -> Dict[str, Dict]: + """ + Loads training parameters registry from database. 
+ + Returns: + Dict mapping parameter_key -> config + """ + cur.execute(""" + SELECT key, name_de, name_en, category, data_type, unit, + description_de, source_field, validation_rules + FROM training_parameters + WHERE is_active = true + """) + + registry = {} + for row in cur.fetchall(): + registry[row['key']] = dict(row) + + return registry + + +def load_training_type_profile(cur, training_type_id: int) -> Optional[Dict]: + """ + Loads training type profile for a given type ID. + + Returns: + Profile JSONB or None if not configured + """ + cur.execute( + "SELECT profile FROM training_types WHERE id = %s", + (training_type_id,) + ) + row = cur.fetchone() + + if row and row['profile']: + return row['profile'] + + return None + + +def load_evaluation_context( + cur, + profile_id: str, + activity_date: str, + lookback_days: int = 30 +) -> Dict: + """ + Loads context data for evaluation (user profile + recent activities). + + Args: + cur: Database cursor + profile_id: User profile ID + activity_date: Date of activity being evaluated + lookback_days: How many days of history to load + + Returns: + { + "user_profile": {...}, + "recent_activities": [...], + "historical_activities": [...] 
+ } + """ + # Load user profile + cur.execute( + "SELECT hf_max, sleep_goal_minutes FROM profiles WHERE id = %s", + (profile_id,) + ) + user_row = cur.fetchone() + user_profile = dict(user_row) if user_row else {} + + # Load recent activities (last N days) + cur.execute(""" + SELECT id, date, training_type_id, duration_min, hr_avg, hr_max, + distance_km, kcal_active, rpe + FROM activity_log + WHERE profile_id = %s + AND date >= %s::date - INTERVAL '%s days' + AND date < %s::date + ORDER BY date DESC + LIMIT 50 + """, (profile_id, activity_date, lookback_days, activity_date)) + + recent_activities = [dict(r) for r in cur.fetchall()] + + # Historical activities (same for MVP) + historical_activities = recent_activities + + return { + "user_profile": user_profile, + "recent_activities": recent_activities, + "historical_activities": historical_activities + } + + +def evaluate_and_save_activity( + cur, + activity_id: str, + activity_data: Dict, + training_type_id: int, + profile_id: str +) -> Optional[Dict]: + """ + Evaluates an activity and saves the result to the database. 
+ + Args: + cur: Database cursor + activity_id: Activity ID + activity_data: Activity data dict + training_type_id: Training type ID + profile_id: User profile ID + + Returns: + Evaluation result or None if no profile configured + """ + # Load profile + profile = load_training_type_profile(cur, training_type_id) + if not profile: + logger.info(f"[EVALUATION] No profile for training_type {training_type_id}, skipping") + return None + + # Load parameters registry + parameters = load_parameters_registry(cur) + + # Load context + context = load_evaluation_context( + cur, + profile_id, + activity_data.get("date"), + lookback_days=30 + ) + + # Evaluate + evaluator = TrainingProfileEvaluator(parameters) + evaluation_result = evaluator.evaluate_activity( + activity_data, + profile, + context + ) + + # Save to database + from psycopg2.extras import Json + + cur.execute(""" + UPDATE activity_log + SET evaluation = %s, + quality_label = %s, + overall_score = %s + WHERE id = %s + """, ( + Json(evaluation_result), + evaluation_result.get("quality_label"), + evaluation_result.get("overall_score"), + activity_id + )) + + logger.info( + f"[EVALUATION] Activity {activity_id}: " + f"{evaluation_result.get('quality_label')} " + f"(score: {evaluation_result.get('overall_score')})" + ) + + return evaluation_result + + +def batch_evaluate_activities( + cur, + profile_id: str, + limit: Optional[int] = None +) -> Dict: + """ + Re-evaluates all activities for a user. 
+ + Useful for: + - Initial setup after profiles are configured + - Re-evaluation after profile changes + + Args: + cur: Database cursor + profile_id: User profile ID + limit: Optional limit for testing + + Returns: + { + "total": int, + "evaluated": int, + "skipped": int, + "errors": int + } + """ + # Load all activities + query = """ + SELECT id, profile_id, date, training_type_id, duration_min, + hr_avg, hr_max, distance_km, kcal_active, kcal_resting, + rpe, pace_min_per_km, cadence, elevation_gain + FROM activity_log + WHERE profile_id = %s + ORDER BY date DESC + """ + params = [profile_id] + + if limit: + query += " LIMIT %s" + params.append(limit) + + cur.execute(query, params) + activities = cur.fetchall() + + stats = { + "total": len(activities), + "evaluated": 0, + "skipped": 0, + "errors": 0 + } + + for activity in activities: + activity_dict = dict(activity) + try: + result = evaluate_and_save_activity( + cur, + activity_dict["id"], + activity_dict, + activity_dict["training_type_id"], + profile_id + ) + + if result: + stats["evaluated"] += 1 + else: + stats["skipped"] += 1 + + except Exception as e: + logger.error(f"[BATCH-EVAL] Error evaluating {activity_dict['id']}: {e}") + stats["errors"] += 1 + + logger.info(f"[BATCH-EVAL] Completed: {stats}") + return stats diff --git a/backend/main.py b/backend/main.py index fba2846..a7689ec 100644 --- a/backend/main.py +++ b/backend/main.py @@ -21,6 +21,7 @@ from routers import admin, stats, exportdata, importdata from routers import subscription, coupons, features, tiers_mgmt, tier_limits from routers import user_restrictions, access_grants, training_types, admin_training_types from routers import admin_activity_mappings, sleep, rest_days +from routers import evaluation # v9d/v9e Training Type Profiles (#15) # ── App Configuration ───────────────────────────────────────────────────────── DATA_DIR = Path(os.getenv("DATA_DIR", "./data")) @@ -92,6 +93,7 @@ app.include_router(admin_training_types.router) # 
/api/admin/training-types/* app.include_router(admin_activity_mappings.router) # /api/admin/activity-mappings/* app.include_router(sleep.router) # /api/sleep/* (v9d Phase 2b) app.include_router(rest_days.router) # /api/rest-days/* (v9d Phase 2a) +app.include_router(evaluation.router) # /api/evaluation/* (v9d/v9e Training Profiles #15) # ── Health Check ────────────────────────────────────────────────────────────── @app.get("/") diff --git a/backend/migrations/013_training_parameters.sql b/backend/migrations/013_training_parameters.sql new file mode 100644 index 0000000..9f80bcf --- /dev/null +++ b/backend/migrations/013_training_parameters.sql @@ -0,0 +1,144 @@ +-- Migration 013: Training Parameters Registry +-- Training Type Profiles System - Foundation +-- Date: 2026-03-23 +-- Issue: #15 + +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +-- TRAINING PARAMETERS REGISTRY +-- Zentrale Definition aller messbaren Parameter fΓΌr AktivitΓ€ten +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +CREATE TABLE IF NOT EXISTS training_parameters ( + id SERIAL PRIMARY KEY, + key VARCHAR(50) UNIQUE NOT NULL, + name_de VARCHAR(100) NOT NULL, + name_en VARCHAR(100) NOT NULL, + category VARCHAR(50) NOT NULL, + data_type VARCHAR(20) NOT NULL, + unit VARCHAR(20), + description_de TEXT, + description_en TEXT, + source_field VARCHAR(100), + validation_rules JSONB DEFAULT '{}'::jsonb, + is_active BOOLEAN DEFAULT true, + created_at TIMESTAMP DEFAULT NOW(), + + CONSTRAINT chk_category CHECK (category IN ( + 'physical', 'physiological', 'subjective', 'environmental', 'performance' + )), + CONSTRAINT chk_data_type CHECK (data_type IN ( + 'integer', 'float', 'string', 'boolean' + )) +); + +CREATE INDEX idx_training_parameters_category ON training_parameters(category) WHERE is_active = true; +CREATE INDEX idx_training_parameters_key ON training_parameters(key) WHERE is_active = true; + +COMMENT ON TABLE training_parameters IS 'Registry of 
all measurable activity parameters (Training Type Profiles System)'; +COMMENT ON COLUMN training_parameters.key IS 'Unique identifier (e.g. "avg_hr", "duration_min")'; +COMMENT ON COLUMN training_parameters.category IS 'Parameter category: physical, physiological, subjective, environmental, performance'; +COMMENT ON COLUMN training_parameters.data_type IS 'Data type: integer, float, string, boolean'; +COMMENT ON COLUMN training_parameters.source_field IS 'Mapping to activity_log column name'; +COMMENT ON COLUMN training_parameters.validation_rules IS 'Min/Max/Enum for validation (JSONB)'; + +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +-- STANDARD PARAMETERS +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +INSERT INTO training_parameters (key, name_de, name_en, category, data_type, unit, source_field, validation_rules, description_de, description_en) VALUES + +-- Physical Parameters +('duration_min', 'Dauer', 'Duration', 'physical', 'integer', 'min', 'duration_min', + '{"min": 0, "max": 600}'::jsonb, + 'Trainingsdauer in Minuten', + 'Training duration in minutes'), + +('distance_km', 'Distanz', 'Distance', 'physical', 'float', 'km', 'distance_km', + '{"min": 0, "max": 200}'::jsonb, + 'ZurΓΌckgelegte Distanz in Kilometern', + 'Distance covered in kilometers'), + +('kcal_active', 'Aktive Kalorien', 'Active Calories', 'physical', 'integer', 'kcal', 'kcal_active', + '{"min": 0, "max": 5000}'::jsonb, + 'Aktiver Kalorienverbrauch', + 'Active calorie burn'), + +('kcal_resting', 'Ruhekalorien', 'Resting Calories', 'physical', 'integer', 'kcal', 'kcal_resting', + '{"min": 0, "max": 2000}'::jsonb, + 'Ruheumsatz wΓ€hrend Training', + 'Resting calorie burn during training'), + +('elevation_gain', 'HΓΆhenmeter', 'Elevation Gain', 'physical', 'integer', 'm', 'elevation_gain', + '{"min": 0, "max": 5000}'::jsonb, + 'Überwundene HΓΆhenmeter', + 'Elevation gain in meters'), + +('pace_min_per_km', 'Pace', 'Pace', 'physical', 
'float', 'min/km', 'pace_min_per_km', + '{"min": 2, "max": 20}'::jsonb, + 'Durchschnittstempo in Minuten pro Kilometer', + 'Average pace in minutes per kilometer'), + +('cadence', 'Trittfrequenz', 'Cadence', 'physical', 'integer', 'spm', 'cadence', + '{"min": 0, "max": 220}'::jsonb, + 'Schrittfrequenz (Schritte pro Minute)', + 'Step frequency (steps per minute)'), + +-- Physiological Parameters +('avg_hr', 'Durchschnittspuls', 'Average Heart Rate', 'physiological', 'integer', 'bpm', 'hr_avg', + '{"min": 30, "max": 220}'::jsonb, + 'Durchschnittliche Herzfrequenz', + 'Average heart rate'), + +('max_hr', 'Maximalpuls', 'Max Heart Rate', 'physiological', 'integer', 'bpm', 'hr_max', + '{"min": 40, "max": 220}'::jsonb, + 'Maximale Herzfrequenz', + 'Maximum heart rate'), + +('min_hr', 'Minimalpuls', 'Min Heart Rate', 'physiological', 'integer', 'bpm', 'hr_min', + '{"min": 30, "max": 200}'::jsonb, + 'Minimale Herzfrequenz', + 'Minimum heart rate'), + +('avg_power', 'Durchschnittsleistung', 'Average Power', 'physiological', 'integer', 'W', 'avg_power', + '{"min": 0, "max": 1000}'::jsonb, + 'Durchschnittliche Leistung in Watt', + 'Average power output in watts'), + +-- Subjective Parameters +('rpe', 'RPE (Anstrengung)', 'RPE (Perceived Exertion)', 'subjective', 'integer', 'scale', 'rpe', + '{"min": 1, "max": 10}'::jsonb, + 'Subjektive Anstrengung (Rate of Perceived Exertion)', + 'Rate of Perceived Exertion'), + +-- Environmental Parameters +('temperature_celsius', 'Temperatur', 'Temperature', 'environmental', 'float', 'Β°C', 'temperature_celsius', + '{"min": -30, "max": 50}'::jsonb, + 'Umgebungstemperatur in Celsius', + 'Ambient temperature in Celsius'), + +('humidity_percent', 'Luftfeuchtigkeit', 'Humidity', 'environmental', 'integer', '%', 'humidity_percent', + '{"min": 0, "max": 100}'::jsonb, + 'Relative Luftfeuchtigkeit in Prozent', + 'Relative humidity in percent'), + +-- Performance Parameters (calculated) +('avg_hr_percent', '% Max-HF', '% Max HR', 'performance', 
'float', '%', 'avg_hr_percent', + '{"min": 0, "max": 100}'::jsonb, + 'Durchschnittspuls als Prozent der maximalen Herzfrequenz', + 'Average heart rate as percentage of max heart rate'), + +('kcal_per_km', 'Kalorien pro km', 'Calories per km', 'performance', 'float', 'kcal/km', 'kcal_per_km', '{"min": 0, "max": 500}'::jsonb, + 'Kalorienverbrauch pro Kilometer', + 'Calorie burn per kilometer'); + +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +-- SUMMARY +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +-- Display inserted parameters +DO $$ +BEGIN + RAISE NOTICE 'βœ“ Migration 013 completed'; + RAISE NOTICE ' - Created training_parameters table'; + RAISE NOTICE ' - Inserted % standard parameters', (SELECT COUNT(*) FROM training_parameters); +END $$; diff --git a/backend/migrations/014_training_profiles.sql b/backend/migrations/014_training_profiles.sql new file mode 100644 index 0000000..6747c6c --- /dev/null +++ b/backend/migrations/014_training_profiles.sql @@ -0,0 +1,114 @@ +-- Migration 014: Training Type Profiles & Activity Evaluation +-- Training Type Profiles System - Schema Extensions +-- Date: 2026-03-23 +-- Issue: #15 + +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +-- EXTEND TRAINING TYPES +-- Add profile column for comprehensive training type configuration +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +ALTER TABLE training_types ADD COLUMN IF NOT EXISTS profile JSONB DEFAULT NULL; + +CREATE INDEX idx_training_types_profile_enabled ON training_types + ((profile->'rule_sets'->'minimum_requirements'->>'enabled')) + WHERE profile IS NOT NULL; + +COMMENT ON COLUMN training_types.profile IS 'Comprehensive training type profile with 7 dimensions (rule_sets, intensity_zones, training_effects, periodization, performance_indicators, safety, ai_context)'; + +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +-- EXTEND ACTIVITY LOG +-- Add evaluation results and
quality labels +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS evaluation JSONB DEFAULT NULL; +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS quality_label VARCHAR(20); +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS overall_score FLOAT; + +CREATE INDEX idx_activity_quality_label ON activity_log(quality_label) + WHERE quality_label IS NOT NULL; + +CREATE INDEX idx_activity_overall_score ON activity_log(overall_score DESC) + WHERE overall_score IS NOT NULL; + +CREATE INDEX idx_activity_evaluation_passed ON activity_log + ((evaluation->'rule_set_results'->'minimum_requirements'->>'passed')) + WHERE evaluation IS NOT NULL; + +COMMENT ON COLUMN activity_log.evaluation IS 'Complete evaluation result (7 dimensions, scores, recommendations, warnings)'; +COMMENT ON COLUMN activity_log.quality_label IS 'Quality label: excellent, good, acceptable, poor (for quick filtering)'; +COMMENT ON COLUMN activity_log.overall_score IS 'Overall quality score 0.0-1.0 (for sorting)'; + +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +-- ADD MISSING COLUMNS (if not already added by previous migrations) +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +-- Add HR columns if not exist (might be in Migration 008) +DO $$ +BEGIN + IF NOT EXISTS (SELECT 1 FROM information_schema.columns + WHERE table_name='activity_log' AND column_name='hr_min') THEN + ALTER TABLE activity_log ADD COLUMN hr_min INTEGER CHECK (hr_min > 0 AND hr_min < 200); + END IF; +END $$; + +-- Add performance columns for calculated values +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS avg_hr_percent FLOAT; +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS kcal_per_km FLOAT; +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS pace_min_per_km FLOAT; +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS cadence INTEGER; +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS avg_power INTEGER; 
+ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS elevation_gain INTEGER; +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS temperature_celsius FLOAT; +ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS humidity_percent INTEGER; + +COMMENT ON COLUMN activity_log.avg_hr_percent IS 'Average HR as percentage of user max HR (calculated)'; +COMMENT ON COLUMN activity_log.kcal_per_km IS 'Calories burned per kilometer (calculated)'; + +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +-- HELPER FUNCTION: Calculate avg_hr_percent +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +CREATE OR REPLACE FUNCTION calculate_avg_hr_percent() +RETURNS TRIGGER AS $$ +DECLARE + user_max_hr INTEGER; +BEGIN + -- Get user's max HR from profile + SELECT hf_max INTO user_max_hr + FROM profiles + WHERE id = NEW.profile_id; + + -- Calculate percentage if both values exist + IF NEW.hr_avg IS NOT NULL AND user_max_hr IS NOT NULL AND user_max_hr > 0 THEN + NEW.avg_hr_percent := (NEW.hr_avg::float / user_max_hr::float) * 100; + END IF; + + -- Calculate kcal per km + IF NEW.kcal_active IS NOT NULL AND NEW.distance_km IS NOT NULL AND NEW.distance_km > 0 THEN + NEW.kcal_per_km := NEW.kcal_active::float / NEW.distance_km; + END IF; + + RETURN NEW; +END; +$$ LANGUAGE plpgsql; + +-- Trigger for automatic calculation +DROP TRIGGER IF EXISTS trg_calculate_performance_metrics ON activity_log; +CREATE TRIGGER trg_calculate_performance_metrics + BEFORE INSERT OR UPDATE ON activity_log + FOR EACH ROW + EXECUTE FUNCTION calculate_avg_hr_percent(); + +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +-- SUMMARY +-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +DO $$ +BEGIN + RAISE NOTICE 'βœ“ Migration 014 completed'; + RAISE NOTICE ' - Extended training_types with profile column'; + RAISE NOTICE ' - Extended activity_log with evaluation columns'; + RAISE NOTICE ' - Added performance metric calculations'; 
+ RAISE NOTICE ' - Created indexes for fast queries'; +END $$; diff --git a/backend/profile_evaluator.py b/backend/profile_evaluator.py new file mode 100644 index 0000000..c2f05a3 --- /dev/null +++ b/backend/profile_evaluator.py @@ -0,0 +1,349 @@ +""" +Training Type Profiles - Master Evaluator +Comprehensive activity evaluation across all 7 dimensions. + +Issue: #15 +Date: 2026-03-23 +""" +from typing import Dict, Optional, List +from datetime import datetime +import logging + +from rule_engine import RuleEvaluator, IntensityZoneEvaluator, TrainingEffectsEvaluator + +logger = logging.getLogger(__name__) + + +class TrainingProfileEvaluator: + """ + Master class for comprehensive activity evaluation. + + Evaluates an activity against a training type profile across 7 dimensions: + 1. Minimum Requirements (Quality Gates) + 2. Intensity Zones (HR zones) + 3. Training Effects (Abilities) + 4. Periodization (Frequency & Recovery) + 5. Performance Indicators (KPIs) + 6. Safety (Warnings) + 7. AI Context + """ + + def __init__(self, parameters_registry: Dict[str, Dict]): + """ + Initialize evaluator with parameter registry. + + Args: + parameters_registry: Dict mapping parameter_key -> config + """ + self.parameters_registry = parameters_registry + self.rule_evaluator = RuleEvaluator() + self.zone_evaluator = IntensityZoneEvaluator() + self.effects_evaluator = TrainingEffectsEvaluator() + + def evaluate_activity( + self, + activity: Dict, + training_type_profile: Optional[Dict], + context: Optional[Dict] = None + ) -> Dict: + """ + Complete evaluation of an activity against its training type profile. + + Args: + activity: Activity data dictionary + training_type_profile: Training type profile (JSONB) + context: { + "user_profile": {...}, + "recent_activities": [...], + "historical_activities": [...] 
+ } + + Returns: + { + "evaluated_at": ISO timestamp, + "profile_version": str, + "rule_set_results": { + "minimum_requirements": {...}, + "intensity_zones": {...}, + "training_effects": {...}, + "periodization": {...}, + "performance_indicators": {...}, + "safety": {...} + }, + "overall_score": float (0-1), + "quality_label": str, + "recommendations": [str], + "warnings": [str] + } + """ + # No profile? Return unvalidated result + if not training_type_profile: + return self._create_unvalidated_result() + + rule_sets = training_type_profile.get("rule_sets", {}) + context = context or {} + + results = { + "evaluated_at": datetime.now().isoformat(), + "profile_version": training_type_profile.get("version", "unknown"), + "rule_set_results": {} + } + + # ━━━ 1. MINIMUM REQUIREMENTS ━━━ + if "minimum_requirements" in rule_sets: + results["rule_set_results"]["minimum_requirements"] = \ + self.rule_evaluator.evaluate_rule_set( + rule_sets["minimum_requirements"], + activity, + self.parameters_registry + ) + + # ━━━ 2. INTENSITY ZONES ━━━ + if "intensity_zones" in rule_sets: + results["rule_set_results"]["intensity_zones"] = \ + self.zone_evaluator.evaluate( + rule_sets["intensity_zones"], + activity, + context.get("user_profile", {}) + ) + + # ━━━ 3. TRAINING EFFECTS ━━━ + if "training_effects" in rule_sets: + results["rule_set_results"]["training_effects"] = \ + self.effects_evaluator.evaluate( + rule_sets["training_effects"], + activity, + results["rule_set_results"].get("intensity_zones") + ) + + # ━━━ 4. PERIODIZATION ━━━ + if "periodization" in rule_sets: + results["rule_set_results"]["periodization"] = \ + self._evaluate_periodization( + rule_sets["periodization"], + activity, + context.get("recent_activities", []) + ) + + # ━━━ 5. 
PERFORMANCE INDICATORS ━━━ + if "performance_indicators" in rule_sets: + results["rule_set_results"]["performance_indicators"] = \ + self._evaluate_performance( + rule_sets["performance_indicators"], + activity, + context.get("historical_activities", []) + ) + + # ━━━ 6. SAFETY WARNINGS ━━━ + if "safety" in rule_sets: + results["rule_set_results"]["safety"] = \ + self._evaluate_safety( + rule_sets["safety"], + activity + ) + + # ━━━ OVERALL SCORE & QUALITY LABEL ━━━ + overall_score = self._calculate_overall_score(results["rule_set_results"]) + results["overall_score"] = overall_score + results["quality_label"] = self._get_quality_label(overall_score) + + # ━━━ RECOMMENDATIONS & WARNINGS ━━━ + results["recommendations"] = self._generate_recommendations(results) + results["warnings"] = self._collect_warnings(results) + + return results + + def _create_unvalidated_result(self) -> Dict: + """Creates result for activities without profile.""" + return { + "evaluated_at": datetime.now().isoformat(), + "profile_version": None, + "rule_set_results": {}, + "overall_score": None, + "quality_label": None, + "recommendations": ["Kein Trainingsprofil konfiguriert"], + "warnings": [] + } + + def _evaluate_periodization( + self, + config: Dict, + activity: Dict, + recent_activities: List[Dict] + ) -> Dict: + """ + Evaluates periodization compliance (frequency & recovery). + + Simplified for MVP - full implementation later. 
+ """ + if not config.get("enabled", False): + return {"enabled": False} + + # Basic frequency check + training_type_id = activity.get("training_type_id") + same_type_this_week = sum( + 1 for a in recent_activities + if a.get("training_type_id") == training_type_id + ) + + frequency_config = config.get("frequency", {}) + optimal = frequency_config.get("per_week_optimal", 3) + + return { + "enabled": True, + "weekly_count": same_type_this_week, + "optimal_count": optimal, + "frequency_status": "optimal" if same_type_this_week <= optimal else "over_optimal", + "recovery_adequate": True, # Simplified for MVP + "warning": None + } + + def _evaluate_performance( + self, + config: Dict, + activity: Dict, + historical_activities: List[Dict] + ) -> Dict: + """ + Evaluates performance development. + + Simplified for MVP - full implementation later. + """ + if not config.get("enabled", False): + return {"enabled": False} + + return { + "enabled": True, + "trend": "stable", # Simplified + "metrics_comparison": {}, + "benchmark_level": "intermediate" + } + + def _evaluate_safety(self, config: Dict, activity: Dict) -> Dict: + """ + Evaluates safety warnings. 
+ """ + if not config.get("enabled", False): + return {"enabled": False, "warnings": []} + + warnings_config = config.get("warnings", []) + triggered_warnings = [] + + for warning_rule in warnings_config: + param_key = warning_rule.get("parameter") + operator = warning_rule.get("operator") + threshold = warning_rule.get("value") + severity = warning_rule.get("severity", "medium") + message = warning_rule.get("message", "") + + actual_value = activity.get(param_key) + + if actual_value is not None: + operator_func = RuleEvaluator.OPERATORS.get(operator) + if operator_func and operator_func(actual_value, threshold): + triggered_warnings.append({ + "severity": severity, + "message": message, + "parameter": param_key, + "actual_value": actual_value, + "threshold": threshold + }) + + return { + "enabled": True, + "warnings": triggered_warnings + } + + def _calculate_overall_score(self, rule_set_results: Dict) -> float: + """ + Calculates weighted overall score. + + Weights: + - Minimum Requirements: 40% + - Intensity Zones: 20% + - Periodization: 20% + - Performance: 10% + - Training Effects: 10% + """ + weights = { + "minimum_requirements": 0.4, + "intensity_zones": 0.2, + "periodization": 0.2, + "performance_indicators": 0.1, + "training_effects": 0.1 + } + + total_score = 0.0 + total_weight = 0.0 + + for rule_set_name, weight in weights.items(): + result = rule_set_results.get(rule_set_name) + if result and result.get("enabled"): + score = result.get("score", 0.5) + + # Special handling for different result types + if rule_set_name == "intensity_zones": + score = result.get("duration_quality", 0.5) + elif rule_set_name == "periodization": + score = 1.0 if result.get("recovery_adequate", False) else 0.5 + + total_score += score * weight + total_weight += weight + + return round(total_score / total_weight, 2) if total_weight > 0 else 0.5 + + def _get_quality_label(self, score: Optional[float]) -> Optional[str]: + """Converts score to quality label.""" + if score is 
None: + return None + + if score >= 0.9: + return "excellent" + elif score >= 0.7: + return "good" + elif score >= 0.5: + return "acceptable" + else: + return "poor" + + def _generate_recommendations(self, results: Dict) -> List[str]: + """Generates actionable recommendations.""" + recommendations = [] + + # Check minimum requirements + min_req = results["rule_set_results"].get("minimum_requirements", {}) + if min_req.get("enabled") and not min_req.get("passed"): + for failed in min_req.get("failed_rules", []): + param = failed.get("parameter") + actual = failed.get("actual_value") + expected = failed.get("expected_value") + reason = failed.get("reason", "") + symbol = failed.get("operator_symbol", "") + + recommendations.append( + f"{param}: {actual} {symbol} {expected} - {reason}" + ) + + # Check intensity zones + zone_result = results["rule_set_results"].get("intensity_zones", {}) + if zone_result.get("enabled") and zone_result.get("recommendation"): + recommendations.append(zone_result["recommendation"]) + + # Default recommendation if excellent + if results.get("quality_label") == "excellent" and not recommendations: + recommendations.append("Hervorragendes Training! Weiter so.") + + return recommendations + + def _collect_warnings(self, results: Dict) -> List[str]: + """Collects all warnings from safety checks.""" + safety_result = results["rule_set_results"].get("safety", {}) + if not safety_result.get("enabled"): + return [] + + warnings = [] + for warning in safety_result.get("warnings", []): + severity_icon = "πŸ”΄" if warning["severity"] == "high" else "⚠️" + warnings.append(f"{severity_icon} {warning['message']}") + + return warnings diff --git a/backend/routers/evaluation.py b/backend/routers/evaluation.py new file mode 100644 index 0000000..bc2370f --- /dev/null +++ b/backend/routers/evaluation.py @@ -0,0 +1,146 @@ +""" +Evaluation Endpoints - Training Type Profiles +Endpoints for activity evaluation and re-evaluation. 
+ +Issue: #15 +Date: 2026-03-23 +""" +import logging +from typing import Optional +from fastapi import APIRouter, HTTPException, Depends + +from db import get_db, get_cursor, r2d +from auth import require_auth, require_admin +from evaluation_helper import ( + evaluate_and_save_activity, + batch_evaluate_activities, + load_parameters_registry +) + +router = APIRouter(prefix="/api/evaluation", tags=["evaluation"]) +logger = logging.getLogger(__name__) + + +@router.get("/parameters") +def list_parameters(session: dict = Depends(require_auth)): + """ + List all available training parameters. + """ + with get_db() as conn: + cur = get_cursor(conn) + parameters = load_parameters_registry(cur) + + return { + "parameters": list(parameters.values()), + "count": len(parameters) + } + + +@router.post("/activity/{activity_id}") +def evaluate_activity( + activity_id: str, + session: dict = Depends(require_auth) +): + """ + Evaluates or re-evaluates a single activity. + + Returns the evaluation result. 
+ """ + profile_id = session['profile_id'] + + with get_db() as conn: + cur = get_cursor(conn) + + # Load activity + cur.execute(""" + SELECT id, profile_id, date, training_type_id, duration_min, + hr_avg, hr_max, distance_km, kcal_active, kcal_resting, + rpe, pace_min_per_km, cadence, elevation_gain + FROM activity_log + WHERE id = %s AND profile_id = %s + """, (activity_id, profile_id)) + + activity = cur.fetchone() + if not activity: + raise HTTPException(404, "Activity not found") + + activity_dict = dict(activity) + + # Evaluate + result = evaluate_and_save_activity( + cur, + activity_dict["id"], + activity_dict, + activity_dict["training_type_id"], + profile_id + ) + + if not result: + return { + "message": "No profile configured for this training type", + "evaluation": None + } + + return { + "message": "Activity evaluated", + "evaluation": result + } + + +@router.post("/batch") +def batch_evaluate( + limit: Optional[int] = None, + session: dict = Depends(require_auth) +): + """ + Re-evaluates all activities for the current user. + + Optional limit parameter for testing. + """ + profile_id = session['profile_id'] + + with get_db() as conn: + cur = get_cursor(conn) + stats = batch_evaluate_activities(cur, profile_id, limit) + + return { + "message": "Batch evaluation completed", + "stats": stats + } + + +@router.post("/batch/all") +def batch_evaluate_all(session: dict = Depends(require_admin)): + """ + Admin-only: Re-evaluates all activities for all users. + + Use with caution on large databases! 
+ """ + with get_db() as conn: + cur = get_cursor(conn) + + # Get all profiles + cur.execute("SELECT id FROM profiles") + profiles = cur.fetchall() + + total_stats = { + "profiles": len(profiles), + "total": 0, + "evaluated": 0, + "skipped": 0, + "errors": 0 + } + + for profile in profiles: + profile_id = profile['id'] + stats = batch_evaluate_activities(cur, profile_id) + + total_stats["total"] += stats["total"] + total_stats["evaluated"] += stats["evaluated"] + total_stats["skipped"] += stats["skipped"] + total_stats["errors"] += stats["errors"] + + return { + "message": "Batch evaluation for all users completed", + "stats": total_stats + } diff --git a/backend/rule_engine.py b/backend/rule_engine.py new file mode 100644 index 0000000..e0cff89 --- /dev/null +++ b/backend/rule_engine.py @@ -0,0 +1,427 @@ +""" +Training Type Profiles - Rule Engine +Flexible rule evaluation system for activity quality assessment. + +Issue: #15 +Date: 2026-03-23 +""" +from typing import Any, Dict, List, Optional, Callable +from datetime import datetime +import logging + +logger = logging.getLogger(__name__) + + +class RuleEvaluator: + """ + Generic rule evaluator for arbitrary parameters and operators. 
+ + Supports flexible rule definitions with various operators: + - gte, lte, gt, lt: Comparison operators + - eq, neq: Equality operators + - between: Range checks + - in, not_in: Set membership + """ + + # Operator definitions + OPERATORS: Dict[str, Callable[[Any, Any], bool]] = { + "gte": lambda actual, expected: actual is not None and actual >= expected, + "lte": lambda actual, expected: actual is not None and actual <= expected, + "gt": lambda actual, expected: actual is not None and actual > expected, + "lt": lambda actual, expected: actual is not None and actual < expected, + "eq": lambda actual, expected: actual == expected, + "neq": lambda actual, expected: actual != expected, + "between": lambda actual, expected: actual is not None and expected[0] <= actual <= expected[1], + "in": lambda actual, expected: actual in expected, + "not_in": lambda actual, expected: actual not in expected, + } + + OPERATOR_SYMBOLS = { + "gte": "β‰₯", + "lte": "≀", + "gt": ">", + "lt": "<", + "eq": "=", + "neq": "β‰ ", + "between": "⟷", + "in": "∈", + "not_in": "βˆ‰", + } + + @classmethod + def evaluate_rule( + cls, + rule: Dict, + activity: Dict, + parameters_registry: Dict[str, Dict] + ) -> Dict: + """ + Evaluates a single rule against an activity. 
+ + Args: + rule: { + "parameter": str, + "operator": str, + "value": Any, + "weight": int, + "optional": bool, + "reason": str + } + activity: Activity data dictionary + parameters_registry: Mapping parameter_key -> config + + Returns: + { + "passed": bool, + "actual_value": Any, + "expected_value": Any, + "parameter": str, + "operator": str, + "operator_symbol": str, + "reason": str, + "weight": int, + "skipped": bool (optional), + "error": str (optional) + } + """ + param_key = rule.get("parameter") + operator = rule.get("operator") + expected_value = rule.get("value") + weight = rule.get("weight", 1) + reason = rule.get("reason", "") + optional = rule.get("optional", False) + + # Get parameter configuration + param_config = parameters_registry.get(param_key) + if not param_config: + return { + "passed": False, + "parameter": param_key, + "error": f"Unknown parameter: {param_key}" + } + + # Extract value from activity + source_field = param_config.get("source_field", param_key) + actual_value = activity.get(source_field) + + # Optional and not provided? β†’ Pass + if optional and actual_value is None: + return { + "passed": True, + "actual_value": None, + "expected_value": expected_value, + "parameter": param_key, + "operator": operator, + "operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator), + "reason": "Optional parameter not provided", + "weight": weight, + "skipped": True + } + + # Required but not provided? 
β†’ Fail + if actual_value is None: + return { + "passed": False, + "actual_value": None, + "expected_value": expected_value, + "parameter": param_key, + "operator": operator, + "operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator), + "reason": reason or "Required parameter missing", + "weight": weight + } + + # Apply operator + operator_func = cls.OPERATORS.get(operator) + if not operator_func: + return { + "passed": False, + "parameter": param_key, + "error": f"Unknown operator: {operator}" + } + + try: + passed = operator_func(actual_value, expected_value) + except Exception as e: + logger.error(f"[RULE-ENGINE] Error evaluating rule {param_key}: {e}") + return { + "passed": False, + "parameter": param_key, + "error": f"Evaluation error: {str(e)}" + } + + return { + "passed": passed, + "actual_value": actual_value, + "expected_value": expected_value, + "parameter": param_key, + "operator": operator, + "operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator), + "reason": reason, + "weight": weight + } + + @classmethod + def evaluate_rule_set( + cls, + rule_set: Dict, + activity: Dict, + parameters_registry: Dict[str, Dict] + ) -> Dict: + """ + Evaluates a complete rule set (e.g., minimum_requirements). + + Args: + rule_set: { + "enabled": bool, + "pass_strategy": str, + "pass_threshold": float, + "rules": [...] + } + activity: Activity data + parameters_registry: Parameter configurations + + Returns: + { + "enabled": bool, + "passed": bool, + "score": float (0-1), + "rule_results": [...], + "pass_strategy": str, + "pass_threshold": float, + "failed_rules": [...] 
+ } + """ + if not rule_set.get("enabled", False): + return { + "enabled": False, + "passed": True, + "score": 1.0, + "rule_results": [], + "failed_rules": [] + } + + rules = rule_set.get("rules", []) + pass_strategy = rule_set.get("pass_strategy", "weighted_score") + pass_threshold = rule_set.get("pass_threshold", 0.6) + + rule_results = [] + failed_rules = [] + total_weight = 0 + passed_weight = 0 + + # Evaluate each rule + for rule in rules: + result = cls.evaluate_rule(rule, activity, parameters_registry) + rule_results.append(result) + + if result.get("skipped"): + continue + + if result.get("error"): + logger.warning(f"[RULE-ENGINE] Rule error: {result['error']}") + continue + + weight = result.get("weight", 1) + total_weight += weight + + if result["passed"]: + passed_weight += weight + else: + failed_rules.append(result) + + # Calculate score + score = passed_weight / total_weight if total_weight > 0 else 1.0 + + # Apply pass strategy + if pass_strategy == "all_must_pass": + passed = all( + r["passed"] for r in rule_results + if not r.get("skipped") and not r.get("error") + ) + elif pass_strategy == "weighted_score": + passed = score >= pass_threshold + elif pass_strategy == "at_least_n": + n = rule_set.get("at_least_n", 1) + passed_count = sum( + 1 for r in rule_results + if r["passed"] and not r.get("skipped") + ) + passed = passed_count >= n + else: + passed = False + logger.warning(f"[RULE-ENGINE] Unknown pass strategy: {pass_strategy}") + + return { + "enabled": True, + "passed": passed, + "score": round(score, 2), + "rule_results": rule_results, + "failed_rules": failed_rules, + "pass_strategy": pass_strategy, + "pass_threshold": pass_threshold + } + + +class IntensityZoneEvaluator: + """ + Evaluates heart rate zones and time distribution. + """ + + @staticmethod + def evaluate( + zone_config: Dict, + activity: Dict, + user_profile: Dict + ) -> Dict: + """ + Evaluates which HR zone the activity was in. 
+ + Args: + zone_config: intensity_zones configuration + activity: Activity data (with hr_avg) + user_profile: User profile (with hf_max) + + Returns: + { + "enabled": bool, + "dominant_zone": str, + "avg_hr_percent": float, + "zone_color": str, + "zone_effect": str, + "duration_quality": float (0-1), + "recommendation": str + } + """ + if not zone_config.get("enabled", False): + return {"enabled": False} + + avg_hr = activity.get("hr_avg") + user_max_hr = user_profile.get("hf_max", 180) # Default 180 if not set + + if not avg_hr or not user_max_hr: + return { + "enabled": True, + "dominant_zone": "unknown", + "avg_hr_percent": None, + "recommendation": "Herzfrequenz-Daten fehlen" + } + + avg_hr_percent = (avg_hr / user_max_hr) * 100 + + # Find matching zone + zones = zone_config.get("zones", []) + dominant_zone = None + + for zone in zones: + zone_rules = zone.get("rules", []) + for rule in zone_rules: + if rule["parameter"] == "avg_hr_percent": + min_percent, max_percent = rule["value"] + if min_percent <= avg_hr_percent <= max_percent: + dominant_zone = zone + break + if dominant_zone: + break + + if not dominant_zone: + return { + "enabled": True, + "dominant_zone": "out_of_range", + "avg_hr_percent": round(avg_hr_percent, 1), + "recommendation": "Herzfrequenz außerhalb definierter Zonen" + } + + # Check duration quality + duration = activity.get("duration_min", 0) + target_duration = dominant_zone.get("target_duration_min", 30) + duration_quality = min(duration / target_duration, 1.0) if target_duration > 0 else 1.0 + + recommendation = f"Training in Zone '{dominant_zone['name']}' (Effekt: {dominant_zone['effect']})." + if duration < target_duration: + recommendation += f" FΓΌr optimale Wirkung: {target_duration}min empfohlen." 
+ + return { + "enabled": True, + "dominant_zone": dominant_zone.get("id"), + "dominant_zone_name": dominant_zone.get("name"), + "avg_hr_percent": round(avg_hr_percent, 1), + "zone_color": dominant_zone.get("color"), + "zone_effect": dominant_zone.get("effect"), + "duration_quality": round(duration_quality, 2), + "target_duration_min": target_duration, + "actual_duration_min": duration, + "recommendation": recommendation + } + + +class TrainingEffectsEvaluator: + """ + Evaluates which abilities are trained by the activity. + """ + + @staticmethod + def evaluate( + effects_config: Dict, + activity: Dict, + intensity_zone_result: Optional[Dict] = None + ) -> Dict: + """ + Evaluates training effects (abilities trained). + + Args: + effects_config: training_effects configuration + activity: Activity data + intensity_zone_result: Result from intensity zone evaluation + + Returns: + { + "enabled": bool, + "abilities_trained": [...], + "total_training_load": float + } + """ + if not effects_config.get("enabled", False): + return {"enabled": False} + + abilities_trained = [] + + # Use default effects if no conditional matching + default_effects = effects_config.get("default_effects", {}) + primary_abilities = default_effects.get("primary_abilities", []) + secondary_abilities = default_effects.get("secondary_abilities", []) + + # Calculate quality factor (simplified for now) + quality_factor = 1.0 + + # Primary abilities + for ability in primary_abilities: + abilities_trained.append({ + "category": ability["category"], + "ability": ability["ability"], + "intensity": ability["intensity"], + "quality": quality_factor, + "contribution": ability["intensity"] * quality_factor, + "type": "primary" + }) + + # Secondary abilities + for ability in secondary_abilities: + abilities_trained.append({ + "category": ability["category"], + "ability": ability["ability"], + "intensity": ability["intensity"], + "quality": quality_factor * 0.7, # Secondary = 70% + "contribution": 
ability["intensity"] * quality_factor * 0.7, + "type": "secondary" + }) + + total_training_load = sum(a["contribution"] for a in abilities_trained) + + return { + "enabled": True, + "abilities_trained": abilities_trained, + "total_training_load": round(total_training_load, 2), + "metabolic_focus": effects_config.get("metabolic_focus", []), + "muscle_groups": effects_config.get("muscle_groups", []) + } -- 2.43.0 From e11953736dce863c0b3cfa8fe46ed66cbe2a722a Mon Sep 17 00:00:00 2001 From: Lars Date: Mon, 23 Mar 2026 10:53:13 +0100 Subject: [PATCH 02/29] feat: Training Type Profiles Phase 1.2 - Auto-evaluation (#15) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Automatic evaluation on activity INSERT/UPDATE: - create_activity(): Evaluate after manual creation - update_activity(): Re-evaluate after manual update - import_activity_csv(): Evaluate after CSV import (INSERT + UPDATE) - bulk_categorize_activities(): Evaluate after bulk training type assignment All evaluation calls wrapped in try/except to prevent activity operations from failing if evaluation encounters an error. Only activities with training_type_id assigned are evaluated. 
Phase 1.2 complete βœ… ## Next Steps (Phase 2): Admin-UI for training type profile configuration Co-Authored-By: Claude Opus 4.6 --- backend/routers/activity.py | 122 +++++++++++++++++++++++++++++++++++- 1 file changed, 120 insertions(+), 2 deletions(-) diff --git a/backend/routers/activity.py b/backend/routers/activity.py index a37b0bb..b718134 100644 --- a/backend/routers/activity.py +++ b/backend/routers/activity.py @@ -16,6 +16,7 @@ from auth import require_auth, check_feature_access, increment_feature_usage from models import ActivityEntry from routers.profiles import get_pid from feature_logger import log_feature_usage +from evaluation_helper import evaluate_and_save_activity router = APIRouter(prefix="/api/activity", tags=["activity"]) logger = logging.getLogger(__name__) @@ -64,6 +65,26 @@ def create_activity(e: ActivityEntry, x_profile_id: Optional[str]=Header(default d['kcal_active'],d['kcal_resting'],d['hr_avg'],d['hr_max'],d['distance_km'], d['rpe'],d['source'],d['notes'])) + # Phase 1.2: Auto-evaluation after INSERT + # Load the activity data to evaluate + cur.execute(""" + SELECT id, profile_id, date, training_type_id, duration_min, + hr_avg, hr_max, distance_km, kcal_active, kcal_resting, + rpe, pace_min_per_km, cadence, elevation_gain + FROM activity_log + WHERE id = %s + """, (eid,)) + activity_row = cur.fetchone() + if activity_row: + activity_dict = dict(activity_row) + training_type_id = activity_dict.get("training_type_id") + if training_type_id: + try: + evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid) + logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT") + except Exception as eval_error: + logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}") + # Phase 2: Increment usage counter (always for new entries) increment_feature_usage(pid, 'activity_entries') @@ -79,6 +100,27 @@ def update_activity(eid: str, e: ActivityEntry, x_profile_id: Optional[str]=Head cur = get_cursor(conn) 
cur.execute(f"UPDATE activity_log SET {', '.join(f'{k}=%s' for k in d)} WHERE id=%s AND profile_id=%s", list(d.values())+[eid,pid]) + + # Phase 1.2: Auto-evaluation after UPDATE + # Load the updated activity data to evaluate + cur.execute(""" + SELECT id, profile_id, date, training_type_id, duration_min, + hr_avg, hr_max, distance_km, kcal_active, kcal_resting, + rpe, pace_min_per_km, cadence, elevation_gain + FROM activity_log + WHERE id = %s + """, (eid,)) + activity_row = cur.fetchone() + if activity_row: + activity_dict = dict(activity_row) + training_type_id = activity_dict.get("training_type_id") + if training_type_id: + try: + evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid) + logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE") + except Exception as eval_error: + logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}") + return {"id":eid} @@ -214,6 +256,30 @@ def bulk_categorize_activities( """, (training_type_id, training_category, training_subcategory, pid, activity_type)) updated_count = cur.rowcount + # Phase 1.2: Auto-evaluation after bulk categorization + # Load all activities that were just updated and evaluate them + cur.execute(""" + SELECT id, profile_id, date, training_type_id, duration_min, + hr_avg, hr_max, distance_km, kcal_active, kcal_resting, + rpe, pace_min_per_km, cadence, elevation_gain + FROM activity_log + WHERE profile_id = %s + AND activity_type = %s + AND training_type_id = %s + """, (pid, activity_type, training_type_id)) + + activities_to_evaluate = cur.fetchall() + evaluated_count = 0 + for activity_row in activities_to_evaluate: + activity_dict = dict(activity_row) + try: + evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid) + evaluated_count += 1 + except Exception as eval_error: + logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}") + + logger.info(f"[AUTO-EVAL] 
Evaluated {evaluated_count}/{updated_count} bulk-categorized activities") + # Save mapping for future imports (upsert) cur.execute(""" INSERT INTO activity_type_mappings (activity_type, training_type_id, profile_id, source, updated_at) @@ -275,6 +341,7 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional if existing: # Update existing entry (e.g., to add training type mapping) + existing_id = existing['id'] cur.execute(""" UPDATE activity_log SET end_time = %s, @@ -297,22 +364,73 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional tf(row.get('Max. Herzfrequenz (count/min)','')), tf(row.get('Distanz (km)','')), training_type_id, training_category, training_subcategory, - existing['id'] + existing_id )) skipped += 1 # Count as skipped (not newly inserted) + + # Phase 1.2: Auto-evaluation after CSV import UPDATE + if training_type_id: + try: + # Build activity dict for evaluation + activity_dict = { + "id": existing_id, + "profile_id": pid, + "date": date, + "training_type_id": training_type_id, + "duration_min": duration_min, + "hr_avg": tf(row.get('Durchschn. Herzfrequenz (count/min)','')), + "hr_max": tf(row.get('Max. 
Herzfrequenz (count/min)','')), + "distance_km": tf(row.get('Distanz (km)','')), + "kcal_active": kj(row.get('Aktive Energie (kJ)','')), + "kcal_resting": kj(row.get('RuheeintrΓ€ge (kJ)','')), + "rpe": None, + "pace_min_per_km": None, + "cadence": None, + "elevation_gain": None + } + evaluate_and_save_activity(cur, existing_id, activity_dict, training_type_id, pid) + logger.debug(f"[AUTO-EVAL] Re-evaluated updated activity {existing_id}") + except Exception as eval_error: + logger.warning(f"[AUTO-EVAL] Failed to re-evaluate updated activity {existing_id}: {eval_error}") else: # Insert new entry + new_id = str(uuid.uuid4()) cur.execute("""INSERT INTO activity_log (id,profile_id,date,start_time,end_time,activity_type,duration_min,kcal_active,kcal_resting, hr_avg,hr_max,distance_km,source,training_type_id,training_category,training_subcategory,created) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,'apple_health',%s,%s,%s,CURRENT_TIMESTAMP)""", - (str(uuid.uuid4()),pid,date,start,row.get('End',''),wtype,duration_min, + (new_id,pid,date,start,row.get('End',''),wtype,duration_min, kj(row.get('Aktive Energie (kJ)','')),kj(row.get('RuheeintrΓ€ge (kJ)','')), tf(row.get('Durchschn. Herzfrequenz (count/min)','')), tf(row.get('Max. Herzfrequenz (count/min)','')), tf(row.get('Distanz (km)','')), training_type_id,training_category,training_subcategory)) inserted+=1 + + # Phase 1.2: Auto-evaluation after CSV import INSERT + if training_type_id: + try: + # Build activity dict for evaluation + activity_dict = { + "id": new_id, + "profile_id": pid, + "date": date, + "training_type_id": training_type_id, + "duration_min": duration_min, + "hr_avg": tf(row.get('Durchschn. Herzfrequenz (count/min)','')), + "hr_max": tf(row.get('Max. 
Herzfrequenz (count/min)','')), + "distance_km": tf(row.get('Distanz (km)','')), + "kcal_active": kj(row.get('Aktive Energie (kJ)','')), + "kcal_resting": kj(row.get('RuheeintrΓ€ge (kJ)','')), + "rpe": None, + "pace_min_per_km": None, + "cadence": None, + "elevation_gain": None + } + evaluate_and_save_activity(cur, new_id, activity_dict, training_type_id, pid) + logger.debug(f"[AUTO-EVAL] Evaluated imported activity {new_id}") + except Exception as eval_error: + logger.warning(f"[AUTO-EVAL] Failed to evaluate imported activity {new_id}: {eval_error}") except Exception as e: logger.warning(f"Import row failed: {e}") skipped+=1 -- 2.43.0 From edd15dd556bcf4e7a6e83b7d6726eee6a0c35485 Mon Sep 17 00:00:00 2001 From: Lars Date: Mon, 23 Mar 2026 10:59:23 +0100 Subject: [PATCH 03/29] fix: defensive evaluation import to prevent startup crash (#15) Problem: Backend crashed on startup due to evaluation import failure Solution: Wrap evaluation_helper import in try/except Changes: - Import evaluation_helper with error handling - Add EVALUATION_AVAILABLE flag - All evaluation calls now check flag before executing - System remains functional even if evaluation system unavailable This prevents backend crashes if: - Migrations haven't run yet - Dependencies are missing - Import errors occur Co-Authored-By: Claude Opus 4.6 --- backend/routers/activity.py | 129 +++++++++++++++++++----------------- 1 file changed, 70 insertions(+), 59 deletions(-) diff --git a/backend/routers/activity.py b/backend/routers/activity.py index b718134..a7a2e64 100644 --- a/backend/routers/activity.py +++ b/backend/routers/activity.py @@ -16,7 +16,15 @@ from auth import require_auth, check_feature_access, increment_feature_usage from models import ActivityEntry from routers.profiles import get_pid from feature_logger import log_feature_usage -from evaluation_helper import evaluate_and_save_activity + +# Evaluation import with error handling (Phase 1.2) +try: + from evaluation_helper import 
evaluate_and_save_activity
+    EVALUATION_AVAILABLE = True
+except Exception as e:
+    logging.getLogger(__name__).warning(f"[AUTO-EVAL] Evaluation system not available: {e}")
+    EVALUATION_AVAILABLE = False
+    evaluate_and_save_activity = None
 
 router = APIRouter(prefix="/api/activity", tags=["activity"])
 logger = logging.getLogger(__name__)
@@ -66,24 +74,25 @@ def create_activity(e: ActivityEntry, x_profile_id: Optional[str]=Header(default
          d['rpe'],d['source'],d['notes']))
 
     # Phase 1.2: Auto-evaluation after INSERT
-    # Load the activity data to evaluate
-    cur.execute("""
-        SELECT id, profile_id, date, training_type_id, duration_min,
-               hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
-               rpe, pace_min_per_km, cadence, elevation_gain
-        FROM activity_log
-        WHERE id = %s
-    """, (eid,))
-    activity_row = cur.fetchone()
-    if activity_row:
-        activity_dict = dict(activity_row)
-        training_type_id = activity_dict.get("training_type_id")
-        if training_type_id:
-            try:
-                evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
-                logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT")
-            except Exception as eval_error:
-                logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}")
+    if EVALUATION_AVAILABLE:
+        # Load the activity data to evaluate
+        cur.execute("""
+            SELECT id, profile_id, date, training_type_id, duration_min,
+                   hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
+                   rpe, pace_min_per_km, cadence, elevation_gain
+            FROM activity_log
+            WHERE id = %s
+        """, (eid,))
+        activity_row = cur.fetchone()
+        if activity_row:
+            activity_dict = dict(activity_row)
+            training_type_id = activity_dict.get("training_type_id")
+            if training_type_id:
+                try:
+                    evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
+                    logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT")
+                except Exception as eval_error:
+                    logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}")
 
     # Phase 2: Increment usage counter (always for new entries)
increment_feature_usage(pid, 'activity_entries') @@ -102,24 +111,25 @@ def update_activity(eid: str, e: ActivityEntry, x_profile_id: Optional[str]=Head list(d.values())+[eid,pid]) # Phase 1.2: Auto-evaluation after UPDATE - # Load the updated activity data to evaluate - cur.execute(""" - SELECT id, profile_id, date, training_type_id, duration_min, - hr_avg, hr_max, distance_km, kcal_active, kcal_resting, - rpe, pace_min_per_km, cadence, elevation_gain - FROM activity_log - WHERE id = %s - """, (eid,)) - activity_row = cur.fetchone() - if activity_row: - activity_dict = dict(activity_row) - training_type_id = activity_dict.get("training_type_id") - if training_type_id: - try: - evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid) - logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE") - except Exception as eval_error: - logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}") + if EVALUATION_AVAILABLE: + # Load the updated activity data to evaluate + cur.execute(""" + SELECT id, profile_id, date, training_type_id, duration_min, + hr_avg, hr_max, distance_km, kcal_active, kcal_resting, + rpe, pace_min_per_km, cadence, elevation_gain + FROM activity_log + WHERE id = %s + """, (eid,)) + activity_row = cur.fetchone() + if activity_row: + activity_dict = dict(activity_row) + training_type_id = activity_dict.get("training_type_id") + if training_type_id: + try: + evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid) + logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE") + except Exception as eval_error: + logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}") return {"id":eid} @@ -257,28 +267,29 @@ def bulk_categorize_activities( updated_count = cur.rowcount # Phase 1.2: Auto-evaluation after bulk categorization - # Load all activities that were just updated and evaluate them - cur.execute(""" - SELECT id, profile_id, date, training_type_id, duration_min, - 
hr_avg, hr_max, distance_km, kcal_active, kcal_resting, - rpe, pace_min_per_km, cadence, elevation_gain - FROM activity_log - WHERE profile_id = %s - AND activity_type = %s - AND training_type_id = %s - """, (pid, activity_type, training_type_id)) + if EVALUATION_AVAILABLE: + # Load all activities that were just updated and evaluate them + cur.execute(""" + SELECT id, profile_id, date, training_type_id, duration_min, + hr_avg, hr_max, distance_km, kcal_active, kcal_resting, + rpe, pace_min_per_km, cadence, elevation_gain + FROM activity_log + WHERE profile_id = %s + AND activity_type = %s + AND training_type_id = %s + """, (pid, activity_type, training_type_id)) - activities_to_evaluate = cur.fetchall() - evaluated_count = 0 - for activity_row in activities_to_evaluate: - activity_dict = dict(activity_row) - try: - evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid) - evaluated_count += 1 - except Exception as eval_error: - logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}") + activities_to_evaluate = cur.fetchall() + evaluated_count = 0 + for activity_row in activities_to_evaluate: + activity_dict = dict(activity_row) + try: + evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid) + evaluated_count += 1 + except Exception as eval_error: + logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}") - logger.info(f"[AUTO-EVAL] Evaluated {evaluated_count}/{updated_count} bulk-categorized activities") + logger.info(f"[AUTO-EVAL] Evaluated {evaluated_count}/{updated_count} bulk-categorized activities") # Save mapping for future imports (upsert) cur.execute(""" @@ -369,7 +380,7 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional skipped += 1 # Count as skipped (not newly inserted) # Phase 1.2: Auto-evaluation after CSV import UPDATE - if training_type_id: 
+ if EVALUATION_AVAILABLE and training_type_id: try: # Build activity dict for evaluation activity_dict = { @@ -408,7 +419,7 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional inserted+=1 # Phase 1.2: Auto-evaluation after CSV import INSERT - if training_type_id: + if EVALUATION_AVAILABLE and training_type_id: try: # Build activity dict for evaluation activity_dict = { -- 2.43.0 From ca7d9b2e3fa15f23776517209e66a4600c9a8928 Mon Sep 17 00:00:00 2001 From: Lars Date: Mon, 23 Mar 2026 11:01:53 +0100 Subject: [PATCH 04/29] fix: add missing validation_rules in migration 013 (#15) SQL Error: VALUES lists must all be the same length (line 130) Cause: kcal_per_km row was missing validation_rules JSONB value Fixed: Added validation_rules '{"min": 0, "max": 1000}'::jsonb All 16 parameter rows now have correct 10 columns: key, name_de, name_en, category, data_type, unit, source_field, validation_rules, description_de, description_en Co-Authored-By: Claude Opus 4.6 --- backend/migrations/013_training_parameters.sql | 1 + 1 file changed, 1 insertion(+) diff --git a/backend/migrations/013_training_parameters.sql b/backend/migrations/013_training_parameters.sql index 9f80bcf..2784057 100644 --- a/backend/migrations/013_training_parameters.sql +++ b/backend/migrations/013_training_parameters.sql @@ -128,6 +128,7 @@ INSERT INTO training_parameters (key, name_de, name_en, category, data_type, uni 'Average heart rate as percentage of max heart rate'), ('kcal_per_km', 'Kalorien pro km', 'Calories per km', 'performance', 'float', 'kcal/km', 'kcal_per_km', + '{"min": 0, "max": 1000}'::jsonb, 'Kalorienverbrauch pro Kilometer', 'Calorie burn per kilometer'); -- 2.43.0 From d7145874cf217454e4f91a5eb3c5f2e597efa015 Mon Sep 17 00:00:00 2001 From: Lars Date: Mon, 23 Mar 2026 11:50:40 +0100 Subject: [PATCH 05/29] feat: Training Type Profiles Phase 2.1 - Backend Profile Management (#15) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 
Content-Transfer-Encoding: 8bit Admin endpoints for profile configuration: - Extended TrainingTypeCreate/Update models with profile field - Added profile column to all SELECT queries - Profile templates for Running, Meditation, Strength Training - Template endpoints: list, get, apply - Profile stats endpoint (configured/unconfigured count) New file: profile_templates.py - TEMPLATE_RUNNING: Endurance-focused with HR zones - TEMPLATE_MEDITATION: Mental-focused (low HR ≀ instead of β‰₯) - TEMPLATE_STRENGTH: Strength-focused API Endpoints: - GET /api/admin/training-types/profiles/templates - GET /api/admin/training-types/profiles/templates/{key} - POST /api/admin/training-types/{id}/profile/apply-template - GET /api/admin/training-types/profiles/stats Next: Frontend Admin-UI (ProfileEditor component) Co-Authored-By: Claude Opus 4.6 --- backend/profile_templates.py | 450 ++++++++++++++++++++++++ backend/routers/admin_training_types.py | 139 +++++++- 2 files changed, 583 insertions(+), 6 deletions(-) create mode 100644 backend/profile_templates.py diff --git a/backend/profile_templates.py b/backend/profile_templates.py new file mode 100644 index 0000000..0690292 --- /dev/null +++ b/backend/profile_templates.py @@ -0,0 +1,450 @@ +""" +Training Type Profile Templates +Pre-configured profiles for common training types. 
+ +Issue: #15 +Date: 2026-03-23 +""" + +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +# TEMPLATE: LAUFEN (Running) - Ausdauer-fokussiert +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +TEMPLATE_RUNNING = { + "version": "1.0", + "name": "Laufen (Standard)", + "description": "Ausdauerlauf mit Herzfrequenz-Zonen", + + "rule_sets": { + "minimum_requirements": { + "enabled": True, + "pass_strategy": "weighted_score", + "pass_threshold": 0.6, + "rules": [ + { + "parameter": "duration_min", + "operator": "gte", + "value": 15, + "weight": 5, + "optional": False, + "reason": "Mindestens 15 Minuten fΓΌr Trainingseffekt" + }, + { + "parameter": "avg_hr", + "operator": "gte", + "value": 100, + "weight": 3, + "optional": False, + "reason": "Puls muss fΓΌr Ausdauerreiz erhΓΆht sein" + }, + { + "parameter": "distance_km", + "operator": "gte", + "value": 1.0, + "weight": 2, + "optional": False, + "reason": "Mindestens 1 km Distanz" + } + ] + }, + + "intensity_zones": { + "enabled": True, + "zones": [ + { + "id": "regeneration", + "name": "Regeneration", + "color": "#4CAF50", + "effect": "Aktive Erholung", + "target_duration_min": 30, + "rules": [ + { + "parameter": "avg_hr_percent", + "operator": "between", + "value": [50, 60] + } + ] + }, + { + "id": "grundlagenausdauer", + "name": "Grundlagenausdauer", + "color": "#2196F3", + "effect": "Fettverbrennung, aerobe Basis", + "target_duration_min": 45, + "rules": [ + { + "parameter": "avg_hr_percent", + "operator": "between", + "value": [60, 70] + } + ] + }, + { + "id": "entwicklungsbereich", + "name": "Entwicklungsbereich", + "color": "#FF9800", + "effect": "VO2max-Training, Laktattoleranz", + "target_duration_min": 30, + "rules": [ + { + "parameter": "avg_hr_percent", + "operator": "between", + "value": [70, 80] + } + ] + }, + { + "id": "schwellentraining", + "name": "Schwellentraining", + "color": "#F44336", + "effect": "Anaerobe Schwelle, Wettkampftempo", + 
"target_duration_min": 20, + "rules": [ + { + "parameter": "avg_hr_percent", + "operator": "between", + "value": [80, 90] + } + ] + } + ] + }, + + "training_effects": { + "enabled": True, + "default_effects": { + "primary_abilities": [ + { + "category": "konditionell", + "ability": "ausdauer", + "intensity": 5 + } + ], + "secondary_abilities": [ + { + "category": "konditionell", + "ability": "schnelligkeit", + "intensity": 2 + }, + { + "category": "koordinativ", + "ability": "rhythmus", + "intensity": 3 + }, + { + "category": "psychisch", + "ability": "willenskraft", + "intensity": 4 + } + ] + }, + "metabolic_focus": ["aerobic", "fat_oxidation"], + "muscle_groups": ["legs", "core", "cardiovascular"] + }, + + "periodization": { + "enabled": True, + "frequency": { + "per_week_optimal": 3, + "per_week_max": 5 + }, + "recovery": { + "min_hours_between": 24 + } + }, + + "performance_indicators": { + "enabled": False + }, + + "safety": { + "enabled": True, + "warnings": [ + { + "parameter": "avg_hr_percent", + "operator": "gt", + "value": 95, + "severity": "high", + "message": "Herzfrequenz zu hoch - Überbelastungsrisiko" + }, + { + "parameter": "duration_min", + "operator": "gt", + "value": 180, + "severity": "medium", + "message": "Sehr lange Einheit - achte auf Regeneration" + } + ] + } + } +} + + +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +# TEMPLATE: MEDITATION - Mental-fokussiert (≀ statt β‰₯ bei HR!) 
+# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +TEMPLATE_MEDITATION = { + "version": "1.0", + "name": "Meditation (Standard)", + "description": "Mentales Training mit niedrigem Puls", + + "rule_sets": { + "minimum_requirements": { + "enabled": True, + "pass_strategy": "weighted_score", + "pass_threshold": 0.6, + "rules": [ + { + "parameter": "duration_min", + "operator": "gte", + "value": 5, + "weight": 5, + "optional": False, + "reason": "Mindestens 5 Minuten fΓΌr Entspannungseffekt" + }, + { + "parameter": "avg_hr", + "operator": "lte", + "value": 80, + "weight": 4, + "optional": False, + "reason": "Niedriger Puls zeigt Entspannung an" + } + ] + }, + + "intensity_zones": { + "enabled": True, + "zones": [ + { + "id": "deep_relaxation", + "name": "Tiefenentspannung", + "color": "#4CAF50", + "effect": "Parasympathikus-Aktivierung", + "target_duration_min": 10, + "rules": [ + { + "parameter": "avg_hr_percent", + "operator": "between", + "value": [35, 45] + } + ] + }, + { + "id": "light_meditation", + "name": "Leichte Meditation", + "color": "#2196F3", + "effect": "Achtsamkeit, Fokus", + "target_duration_min": 15, + "rules": [ + { + "parameter": "avg_hr_percent", + "operator": "between", + "value": [45, 55] + } + ] + } + ] + }, + + "training_effects": { + "enabled": True, + "default_effects": { + "primary_abilities": [ + { + "category": "kognitiv", + "ability": "konzentration", + "intensity": 5 + }, + { + "category": "psychisch", + "ability": "stressresistenz", + "intensity": 5 + } + ], + "secondary_abilities": [ + { + "category": "kognitiv", + "ability": "wahrnehmung", + "intensity": 4 + }, + { + "category": "psychisch", + "ability": "selbstvertrauen", + "intensity": 3 + } + ] + }, + "metabolic_focus": ["parasympathetic_activation"], + "muscle_groups": [] + }, + + "periodization": { + "enabled": True, + "frequency": { + "per_week_optimal": 5, + "per_week_max": 7 + }, + "recovery": { + "min_hours_between": 0 + } + }, + + 
"performance_indicators": { + "enabled": False + }, + + "safety": { + "enabled": True, + "warnings": [ + { + "parameter": "avg_hr", + "operator": "gt", + "value": 100, + "severity": "medium", + "message": "Herzfrequenz zu hoch fΓΌr Meditation - bist du wirklich entspannt?" + } + ] + } + } +} + + +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +# TEMPLATE: KRAFTTRAINING - Kraft-fokussiert +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +TEMPLATE_STRENGTH = { + "version": "1.0", + "name": "Krafttraining (Standard)", + "description": "Krafttraining mit moderater Herzfrequenz", + + "rule_sets": { + "minimum_requirements": { + "enabled": True, + "pass_strategy": "weighted_score", + "pass_threshold": 0.5, + "rules": [ + { + "parameter": "duration_min", + "operator": "gte", + "value": 20, + "weight": 5, + "optional": False, + "reason": "Mindestens 20 Minuten fΓΌr Muskelreiz" + }, + { + "parameter": "kcal_active", + "operator": "gte", + "value": 100, + "weight": 2, + "optional": True, + "reason": "Mindest-Kalorienverbrauch" + } + ] + }, + + "intensity_zones": { + "enabled": False + }, + + "training_effects": { + "enabled": True, + "default_effects": { + "primary_abilities": [ + { + "category": "konditionell", + "ability": "kraft", + "intensity": 5 + } + ], + "secondary_abilities": [ + { + "category": "koordinativ", + "ability": "differenzierung", + "intensity": 3 + }, + { + "category": "psychisch", + "ability": "willenskraft", + "intensity": 4 + } + ] + }, + "metabolic_focus": ["anaerobic", "muscle_growth"], + "muscle_groups": ["full_body"] + }, + + "periodization": { + "enabled": True, + "frequency": { + "per_week_optimal": 3, + "per_week_max": 5 + }, + "recovery": { + "min_hours_between": 48 + } + }, + + "performance_indicators": { + "enabled": False + }, + + "safety": { + "enabled": True, + "warnings": [] + } + } +} + + +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +# TEMPLATE REGISTRY +# 
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +TEMPLATES = { + "running": { + "name_de": "Laufen", + "name_en": "Running", + "icon": "πŸƒ", + "categories": ["cardio", "running"], + "template": TEMPLATE_RUNNING + }, + "meditation": { + "name_de": "Meditation", + "name_en": "Meditation", + "icon": "🧘", + "categories": ["geist", "meditation"], + "template": TEMPLATE_MEDITATION + }, + "strength": { + "name_de": "Krafttraining", + "name_en": "Strength Training", + "icon": "πŸ’ͺ", + "categories": ["kraft", "krafttraining"], + "template": TEMPLATE_STRENGTH + } +} + + +def get_template(template_key: str) -> dict: + """Get profile template by key.""" + template_info = TEMPLATES.get(template_key) + if not template_info: + return None + return template_info["template"] + + +def list_templates() -> list: + """List all available templates.""" + return [ + { + "key": key, + "name_de": info["name_de"], + "name_en": info["name_en"], + "icon": info["icon"], + "categories": info["categories"] + } + for key, info in TEMPLATES.items() + ] diff --git a/backend/routers/admin_training_types.py b/backend/routers/admin_training_types.py index 49899ed..3366ed0 100644 --- a/backend/routers/admin_training_types.py +++ b/backend/routers/admin_training_types.py @@ -11,6 +11,7 @@ from psycopg2.extras import Json from db import get_db, get_cursor, r2d from auth import require_auth, require_admin +from profile_templates import list_templates, get_template router = APIRouter(prefix="/api/admin/training-types", tags=["admin", "training-types"]) logger = logging.getLogger(__name__) @@ -26,6 +27,7 @@ class TrainingTypeCreate(BaseModel): description_en: Optional[str] = None sort_order: int = 0 abilities: Optional[dict] = None + profile: Optional[dict] = None # Training Type Profile (Phase 2 #15) class TrainingTypeUpdate(BaseModel): @@ -38,6 +40,7 @@ class TrainingTypeUpdate(BaseModel): description_en: Optional[str] = None sort_order: Optional[int] = None abilities: 
Optional[dict] = None + profile: Optional[dict] = None # Training Type Profile (Phase 2 #15) @router.get("") @@ -51,7 +54,7 @@ def list_training_types_admin(session: dict = Depends(require_admin)): cur.execute(""" SELECT id, category, subcategory, name_de, name_en, icon, description_de, description_en, sort_order, abilities, - created_at + profile, created_at FROM training_types ORDER BY sort_order, category, subcategory """) @@ -68,7 +71,7 @@ def get_training_type(type_id: int, session: dict = Depends(require_admin)): cur.execute(""" SELECT id, category, subcategory, name_de, name_en, icon, description_de, description_en, sort_order, abilities, - created_at + profile, created_at FROM training_types WHERE id = %s """, (type_id,)) @@ -86,14 +89,15 @@ def create_training_type(data: TrainingTypeCreate, session: dict = Depends(requi with get_db() as conn: cur = get_cursor(conn) - # Convert abilities dict to JSONB + # Convert abilities and profile dict to JSONB abilities_json = data.abilities if data.abilities else {} + profile_json = data.profile if data.profile else None cur.execute(""" INSERT INTO training_types (category, subcategory, name_de, name_en, icon, - description_de, description_en, sort_order, abilities) - VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s) + description_de, description_en, sort_order, abilities, profile) + VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) RETURNING id """, ( data.category, @@ -104,7 +108,8 @@ def create_training_type(data: TrainingTypeCreate, session: dict = Depends(requi data.description_de, data.description_en, data.sort_order, - Json(abilities_json) + Json(abilities_json), + Json(profile_json) if profile_json else None )) new_id = cur.fetchone()['id'] @@ -155,6 +160,9 @@ def update_training_type( if data.abilities is not None: updates.append("abilities = %s") values.append(Json(data.abilities)) + if data.profile is not None: + updates.append("profile = %s") + values.append(Json(data.profile)) if not updates: raise 
HTTPException(400, "No fields to update") @@ -280,3 +288,122 @@ def get_abilities_taxonomy(session: dict = Depends(require_auth)): } return taxonomy + + +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +# TRAINING TYPE PROFILES - Phase 2 (#15) +# ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +@router.get("/profiles/templates") +def list_profile_templates(session: dict = Depends(require_admin)): + """ + List all available profile templates. + + Returns templates for common training types (Running, Meditation, Strength, etc.) + """ + return list_templates() + + +@router.get("/profiles/templates/{template_key}") +def get_profile_template(template_key: str, session: dict = Depends(require_admin)): + """ + Get a specific profile template by key. + + Keys: running, meditation, strength + """ + template = get_template(template_key) + if not template: + raise HTTPException(404, f"Template '{template_key}' not found") + return template + + +@router.post("/{type_id}/profile/apply-template") +def apply_profile_template( + type_id: int, + data: dict, + session: dict = Depends(require_admin) +): + """ + Apply a profile template to a training type. 
+ + Body: { "template_key": "running" } + """ + template_key = data.get("template_key") + if not template_key: + raise HTTPException(400, "template_key required") + + template = get_template(template_key) + if not template: + raise HTTPException(404, f"Template '{template_key}' not found") + + # Apply template to training type + with get_db() as conn: + cur = get_cursor(conn) + + # Check if training type exists + cur.execute("SELECT id, name_de FROM training_types WHERE id = %s", (type_id,)) + training_type = cur.fetchone() + if not training_type: + raise HTTPException(404, "Training type not found") + + # Update profile + cur.execute(""" + UPDATE training_types + SET profile = %s + WHERE id = %s + """, (Json(template), type_id)) + + logger.info(f"[ADMIN] Applied template '{template_key}' to training type {type_id} ({training_type['name_de']})") + + return { + "message": f"Template '{template_key}' applied successfully", + "training_type_id": type_id, + "training_type_name": training_type['name_de'], + "template_key": template_key + } + + +@router.get("/profiles/stats") +def get_profile_stats(session: dict = Depends(require_admin)): + """ + Get statistics about configured profiles. + + Returns count of training types with/without profiles. 
+ """ + with get_db() as conn: + cur = get_cursor(conn) + + cur.execute(""" + SELECT + COUNT(*) as total, + COUNT(profile) as configured, + COUNT(*) - COUNT(profile) as unconfigured + FROM training_types + """) + stats = cur.fetchone() + + # Get list of types with profiles + cur.execute(""" + SELECT id, name_de, category, subcategory + FROM training_types + WHERE profile IS NOT NULL + ORDER BY name_de + """) + configured_types = [r2d(r) for r in cur.fetchall()] + + # Get list of types without profiles + cur.execute(""" + SELECT id, name_de, category, subcategory + FROM training_types + WHERE profile IS NULL + ORDER BY name_de + """) + unconfigured_types = [r2d(r) for r in cur.fetchall()] + + return { + "total": stats['total'], + "configured": stats['configured'], + "unconfigured": stats['unconfigured'], + "configured_types": configured_types, + "unconfigured_types": unconfigured_types + } -- 2.43.0 From 1d252b5299aa1bb7ad0b541718478df23fad2855 Mon Sep 17 00:00:00 2001 From: Lars Date: Mon, 23 Mar 2026 11:53:58 +0100 Subject: [PATCH 06/29] feat: Training Type Profiles Phase 2.2 - Frontend Admin-UI (#15) New admin page for profile configuration: - AdminTrainingProfiles.jsx: Profile management interface - Statistics dashboard (configured/unconfigured count) - Training types list with profile status badges - JSON-based profile editor (modal) - One-click template application (Running, Meditation, Strength) - Batch re-evaluation button for existing activities - Link in AdminPanel under "Trainingstypen (v9d)" Features: - Apply templates with one click - Edit profiles as JSON in modal - Real-time validation - Success/error messages - Responsive layout Route: /admin/training-profiles Next: Test and iterate, then Phase 3 (User-UI for viewing results) Co-Authored-By: Claude Opus 4.6 --- frontend/src/App.jsx | 2 + frontend/src/pages/AdminPanel.jsx | 5 + frontend/src/pages/AdminTrainingProfiles.jsx | 297 +++++++++++++++++++ 3 files changed, 304 insertions(+) create mode 100644 
frontend/src/pages/AdminTrainingProfiles.jsx diff --git a/frontend/src/App.jsx b/frontend/src/App.jsx index a83ecf1..eb41e9d 100644 --- a/frontend/src/App.jsx +++ b/frontend/src/App.jsx @@ -29,6 +29,7 @@ import AdminCouponsPage from './pages/AdminCouponsPage' import AdminUserRestrictionsPage from './pages/AdminUserRestrictionsPage' import AdminTrainingTypesPage from './pages/AdminTrainingTypesPage' import AdminActivityMappingsPage from './pages/AdminActivityMappingsPage' +import AdminTrainingProfiles from './pages/AdminTrainingProfiles' import SubscriptionPage from './pages/SubscriptionPage' import SleepPage from './pages/SleepPage' import RestDaysPage from './pages/RestDaysPage' @@ -180,6 +181,7 @@ function AppShell() { }/> }/> }/> + }/> }/> diff --git a/frontend/src/pages/AdminPanel.jsx b/frontend/src/pages/AdminPanel.jsx index 94b9822..dc23c50 100644 --- a/frontend/src/pages/AdminPanel.jsx +++ b/frontend/src/pages/AdminPanel.jsx @@ -444,6 +444,11 @@ export default function AdminPanel() { πŸ”— Activity-Mappings (lernendes System) + + + diff --git a/frontend/src/pages/AdminTrainingProfiles.jsx b/frontend/src/pages/AdminTrainingProfiles.jsx new file mode 100644 index 0000000..0173a06 --- /dev/null +++ b/frontend/src/pages/AdminTrainingProfiles.jsx @@ -0,0 +1,297 @@ +import { useState, useEffect } from 'react' +import { api } from '../utils/api' +import '../app.css' + +export default function AdminTrainingProfiles() { + const [stats, setStats] = useState(null) + const [trainingTypes, setTrainingTypes] = useState([]) + const [templates, setTemplates] = useState([]) + const [selectedType, setSelectedType] = useState(null) + const [editingProfile, setEditingProfile] = useState(null) + const [profileJson, setProfileJson] = useState('') + const [loading, setLoading] = useState(true) + const [error, setError] = useState('') + const [success, setSuccess] = useState('') + + useEffect(() => { + load() + }, []) + + const load = async () => { + try { + setLoading(true) + const 
[typesData, statsData, templatesData] = await Promise.all([ + api.get('/admin/training-types'), + api.get('/admin/training-types/profiles/stats'), + api.get('/admin/training-types/profiles/templates') + ]) + setTrainingTypes(typesData) + setStats(statsData) + setTemplates(templatesData) + } catch (e) { + setError(e.message) + } finally { + setLoading(false) + } + } + + const openEditor = (type) => { + setSelectedType(type) + setEditingProfile(type.profile || null) + setProfileJson(JSON.stringify(type.profile || {}, null, 2)) + setError('') + setSuccess('') + } + + const closeEditor = () => { + setSelectedType(null) + setEditingProfile(null) + setProfileJson('') + } + + const saveProfile = async () => { + try { + // Validate JSON + const profile = JSON.parse(profileJson) + + // Update training type + await api.put(`/admin/training-types/${selectedType.id}`, { profile }) + + setSuccess(`Profil fΓΌr "${selectedType.name_de}" gespeichert`) + closeEditor() + load() + } catch (e) { + setError(e.message || 'UngΓΌltiges JSON') + } + } + + const applyTemplate = async (typeId, templateKey) => { + if (!confirm(`Template "${templateKey}" auf diesen Trainingstyp anwenden?`)) return + + try { + await api.post(`/admin/training-types/${typeId}/profile/apply-template`, { + template_key: templateKey + }) + setSuccess('Template erfolgreich angewendet') + load() + } catch (e) { + setError(e.message) + } + } + + const batchReEvaluate = async () => { + if (!confirm('Alle AktivitΓ€ten neu evaluieren? Das kann einige Sekunden dauern.')) return + + try { + const result = await api.post('/evaluation/batch') + setSuccess( + `Batch-Evaluation abgeschlossen: ${result.stats.evaluated} evaluiert, ` + + `${result.stats.skipped} ΓΌbersprungen, ${result.stats.errors} Fehler` + ) + } catch (e) { + setError(e.message) + } + } + + if (loading) return
+ + return ( +
+

Training Type Profiles

+

+ Konfiguriere Bewertungsprofile fΓΌr Trainingstypen +

+ + {error && ( +
+ {error} +
+ )} + + {success && ( +
+ {success} +
+ )} + + {/* Statistics */} + {stats && ( +
+

Übersicht

+
+
+
{stats.total}
+
Trainingstypen gesamt
+
+
+
{stats.configured}
+
Profile konfiguriert
+
+
+
{stats.unconfigured}
+
Noch keine Profile
+
+
+ + +
+ )} + + {/* Training Types List */} +
+

Trainingstypen

+ +
+ {trainingTypes.map(type => ( +
+
{type.icon || 'πŸ“Š'}
+
+
+ {type.name_de} + {type.profile && ( + + βœ“ Profil + + )} +
+
+ {type.category} {type.subcategory && `Β· ${type.subcategory}`} +
+
+ +
+ {/* Template Buttons */} + {templates.map(template => ( + + ))} + + +
+
+ ))} +
+
+ + {/* Profile Editor Modal */} + {selectedType && ( +
+
+
+

{selectedType.icon} {selectedType.name_de} - Profil bearbeiten

+ +
+ +

+ JSON-basierter Editor. Siehe Dokumentation fΓΌr vollstΓ€ndige Struktur. +

+ +