feat: Training Type Profiles - Phase 1.1 Foundation (#15)

## Implemented

### DB-Schema (Migrations)
- Migration 013: training_parameters table (16 standard parameters)
- Migration 014: training_types.profile + activity_log.evaluation columns
- Performance metric calculations (avg_hr_percent, kcal_per_km)

### Backend - Rule Engine
- RuleEvaluator: Generic rule evaluation with 9 operators
  - gte, lte, gt, lt, eq, neq, between, in, not_in
  - Weighted scoring system
  - Pass strategies: all_must_pass, weighted_score, at_least_n

- IntensityZoneEvaluator: HR zone analysis
- TrainingEffectsEvaluator: Abilities development
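
For illustration, a minimum_requirements rule set in the shape consumed by RuleEvaluator.evaluate_rule_set might look like the sketch below. The parameter keys come from Migration 013; the thresholds and weights are made-up examples, not shipped defaults:

```python
# Hypothetical rule set - field names follow rule_engine.py, values are illustrative.
minimum_requirements = {
    "enabled": True,
    "pass_strategy": "weighted_score",  # alternatives: "all_must_pass", "at_least_n"
    "pass_threshold": 0.6,              # required weighted score for "weighted_score"
    "rules": [
        {"parameter": "duration_min", "operator": "gte", "value": 30,
         "weight": 3, "reason": "Session shorter than planned"},
        {"parameter": "avg_hr_percent", "operator": "between", "value": [60, 75],
         "weight": 2, "reason": "Outside the easy-endurance HR band"},
        {"parameter": "rpe", "operator": "lte", "value": 5,
         "weight": 1, "optional": True, "reason": "Felt harder than intended"},
    ],
}
```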

### Backend - Master Evaluator
- TrainingProfileEvaluator: 7-dimensional evaluation
  1. Minimum Requirements (Quality Gates)
  2. Intensity Zones (HR zones)
  3. Training Effects (Abilities)
  4. Periodization (Frequency & Recovery)
  5. Performance Indicators (KPIs)
  6. Safety (Warnings)
  7. AI Context (simplified for MVP)

- evaluation_helper.py: Utilities for loading parameters/profiles and saving evaluation results
- routers/evaluation.py: API endpoints
  - POST /api/evaluation/activity/{id}
  - POST /api/evaluation/batch
  - GET /api/evaluation/parameters
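
For orientation, a minimal training_types.profile document covering these dimensions could look roughly like the sketch below. Key names follow what the evaluators read; all values are illustrative, and minimum_requirements reuses the rule set sketched above:

```python
# Hypothetical profile (stored as JSONB in training_types.profile) - values are examples.
profile = {
    "version": "1.0",
    "rule_sets": {
        "minimum_requirements": minimum_requirements,  # rule set from the example above
        "intensity_zones": {
            "enabled": True,
            "zones": [{
                "id": "z2", "name": "GA1", "color": "#4caf50", "effect": "aerobic base",
                "target_duration_min": 45,
                "rules": [{"parameter": "avg_hr_percent", "value": [60, 75]}],
            }],
        },
        "training_effects": {
            "enabled": True,
            "default_effects": {
                "primary_abilities": [
                    {"category": "endurance", "ability": "aerobic_base", "intensity": 0.8}
                ],
                "secondary_abilities": [],
            },
        },
        "periodization": {"enabled": True, "frequency": {"per_week_optimal": 3}},
        "performance_indicators": {"enabled": False},
        "safety": {
            "enabled": True,
            "warnings": [{"parameter": "hr_max", "operator": "gte", "value": 195,
                          "severity": "high", "message": "Maximum heart rate unusually high"}],
        },
        "ai_context": {},  # not evaluated yet (simplified for MVP)
    },
}
```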

### Integration
- main.py: Router registration

## TODO (Phase 1.2)
- Auto-evaluation on activity INSERT/UPDATE
- Admin UI for profile editing
- User UI for displaying results

## Testing
- ✅ Syntax checks passed
- 🔲 Runtime testing pending (after auto-evaluation)

Part of Issue #15 - Training Type Profiles System
Lars 2026-03-23 10:49:26 +01:00
parent 29770503bf
commit 1b9cd6d5e6
7 changed files with 1434 additions and 0 deletions

backend/evaluation_helper.py
@@ -0,0 +1,252 @@
"""
Training Type Profiles - Helper Functions
Utilities for loading parameters, profiles, and running evaluations.
Issue: #15
Date: 2026-03-23
"""
from typing import Dict, Optional, List
import logging
from db import get_cursor
from profile_evaluator import TrainingProfileEvaluator
logger = logging.getLogger(__name__)
def load_parameters_registry(cur) -> Dict[str, Dict]:
"""
Loads training parameters registry from database.
Returns:
Dict mapping parameter_key -> config
"""
cur.execute("""
SELECT key, name_de, name_en, category, data_type, unit,
description_de, source_field, validation_rules
FROM training_parameters
WHERE is_active = true
""")
registry = {}
for row in cur.fetchall():
registry[row['key']] = dict(row)
return registry
def load_training_type_profile(cur, training_type_id: int) -> Optional[Dict]:
"""
Loads training type profile for a given type ID.
Returns:
Profile JSONB or None if not configured
"""
cur.execute(
"SELECT profile FROM training_types WHERE id = %s",
(training_type_id,)
)
row = cur.fetchone()
if row and row['profile']:
return row['profile']
return None
def load_evaluation_context(
cur,
profile_id: str,
activity_date: str,
lookback_days: int = 30
) -> Dict:
"""
Loads context data for evaluation (user profile + recent activities).
Args:
cur: Database cursor
profile_id: User profile ID
activity_date: Date of activity being evaluated
lookback_days: How many days of history to load
Returns:
{
"user_profile": {...},
"recent_activities": [...],
"historical_activities": [...]
}
"""
# Load user profile
cur.execute(
"SELECT hf_max, sleep_goal_minutes FROM profiles WHERE id = %s",
(profile_id,)
)
user_row = cur.fetchone()
user_profile = dict(user_row) if user_row else {}
# Load recent activities (last N days)
cur.execute("""
SELECT id, date, training_type_id, duration_min, hr_avg, hr_max,
distance_km, kcal_active, rpe
FROM activity_log
WHERE profile_id = %s
AND date >= %s::date - INTERVAL '%s days'
AND date < %s::date
ORDER BY date DESC
LIMIT 50
""", (profile_id, activity_date, lookback_days, activity_date))
recent_activities = [dict(r) for r in cur.fetchall()]
# Historical activities (same for MVP)
historical_activities = recent_activities
return {
"user_profile": user_profile,
"recent_activities": recent_activities,
"historical_activities": historical_activities
}
def evaluate_and_save_activity(
cur,
activity_id: str,
activity_data: Dict,
training_type_id: int,
profile_id: str
) -> Optional[Dict]:
"""
Evaluates an activity and saves the result to the database.
Args:
cur: Database cursor
activity_id: Activity ID
activity_data: Activity data dict
training_type_id: Training type ID
profile_id: User profile ID
Returns:
Evaluation result or None if no profile configured
"""
# Load profile
profile = load_training_type_profile(cur, training_type_id)
if not profile:
logger.info(f"[EVALUATION] No profile for training_type {training_type_id}, skipping")
return None
# Load parameters registry
parameters = load_parameters_registry(cur)
# Load context
context = load_evaluation_context(
cur,
profile_id,
activity_data.get("date"),
lookback_days=30
)
# Evaluate
evaluator = TrainingProfileEvaluator(parameters)
evaluation_result = evaluator.evaluate_activity(
activity_data,
profile,
context
)
# Save to database
from psycopg2.extras import Json
cur.execute("""
UPDATE activity_log
SET evaluation = %s,
quality_label = %s,
overall_score = %s
WHERE id = %s
""", (
Json(evaluation_result),
evaluation_result.get("quality_label"),
evaluation_result.get("overall_score"),
activity_id
))
logger.info(
f"[EVALUATION] Activity {activity_id}: "
f"{evaluation_result.get('quality_label')} "
f"(score: {evaluation_result.get('overall_score')})"
)
return evaluation_result
def batch_evaluate_activities(
cur,
profile_id: str,
limit: Optional[int] = None
) -> Dict:
"""
Re-evaluates all activities for a user.
Useful for:
- Initial setup after profiles are configured
- Re-evaluation after profile changes
Args:
cur: Database cursor
profile_id: User profile ID
limit: Optional limit for testing
Returns:
{
"total": int,
"evaluated": int,
"skipped": int,
"errors": int
}
"""
# Load all activities
query = """
SELECT id, profile_id, date, training_type_id, duration_min,
hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
rpe, pace_min_per_km, cadence, elevation_gain
FROM activity_log
WHERE profile_id = %s
ORDER BY date DESC
"""
params = [profile_id]
if limit:
query += " LIMIT %s"
params.append(limit)
cur.execute(query, params)
activities = cur.fetchall()
stats = {
"total": len(activities),
"evaluated": 0,
"skipped": 0,
"errors": 0
}
for activity in activities:
activity_dict = dict(activity)
try:
result = evaluate_and_save_activity(
cur,
activity_dict["id"],
activity_dict,
activity_dict["training_type_id"],
profile_id
)
if result:
stats["evaluated"] += 1
else:
stats["skipped"] += 1
except Exception as e:
logger.error(f"[BATCH-EVAL] Error evaluating {activity_dict['id']}: {e}")
stats["errors"] += 1
logger.info(f"[BATCH-EVAL] Completed: {stats}")
return stats

backend/main.py
@@ -21,6 +21,7 @@ from routers import admin, stats, exportdata, importdata
from routers import subscription, coupons, features, tiers_mgmt, tier_limits
from routers import user_restrictions, access_grants, training_types, admin_training_types
from routers import admin_activity_mappings, sleep, rest_days
from routers import evaluation # v9d/v9e Training Type Profiles (#15)
# ── App Configuration ─────────────────────────────────────────────────────────
DATA_DIR = Path(os.getenv("DATA_DIR", "./data"))
@@ -92,6 +93,7 @@ app.include_router(admin_training_types.router) # /api/admin/training-types/*
app.include_router(admin_activity_mappings.router) # /api/admin/activity-mappings/*
app.include_router(sleep.router) # /api/sleep/* (v9d Phase 2b)
app.include_router(rest_days.router) # /api/rest-days/* (v9d Phase 2a)
app.include_router(evaluation.router) # /api/evaluation/* (v9d/v9e Training Profiles #15)
# ── Health Check ──────────────────────────────────────────────────────────────
@app.get("/")

@@ -0,0 +1,144 @@
-- Migration 013: Training Parameters Registry
-- Training Type Profiles System - Foundation
-- Date: 2026-03-23
-- Issue: #15
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- TRAINING PARAMETERS REGISTRY
-- Central definition of all measurable parameters for activities
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
CREATE TABLE IF NOT EXISTS training_parameters (
id SERIAL PRIMARY KEY,
key VARCHAR(50) UNIQUE NOT NULL,
name_de VARCHAR(100) NOT NULL,
name_en VARCHAR(100) NOT NULL,
category VARCHAR(50) NOT NULL,
data_type VARCHAR(20) NOT NULL,
unit VARCHAR(20),
description_de TEXT,
description_en TEXT,
source_field VARCHAR(100),
validation_rules JSONB DEFAULT '{}'::jsonb,
is_active BOOLEAN DEFAULT true,
created_at TIMESTAMP DEFAULT NOW(),
CONSTRAINT chk_category CHECK (category IN (
'physical', 'physiological', 'subjective', 'environmental', 'performance'
)),
CONSTRAINT chk_data_type CHECK (data_type IN (
'integer', 'float', 'string', 'boolean'
))
);
CREATE INDEX IF NOT EXISTS idx_training_parameters_category ON training_parameters(category) WHERE is_active = true;
CREATE INDEX IF NOT EXISTS idx_training_parameters_key ON training_parameters(key) WHERE is_active = true;
COMMENT ON TABLE training_parameters IS 'Registry of all measurable activity parameters (Training Type Profiles System)';
COMMENT ON COLUMN training_parameters.key IS 'Unique identifier (e.g. "avg_hr", "duration_min")';
COMMENT ON COLUMN training_parameters.category IS 'Parameter category: physical, physiological, subjective, environmental, performance';
COMMENT ON COLUMN training_parameters.data_type IS 'Data type: integer, float, string, boolean';
COMMENT ON COLUMN training_parameters.source_field IS 'Mapping to activity_log column name';
COMMENT ON COLUMN training_parameters.validation_rules IS 'Min/Max/Enum for validation (JSONB)';
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- STANDARD PARAMETERS
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
INSERT INTO training_parameters (key, name_de, name_en, category, data_type, unit, source_field, validation_rules, description_de, description_en) VALUES
-- Physical Parameters
('duration_min', 'Dauer', 'Duration', 'physical', 'integer', 'min', 'duration_min',
'{"min": 0, "max": 600}'::jsonb,
'Trainingsdauer in Minuten',
'Training duration in minutes'),
('distance_km', 'Distanz', 'Distance', 'physical', 'float', 'km', 'distance_km',
'{"min": 0, "max": 200}'::jsonb,
'Zurückgelegte Distanz in Kilometern',
'Distance covered in kilometers'),
('kcal_active', 'Aktive Kalorien', 'Active Calories', 'physical', 'integer', 'kcal', 'kcal_active',
'{"min": 0, "max": 5000}'::jsonb,
'Aktiver Kalorienverbrauch',
'Active calorie burn'),
('kcal_resting', 'Ruhekalorien', 'Resting Calories', 'physical', 'integer', 'kcal', 'kcal_resting',
'{"min": 0, "max": 2000}'::jsonb,
'Ruheumsatz während Training',
'Resting calorie burn during training'),
('elevation_gain', 'Höhenmeter', 'Elevation Gain', 'physical', 'integer', 'm', 'elevation_gain',
'{"min": 0, "max": 5000}'::jsonb,
'Überwundene Höhenmeter',
'Elevation gain in meters'),
('pace_min_per_km', 'Pace', 'Pace', 'physical', 'float', 'min/km', 'pace_min_per_km',
'{"min": 2, "max": 20}'::jsonb,
'Durchschnittstempo in Minuten pro Kilometer',
'Average pace in minutes per kilometer'),
('cadence', 'Trittfrequenz', 'Cadence', 'physical', 'integer', 'spm', 'cadence',
'{"min": 0, "max": 220}'::jsonb,
'Schrittfrequenz (Schritte pro Minute)',
'Step frequency (steps per minute)'),
-- Physiological Parameters
('avg_hr', 'Durchschnittspuls', 'Average Heart Rate', 'physiological', 'integer', 'bpm', 'hr_avg',
'{"min": 30, "max": 220}'::jsonb,
'Durchschnittliche Herzfrequenz',
'Average heart rate'),
('max_hr', 'Maximalpuls', 'Max Heart Rate', 'physiological', 'integer', 'bpm', 'hr_max',
'{"min": 40, "max": 220}'::jsonb,
'Maximale Herzfrequenz',
'Maximum heart rate'),
('min_hr', 'Minimalpuls', 'Min Heart Rate', 'physiological', 'integer', 'bpm', 'hr_min',
'{"min": 30, "max": 200}'::jsonb,
'Minimale Herzfrequenz',
'Minimum heart rate'),
('avg_power', 'Durchschnittsleistung', 'Average Power', 'physiological', 'integer', 'W', 'avg_power',
'{"min": 0, "max": 1000}'::jsonb,
'Durchschnittliche Leistung in Watt',
'Average power output in watts'),
-- Subjective Parameters
('rpe', 'RPE (Anstrengung)', 'RPE (Perceived Exertion)', 'subjective', 'integer', 'scale', 'rpe',
'{"min": 1, "max": 10}'::jsonb,
'Subjektive Anstrengung (Rate of Perceived Exertion)',
'Rate of Perceived Exertion'),
-- Environmental Parameters
('temperature_celsius', 'Temperatur', 'Temperature', 'environmental', 'float', '°C', 'temperature_celsius',
'{"min": -30, "max": 50}'::jsonb,
'Umgebungstemperatur in Celsius',
'Ambient temperature in Celsius'),
('humidity_percent', 'Luftfeuchtigkeit', 'Humidity', 'environmental', 'integer', '%', 'humidity_percent',
'{"min": 0, "max": 100}'::jsonb,
'Relative Luftfeuchtigkeit in Prozent',
'Relative humidity in percent'),
-- Performance Parameters (calculated)
('avg_hr_percent', '% Max-HF', '% Max HR', 'performance', 'float', '%', 'avg_hr_percent',
'{"min": 0, "max": 100}'::jsonb,
'Durchschnittspuls als Prozent der maximalen Herzfrequenz',
'Average heart rate as percentage of max heart rate'),
('kcal_per_km', 'Kalorien pro km', 'Calories per km', 'performance', 'float', 'kcal/km', 'kcal_per_km',
'{}'::jsonb,
'Kalorienverbrauch pro Kilometer',
'Calorie burn per kilometer');
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- SUMMARY
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- Display inserted parameters
DO $$
BEGIN
RAISE NOTICE '✓ Migration 013 completed';
RAISE NOTICE ' - Created training_parameters table';
RAISE NOTICE ' - Inserted % standard parameters', (SELECT COUNT(*) FROM training_parameters);
END $$;

@@ -0,0 +1,114 @@
-- Migration 014: Training Type Profiles & Activity Evaluation
-- Training Type Profiles System - Schema Extensions
-- Date: 2026-03-23
-- Issue: #15
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- EXTEND TRAINING TYPES
-- Add profile column for comprehensive training type configuration
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ALTER TABLE training_types ADD COLUMN IF NOT EXISTS profile JSONB DEFAULT NULL;
CREATE INDEX IF NOT EXISTS idx_training_types_profile_enabled ON training_types
((profile->'rule_sets'->'minimum_requirements'->>'enabled'))
WHERE profile IS NOT NULL;
COMMENT ON COLUMN training_types.profile IS 'Comprehensive training type profile with 7 dimensions (rule_sets, intensity_zones, training_effects, periodization, performance_indicators, safety, ai_context)';
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- EXTEND ACTIVITY LOG
-- Add evaluation results and quality labels
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS evaluation JSONB DEFAULT NULL;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS quality_label VARCHAR(20);
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS overall_score FLOAT;
CREATE INDEX IF NOT EXISTS idx_activity_quality_label ON activity_log(quality_label)
WHERE quality_label IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_activity_overall_score ON activity_log(overall_score DESC)
WHERE overall_score IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_activity_evaluation_passed ON activity_log
((evaluation->'rule_set_results'->'minimum_requirements'->>'passed'))
WHERE evaluation IS NOT NULL;
COMMENT ON COLUMN activity_log.evaluation IS 'Complete evaluation result (7 dimensions, scores, recommendations, warnings)';
COMMENT ON COLUMN activity_log.quality_label IS 'Quality label: excellent, good, acceptable, poor (for quick filtering)';
COMMENT ON COLUMN activity_log.overall_score IS 'Overall quality score 0.0-1.0 (for sorting)';
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- ADD MISSING COLUMNS (if not already added by previous migrations)
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- Add HR columns if not exist (might be in Migration 008)
DO $$
BEGIN
IF NOT EXISTS (SELECT 1 FROM information_schema.columns
WHERE table_name='activity_log' AND column_name='hr_min') THEN
ALTER TABLE activity_log ADD COLUMN hr_min INTEGER CHECK (hr_min > 0 AND hr_min < 200);
END IF;
END $$;
-- Add performance columns for calculated values
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS avg_hr_percent FLOAT;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS kcal_per_km FLOAT;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS pace_min_per_km FLOAT;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS cadence INTEGER;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS avg_power INTEGER;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS elevation_gain INTEGER;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS temperature_celsius FLOAT;
ALTER TABLE activity_log ADD COLUMN IF NOT EXISTS humidity_percent INTEGER;
COMMENT ON COLUMN activity_log.avg_hr_percent IS 'Average HR as percentage of user max HR (calculated)';
COMMENT ON COLUMN activity_log.kcal_per_km IS 'Calories burned per kilometer (calculated)';
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- HELPER FUNCTION: Calculate performance metrics (avg_hr_percent, kcal_per_km)
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
CREATE OR REPLACE FUNCTION calculate_avg_hr_percent()
RETURNS TRIGGER AS $$
DECLARE
user_max_hr INTEGER;
BEGIN
-- Get user's max HR from profile
SELECT hf_max INTO user_max_hr
FROM profiles
WHERE id = NEW.profile_id;
-- Calculate percentage if both values exist
IF NEW.hr_avg IS NOT NULL AND user_max_hr IS NOT NULL AND user_max_hr > 0 THEN
NEW.avg_hr_percent := (NEW.hr_avg::float / user_max_hr::float) * 100;
END IF;
-- Calculate kcal per km
IF NEW.kcal_active IS NOT NULL AND NEW.distance_km IS NOT NULL AND NEW.distance_km > 0 THEN
NEW.kcal_per_km := NEW.kcal_active::float / NEW.distance_km;
END IF;
RETURN NEW;
END;
$$ LANGUAGE plpgsql;
-- Trigger for automatic calculation
DROP TRIGGER IF EXISTS trg_calculate_performance_metrics ON activity_log;
CREATE TRIGGER trg_calculate_performance_metrics
BEFORE INSERT OR UPDATE ON activity_log
FOR EACH ROW
EXECUTE FUNCTION calculate_avg_hr_percent();
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
-- SUMMARY
-- ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
DO $$
BEGIN
RAISE NOTICE '✓ Migration 014 completed';
RAISE NOTICE ' - Extended training_types with profile column';
RAISE NOTICE ' - Extended activity_log with evaluation columns';
RAISE NOTICE ' - Added performance metric calculations';
RAISE NOTICE ' - Created indexes for fast queries';
END $$;

backend/profile_evaluator.py
@@ -0,0 +1,349 @@
"""
Training Type Profiles - Master Evaluator
Comprehensive activity evaluation across all 7 dimensions.
Issue: #15
Date: 2026-03-23
"""
from typing import Dict, Optional, List
from datetime import datetime
import logging
from rule_engine import RuleEvaluator, IntensityZoneEvaluator, TrainingEffectsEvaluator
logger = logging.getLogger(__name__)
class TrainingProfileEvaluator:
"""
Master class for comprehensive activity evaluation.
Evaluates an activity against a training type profile across 7 dimensions:
1. Minimum Requirements (Quality Gates)
2. Intensity Zones (HR zones)
3. Training Effects (Abilities)
4. Periodization (Frequency & Recovery)
5. Performance Indicators (KPIs)
6. Safety (Warnings)
7. AI Context
"""
def __init__(self, parameters_registry: Dict[str, Dict]):
"""
Initialize evaluator with parameter registry.
Args:
parameters_registry: Dict mapping parameter_key -> config
"""
self.parameters_registry = parameters_registry
self.rule_evaluator = RuleEvaluator()
self.zone_evaluator = IntensityZoneEvaluator()
self.effects_evaluator = TrainingEffectsEvaluator()
def evaluate_activity(
self,
activity: Dict,
training_type_profile: Optional[Dict],
context: Optional[Dict] = None
) -> Dict:
"""
Complete evaluation of an activity against its training type profile.
Args:
activity: Activity data dictionary
training_type_profile: Training type profile (JSONB)
context: {
"user_profile": {...},
"recent_activities": [...],
"historical_activities": [...]
}
Returns:
{
"evaluated_at": ISO timestamp,
"profile_version": str,
"rule_set_results": {
"minimum_requirements": {...},
"intensity_zones": {...},
"training_effects": {...},
"periodization": {...},
"performance_indicators": {...},
"safety": {...}
},
"overall_score": float (0-1),
"quality_label": str,
"recommendations": [str],
"warnings": [str]
}
"""
# No profile? Return unvalidated result
if not training_type_profile:
return self._create_unvalidated_result()
rule_sets = training_type_profile.get("rule_sets", {})
context = context or {}
results = {
"evaluated_at": datetime.now().isoformat(),
"profile_version": training_type_profile.get("version", "unknown"),
"rule_set_results": {}
}
# ━━━ 1. MINIMUM REQUIREMENTS ━━━
if "minimum_requirements" in rule_sets:
results["rule_set_results"]["minimum_requirements"] = \
self.rule_evaluator.evaluate_rule_set(
rule_sets["minimum_requirements"],
activity,
self.parameters_registry
)
# ━━━ 2. INTENSITY ZONES ━━━
if "intensity_zones" in rule_sets:
results["rule_set_results"]["intensity_zones"] = \
self.zone_evaluator.evaluate(
rule_sets["intensity_zones"],
activity,
context.get("user_profile", {})
)
# ━━━ 3. TRAINING EFFECTS ━━━
if "training_effects" in rule_sets:
results["rule_set_results"]["training_effects"] = \
self.effects_evaluator.evaluate(
rule_sets["training_effects"],
activity,
results["rule_set_results"].get("intensity_zones")
)
# ━━━ 4. PERIODIZATION ━━━
if "periodization" in rule_sets:
results["rule_set_results"]["periodization"] = \
self._evaluate_periodization(
rule_sets["periodization"],
activity,
context.get("recent_activities", [])
)
# ━━━ 5. PERFORMANCE INDICATORS ━━━
if "performance_indicators" in rule_sets:
results["rule_set_results"]["performance_indicators"] = \
self._evaluate_performance(
rule_sets["performance_indicators"],
activity,
context.get("historical_activities", [])
)
# ━━━ 6. SAFETY WARNINGS ━━━
if "safety" in rule_sets:
results["rule_set_results"]["safety"] = \
self._evaluate_safety(
rule_sets["safety"],
activity
)
# ━━━ OVERALL SCORE & QUALITY LABEL ━━━
overall_score = self._calculate_overall_score(results["rule_set_results"])
results["overall_score"] = overall_score
results["quality_label"] = self._get_quality_label(overall_score)
# ━━━ RECOMMENDATIONS & WARNINGS ━━━
results["recommendations"] = self._generate_recommendations(results)
results["warnings"] = self._collect_warnings(results)
return results
def _create_unvalidated_result(self) -> Dict:
"""Creates result for activities without profile."""
return {
"evaluated_at": datetime.now().isoformat(),
"profile_version": None,
"rule_set_results": {},
"overall_score": None,
"quality_label": None,
"recommendations": ["Kein Trainingsprofil konfiguriert"],
"warnings": []
}
def _evaluate_periodization(
self,
config: Dict,
activity: Dict,
recent_activities: List[Dict]
) -> Dict:
"""
Evaluates periodization compliance (frequency & recovery).
Simplified for MVP - full implementation later.
"""
if not config.get("enabled", False):
return {"enabled": False}
# Basic frequency check
training_type_id = activity.get("training_type_id")
same_type_this_week = sum(
1 for a in recent_activities
if a.get("training_type_id") == training_type_id
)
frequency_config = config.get("frequency", {})
optimal = frequency_config.get("per_week_optimal", 3)
return {
"enabled": True,
"weekly_count": same_type_this_week,
"optimal_count": optimal,
"frequency_status": "optimal" if same_type_this_week <= optimal else "over_optimal",
"recovery_adequate": True, # Simplified for MVP
"warning": None
}
def _evaluate_performance(
self,
config: Dict,
activity: Dict,
historical_activities: List[Dict]
) -> Dict:
"""
Evaluates performance development.
Simplified for MVP - full implementation later.
"""
if not config.get("enabled", False):
return {"enabled": False}
return {
"enabled": True,
"trend": "stable", # Simplified
"metrics_comparison": {},
"benchmark_level": "intermediate"
}
def _evaluate_safety(self, config: Dict, activity: Dict) -> Dict:
"""
Evaluates safety warnings.
"""
if not config.get("enabled", False):
return {"enabled": False, "warnings": []}
warnings_config = config.get("warnings", [])
triggered_warnings = []
for warning_rule in warnings_config:
param_key = warning_rule.get("parameter")
operator = warning_rule.get("operator")
threshold = warning_rule.get("value")
severity = warning_rule.get("severity", "medium")
message = warning_rule.get("message", "")
actual_value = activity.get(param_key)
if actual_value is not None:
operator_func = RuleEvaluator.OPERATORS.get(operator)
if operator_func and operator_func(actual_value, threshold):
triggered_warnings.append({
"severity": severity,
"message": message,
"parameter": param_key,
"actual_value": actual_value,
"threshold": threshold
})
return {
"enabled": True,
"warnings": triggered_warnings
}
def _calculate_overall_score(self, rule_set_results: Dict) -> float:
"""
Calculates weighted overall score.
Weights:
- Minimum Requirements: 40%
- Intensity Zones: 20%
- Periodization: 20%
- Performance: 10%
- Training Effects: 10%
"""
weights = {
"minimum_requirements": 0.4,
"intensity_zones": 0.2,
"periodization": 0.2,
"performance_indicators": 0.1,
"training_effects": 0.1
}
total_score = 0.0
total_weight = 0.0
for rule_set_name, weight in weights.items():
result = rule_set_results.get(rule_set_name)
if result and result.get("enabled"):
score = result.get("score", 0.5)
# Special handling for different result types
if rule_set_name == "intensity_zones":
score = result.get("duration_quality", 0.5)
elif rule_set_name == "periodization":
score = 1.0 if result.get("recovery_adequate", False) else 0.5
total_score += score * weight
total_weight += weight
return round(total_score / total_weight, 2) if total_weight > 0 else 0.5
def _get_quality_label(self, score: Optional[float]) -> Optional[str]:
"""Converts score to quality label."""
if score is None:
return None
if score >= 0.9:
return "excellent"
elif score >= 0.7:
return "good"
elif score >= 0.5:
return "acceptable"
else:
return "poor"
def _generate_recommendations(self, results: Dict) -> List[str]:
"""Generates actionable recommendations."""
recommendations = []
# Check minimum requirements
min_req = results["rule_set_results"].get("minimum_requirements", {})
if min_req.get("enabled") and not min_req.get("passed"):
for failed in min_req.get("failed_rules", []):
param = failed.get("parameter")
actual = failed.get("actual_value")
expected = failed.get("expected_value")
reason = failed.get("reason", "")
symbol = failed.get("operator_symbol", "")
recommendations.append(
f"{param}: {actual} {symbol} {expected} - {reason}"
)
# Check intensity zones
zone_result = results["rule_set_results"].get("intensity_zones", {})
if zone_result.get("enabled") and zone_result.get("recommendation"):
recommendations.append(zone_result["recommendation"])
# Default recommendation if excellent
if results.get("quality_label") == "excellent" and not recommendations:
recommendations.append("Hervorragendes Training! Weiter so.")
return recommendations
def _collect_warnings(self, results: Dict) -> List[str]:
"""Collects all warnings from safety checks."""
safety_result = results["rule_set_results"].get("safety", {})
if not safety_result.get("enabled"):
return []
warnings = []
for warning in safety_result.get("warnings", []):
severity_icon = "🔴" if warning["severity"] == "high" else "⚠️"
warnings.append(f"{severity_icon} {warning['message']}")
return warnings
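
To make the weighting in _calculate_overall_score concrete, here is a small illustrative check. It calls internal methods directly, assumes backend/ is on the import path, and the input scores are made up:

```python
# Illustrative only: how enabled dimensions are weighted into the overall score.
from profile_evaluator import TrainingProfileEvaluator  # assumption: backend/ on sys.path

evaluator = TrainingProfileEvaluator(parameters_registry={})
rule_set_results = {
    "minimum_requirements": {"enabled": True, "score": 0.8},        # weight 0.4
    "intensity_zones": {"enabled": True, "duration_quality": 1.0},  # weight 0.2
    "periodization": {"enabled": True, "recovery_adequate": True},  # weight 0.2
}
# (0.8*0.4 + 1.0*0.2 + 1.0*0.2) / (0.4 + 0.2 + 0.2) = 0.9
score = evaluator._calculate_overall_score(rule_set_results)
print(score, evaluator._get_quality_label(score))  # 0.9 excellent
```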

backend/routers/evaluation.py
@@ -0,0 +1,146 @@
"""
Evaluation Endpoints - Training Type Profiles
Endpoints for activity evaluation and re-evaluation.
Issue: #15
Date: 2026-03-23
"""
import logging
from typing import Optional
from fastapi import APIRouter, HTTPException, Depends
from db import get_db, get_cursor, r2d
from auth import require_auth, require_admin
from evaluation_helper import (
evaluate_and_save_activity,
batch_evaluate_activities,
load_parameters_registry
)
router = APIRouter(prefix="/api/evaluation", tags=["evaluation"])
logger = logging.getLogger(__name__)
@router.get("/parameters")
def list_parameters(session: dict = Depends(require_auth)):
"""
List all available training parameters.
"""
with get_db() as conn:
cur = get_cursor(conn)
parameters = load_parameters_registry(cur)
return {
"parameters": list(parameters.values()),
"count": len(parameters)
}
@router.post("/activity/{activity_id}")
def evaluate_activity(
activity_id: str,
session: dict = Depends(require_auth)
):
"""
Evaluates or re-evaluates a single activity.
Returns the evaluation result.
"""
profile_id = session['profile_id']
with get_db() as conn:
cur = get_cursor(conn)
# Load activity
cur.execute("""
SELECT id, profile_id, date, training_type_id, duration_min,
hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
rpe, pace_min_per_km, cadence, elevation_gain
FROM activity_log
WHERE id = %s AND profile_id = %s
""", (activity_id, profile_id))
activity = cur.fetchone()
if not activity:
raise HTTPException(404, "Activity not found")
activity_dict = dict(activity)
# Evaluate
result = evaluate_and_save_activity(
cur,
activity_dict["id"],
activity_dict,
activity_dict["training_type_id"],
profile_id
)
if not result:
return {
"message": "No profile configured for this training type",
"evaluation": None
}
return {
"message": "Activity evaluated",
"evaluation": result
}
@router.post("/batch")
def batch_evaluate(
limit: Optional[int] = None,
session: dict = Depends(require_auth)
):
"""
Re-evaluates all activities for the current user.
Optional limit parameter for testing.
"""
profile_id = session['profile_id']
with get_db() as conn:
cur = get_cursor(conn)
stats = batch_evaluate_activities(cur, profile_id, limit)
return {
"message": "Batch evaluation completed",
"stats": stats
}
@router.post("/batch/all")
def batch_evaluate_all(session: dict = Depends(require_admin)):
"""
Admin-only: Re-evaluates all activities for all users.
Use with caution on large databases!
"""
with get_db() as conn:
cur = get_cursor(conn)
# Get all profiles
cur.execute("SELECT id FROM profiles")
profiles = cur.fetchall()
total_stats = {
"profiles": len(profiles),
"total": 0,
"evaluated": 0,
"skipped": 0,
"errors": 0
}
for profile in profiles:
profile_id = profile['id']
stats = batch_evaluate_activities(cur, profile_id)
total_stats["total"] += stats["total"]
total_stats["evaluated"] += stats["evaluated"]
total_stats["skipped"] += stats["skipped"]
total_stats["errors"] += stats["errors"]
return {
"message": "Batch evaluation for all users completed",
"stats": total_stats
}
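
For reference, calling the new endpoints from a client might look like the sketch below. BASE_URL, the session cookie, and the activity ID are placeholders, and cookie-based auth is an assumption about what require_auth expects:

```python
# Hypothetical client calls against the evaluation endpoints (values are placeholders).
import requests

BASE_URL = "http://localhost:8000"               # assumption: local dev server
cookies = {"session": "<session-cookie-value>"}  # assumption: cookie-based session auth

# List all registered training parameters
params = requests.get(f"{BASE_URL}/api/evaluation/parameters", cookies=cookies).json()
print(params["count"], "parameters")

# (Re-)evaluate a single activity
resp = requests.post(f"{BASE_URL}/api/evaluation/activity/<activity-id>", cookies=cookies)
print(resp.json().get("evaluation"))

# Re-evaluate up to 10 of the current user's activities
batch = requests.post(f"{BASE_URL}/api/evaluation/batch",
                      params={"limit": 10}, cookies=cookies).json()
print(batch["stats"])
```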

backend/rule_engine.py
@@ -0,0 +1,427 @@
"""
Training Type Profiles - Rule Engine
Flexible rule evaluation system for activity quality assessment.
Issue: #15
Date: 2026-03-23
"""
from typing import Any, Dict, List, Optional, Callable
from datetime import datetime
import logging
logger = logging.getLogger(__name__)
class RuleEvaluator:
"""
Generic rule evaluator for arbitrary parameters and operators.
Supports flexible rule definitions with various operators:
- gte, lte, gt, lt: Comparison operators
- eq, neq: Equality operators
- between: Range checks
- in, not_in: Set membership
"""
# Operator definitions
OPERATORS: Dict[str, Callable[[Any, Any], bool]] = {
"gte": lambda actual, expected: actual is not None and actual >= expected,
"lte": lambda actual, expected: actual is not None and actual <= expected,
"gt": lambda actual, expected: actual is not None and actual > expected,
"lt": lambda actual, expected: actual is not None and actual < expected,
"eq": lambda actual, expected: actual == expected,
"neq": lambda actual, expected: actual != expected,
"between": lambda actual, expected: actual is not None and expected[0] <= actual <= expected[1],
"in": lambda actual, expected: actual in expected,
"not_in": lambda actual, expected: actual not in expected,
}
OPERATOR_SYMBOLS = {
"gte": "≥",
"lte": "≤",
"gt": ">",
"lt": "<",
"eq": "=",
"neq": "≠",
"between": "between",
"in": "in",
"not_in": "not in",
}
@classmethod
def evaluate_rule(
cls,
rule: Dict,
activity: Dict,
parameters_registry: Dict[str, Dict]
) -> Dict:
"""
Evaluates a single rule against an activity.
Args:
rule: {
"parameter": str,
"operator": str,
"value": Any,
"weight": int,
"optional": bool,
"reason": str
}
activity: Activity data dictionary
parameters_registry: Mapping parameter_key -> config
Returns:
{
"passed": bool,
"actual_value": Any,
"expected_value": Any,
"parameter": str,
"operator": str,
"operator_symbol": str,
"reason": str,
"weight": int,
"skipped": bool (optional),
"error": str (optional)
}
"""
param_key = rule.get("parameter")
operator = rule.get("operator")
expected_value = rule.get("value")
weight = rule.get("weight", 1)
reason = rule.get("reason", "")
optional = rule.get("optional", False)
# Get parameter configuration
param_config = parameters_registry.get(param_key)
if not param_config:
return {
"passed": False,
"parameter": param_key,
"error": f"Unknown parameter: {param_key}"
}
# Extract value from activity
source_field = param_config.get("source_field", param_key)
actual_value = activity.get(source_field)
# Optional and not provided? → Pass
if optional and actual_value is None:
return {
"passed": True,
"actual_value": None,
"expected_value": expected_value,
"parameter": param_key,
"operator": operator,
"operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator),
"reason": "Optional parameter not provided",
"weight": weight,
"skipped": True
}
# Required but not provided? → Fail
if actual_value is None:
return {
"passed": False,
"actual_value": None,
"expected_value": expected_value,
"parameter": param_key,
"operator": operator,
"operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator),
"reason": reason or "Required parameter missing",
"weight": weight
}
# Apply operator
operator_func = cls.OPERATORS.get(operator)
if not operator_func:
return {
"passed": False,
"parameter": param_key,
"error": f"Unknown operator: {operator}"
}
try:
passed = operator_func(actual_value, expected_value)
except Exception as e:
logger.error(f"[RULE-ENGINE] Error evaluating rule {param_key}: {e}")
return {
"passed": False,
"parameter": param_key,
"error": f"Evaluation error: {str(e)}"
}
return {
"passed": passed,
"actual_value": actual_value,
"expected_value": expected_value,
"parameter": param_key,
"operator": operator,
"operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator),
"reason": reason,
"weight": weight
}
@classmethod
def evaluate_rule_set(
cls,
rule_set: Dict,
activity: Dict,
parameters_registry: Dict[str, Dict]
) -> Dict:
"""
Evaluates a complete rule set (e.g., minimum_requirements).
Args:
rule_set: {
"enabled": bool,
"pass_strategy": str,
"pass_threshold": float,
"rules": [...]
}
activity: Activity data
parameters_registry: Parameter configurations
Returns:
{
"enabled": bool,
"passed": bool,
"score": float (0-1),
"rule_results": [...],
"pass_strategy": str,
"pass_threshold": float,
"failed_rules": [...]
}
"""
if not rule_set.get("enabled", False):
return {
"enabled": False,
"passed": True,
"score": 1.0,
"rule_results": [],
"failed_rules": []
}
rules = rule_set.get("rules", [])
pass_strategy = rule_set.get("pass_strategy", "weighted_score")
pass_threshold = rule_set.get("pass_threshold", 0.6)
rule_results = []
failed_rules = []
total_weight = 0
passed_weight = 0
# Evaluate each rule
for rule in rules:
result = cls.evaluate_rule(rule, activity, parameters_registry)
rule_results.append(result)
if result.get("skipped"):
continue
if result.get("error"):
logger.warning(f"[RULE-ENGINE] Rule error: {result['error']}")
continue
weight = result.get("weight", 1)
total_weight += weight
if result["passed"]:
passed_weight += weight
else:
failed_rules.append(result)
# Calculate score
score = passed_weight / total_weight if total_weight > 0 else 1.0
# Apply pass strategy
if pass_strategy == "all_must_pass":
passed = all(
r["passed"] for r in rule_results
if not r.get("skipped") and not r.get("error")
)
elif pass_strategy == "weighted_score":
passed = score >= pass_threshold
elif pass_strategy == "at_least_n":
n = rule_set.get("at_least_n", 1)
passed_count = sum(
1 for r in rule_results
if r["passed"] and not r.get("skipped")
)
passed = passed_count >= n
else:
passed = False
logger.warning(f"[RULE-ENGINE] Unknown pass strategy: {pass_strategy}")
return {
"enabled": True,
"passed": passed,
"score": round(score, 2),
"rule_results": rule_results,
"failed_rules": failed_rules,
"pass_strategy": pass_strategy,
"pass_threshold": pass_threshold
}
class IntensityZoneEvaluator:
"""
Evaluates heart rate zones and time distribution.
"""
@staticmethod
def evaluate(
zone_config: Dict,
activity: Dict,
user_profile: Dict
) -> Dict:
"""
Evaluates which HR zone the activity was in.
Args:
zone_config: intensity_zones configuration
activity: Activity data (with hr_avg)
user_profile: User profile (with hf_max)
Returns:
{
"enabled": bool,
"dominant_zone": str,
"avg_hr_percent": float,
"zone_color": str,
"zone_effect": str,
"duration_quality": float (0-1),
"recommendation": str
}
"""
if not zone_config.get("enabled", False):
return {"enabled": False}
avg_hr = activity.get("hr_avg")
user_max_hr = user_profile.get("hf_max", 180) # Default 180 if not set
if not avg_hr or not user_max_hr:
return {
"enabled": True,
"dominant_zone": "unknown",
"avg_hr_percent": None,
"recommendation": "Herzfrequenz-Daten fehlen"
}
avg_hr_percent = (avg_hr / user_max_hr) * 100
# Find matching zone
zones = zone_config.get("zones", [])
dominant_zone = None
for zone in zones:
zone_rules = zone.get("rules", [])
for rule in zone_rules:
if rule["parameter"] == "avg_hr_percent":
min_percent, max_percent = rule["value"]
if min_percent <= avg_hr_percent <= max_percent:
dominant_zone = zone
break
if dominant_zone:
break
if not dominant_zone:
return {
"enabled": True,
"dominant_zone": "out_of_range",
"avg_hr_percent": round(avg_hr_percent, 1),
"recommendation": "Herzfrequenz außerhalb definierter Zonen"
}
# Check duration quality
duration = activity.get("duration_min", 0)
target_duration = dominant_zone.get("target_duration_min", 30)
duration_quality = min(duration / target_duration, 1.0) if target_duration > 0 else 1.0
recommendation = f"Training in Zone '{dominant_zone['name']}' (Effekt: {dominant_zone['effect']})."
if duration < target_duration:
recommendation += f" Für optimale Wirkung: {target_duration}min empfohlen."
return {
"enabled": True,
"dominant_zone": dominant_zone.get("id"),
"dominant_zone_name": dominant_zone.get("name"),
"avg_hr_percent": round(avg_hr_percent, 1),
"zone_color": dominant_zone.get("color"),
"zone_effect": dominant_zone.get("effect"),
"duration_quality": round(duration_quality, 2),
"target_duration_min": target_duration,
"actual_duration_min": duration,
"recommendation": recommendation
}
class TrainingEffectsEvaluator:
"""
Evaluates which abilities are trained by the activity.
"""
@staticmethod
def evaluate(
effects_config: Dict,
activity: Dict,
intensity_zone_result: Optional[Dict] = None
) -> Dict:
"""
Evaluates training effects (abilities trained).
Args:
effects_config: training_effects configuration
activity: Activity data
intensity_zone_result: Result from intensity zone evaluation
Returns:
{
"enabled": bool,
"abilities_trained": [...],
"total_training_load": float
}
"""
if not effects_config.get("enabled", False):
return {"enabled": False}
abilities_trained = []
# Use default effects if no conditional matching
default_effects = effects_config.get("default_effects", {})
primary_abilities = default_effects.get("primary_abilities", [])
secondary_abilities = default_effects.get("secondary_abilities", [])
# Calculate quality factor (simplified for now)
quality_factor = 1.0
# Primary abilities
for ability in primary_abilities:
abilities_trained.append({
"category": ability["category"],
"ability": ability["ability"],
"intensity": ability["intensity"],
"quality": quality_factor,
"contribution": ability["intensity"] * quality_factor,
"type": "primary"
})
# Secondary abilities
for ability in secondary_abilities:
abilities_trained.append({
"category": ability["category"],
"ability": ability["ability"],
"intensity": ability["intensity"],
"quality": quality_factor * 0.7, # Secondary = 70%
"contribution": ability["intensity"] * quality_factor * 0.7,
"type": "secondary"
})
total_training_load = sum(a["contribution"] for a in abilities_trained)
return {
"enabled": True,
"abilities_trained": abilities_trained,
"total_training_load": round(total_training_load, 2),
"metabolic_focus": effects_config.get("metabolic_focus", []),
"muscle_groups": effects_config.get("muscle_groups", [])
}
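
To make the weighted_score pass strategy above concrete, here is a minimal worked example. The registry and activity values are illustrative, and backend/ is assumed to be on the import path:

```python
# Illustrative check of the "weighted_score" strategy in RuleEvaluator.
from rule_engine import RuleEvaluator  # assumption: backend/ on sys.path

registry = {
    "duration_min": {"source_field": "duration_min"},
    "avg_hr": {"source_field": "hr_avg"},
}
rule_set = {
    "enabled": True,
    "pass_strategy": "weighted_score",
    "pass_threshold": 0.6,
    "rules": [
        {"parameter": "duration_min", "operator": "gte", "value": 30, "weight": 3},
        {"parameter": "avg_hr", "operator": "between", "value": [120, 150], "weight": 1},
    ],
}
activity = {"duration_min": 45, "hr_avg": 160}

result = RuleEvaluator.evaluate_rule_set(rule_set, activity, registry)
# Duration rule passes (weight 3), HR rule fails (weight 1):
# score = 3 / 4 = 0.75 >= 0.6, so the set passes despite one failed rule.
assert result["passed"] is True and result["score"] == 0.75
```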