## Implemented
### DB-Schema (Migrations)
- Migration 013: training_parameters table (16 standard parameters)
- Migration 014: training_types.profile + activity_log.evaluation columns
- Performance metric calculations (avg_hr_percent, kcal_per_km)
### Backend - Rule Engine
- RuleEvaluator: Generic rule evaluation with 9 operators
- gte, lte, gt, lt, eq, neq, between, in, not_in
- Weighted scoring system
- Pass strategies: all_must_pass, weighted_score, at_least_n
- IntensityZoneEvaluator: HR zone analysis
- TrainingEffectsEvaluator: Abilities development
### Backend - Master Evaluator
- TrainingProfileEvaluator: 7-dimensional evaluation
1. Minimum Requirements (Quality Gates)
2. Intensity Zones (HR zones)
3. Training Effects (Abilities)
4. Periodization (Frequency & Recovery)
5. Performance Indicators (KPIs)
6. Safety (Warnings)
7. AI Context (simplified for MVP)
- evaluation_helper.py: Utilities for loading + saving
- routers/evaluation.py: API endpoints
- POST /api/evaluation/activity/{id}
- POST /api/evaluation/batch
- GET /api/evaluation/parameters
### Integration
- main.py: Router registration
## TODO (Phase 1.2)
- Auto-evaluation on activity INSERT/UPDATE
- Admin-UI for profile editing
- User-UI for results display
## Testing
- ✅ Syntax checks passed
- 🔲 Runtime testing pending (after auto-evaluation)
Part of Issue #15 - Training Type Profiles System
428 lines
14 KiB
Python
"""
Training Type Profiles - Rule Engine

Flexible rule evaluation system for activity quality assessment.

Issue: #15
Date: 2026-03-23
"""

from typing import Any, Dict, List, Optional, Callable
from datetime import datetime
import logging

# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)
class RuleEvaluator:
    """
    Generic rule evaluator for arbitrary parameters and operators.

    Supports flexible rule definitions with various operators:
    - gte, lte, gt, lt: Comparison operators
    - eq, neq: Equality operators
    - between: Range checks
    - in, not_in: Set membership

    All comparison operators treat a None actual value as a failure
    (handled before the operator is applied), except eq/neq/in/not_in,
    which compare None like any other value.
    """

    # Operator name -> predicate(actual, expected).
    # Ordering operators guard against None to avoid TypeError on comparison.
    OPERATORS: Dict[str, Callable[[Any, Any], bool]] = {
        "gte": lambda actual, expected: actual is not None and actual >= expected,
        "lte": lambda actual, expected: actual is not None and actual <= expected,
        "gt": lambda actual, expected: actual is not None and actual > expected,
        "lt": lambda actual, expected: actual is not None and actual < expected,
        "eq": lambda actual, expected: actual == expected,
        "neq": lambda actual, expected: actual != expected,
        "between": lambda actual, expected: actual is not None and expected[0] <= actual <= expected[1],
        "in": lambda actual, expected: actual in expected,
        "not_in": lambda actual, expected: actual not in expected,
    }

    # Human-readable symbols for UI display of rule results.
    OPERATOR_SYMBOLS = {
        "gte": "≥",
        "lte": "≤",
        "gt": ">",
        "lt": "<",
        "eq": "=",
        "neq": "≠",
        "between": "⟷",
        "in": "∈",
        "not_in": "∉",
    }

    @classmethod
    def evaluate_rule(
        cls,
        rule: Dict,
        activity: Dict,
        parameters_registry: Dict[str, Dict]
    ) -> Dict:
        """
        Evaluates a single rule against an activity.

        Args:
            rule: {
                "parameter": str,
                "operator": str,
                "value": Any,
                "weight": int,
                "optional": bool,
                "reason": str
            }
            activity: Activity data dictionary
            parameters_registry: Mapping parameter_key -> config

        Returns:
            {
                "passed": bool,
                "actual_value": Any,
                "expected_value": Any,
                "parameter": str,
                "operator": str,
                "operator_symbol": str,
                "reason": str,
                "weight": int,
                "skipped": bool (optional),
                "error": str (optional)
            }
        """
        param_key = rule.get("parameter")
        operator = rule.get("operator")
        expected_value = rule.get("value")
        weight = rule.get("weight", 1)
        reason = rule.get("reason", "")
        optional = rule.get("optional", False)

        # Unknown parameter -> error result (caller logs and skips it in scoring).
        param_config = parameters_registry.get(param_key)
        if not param_config:
            return {
                "passed": False,
                "parameter": param_key,
                "error": f"Unknown parameter: {param_key}"
            }

        # The registry may map a parameter key onto a differently-named
        # activity field; default to the parameter key itself.
        source_field = param_config.get("source_field", param_key)
        actual_value = activity.get(source_field)

        # Optional and not provided? → Pass (marked as skipped so the
        # rule-set scorer excludes it from the weighted total).
        if optional and actual_value is None:
            return {
                "passed": True,
                "actual_value": None,
                "expected_value": expected_value,
                "parameter": param_key,
                "operator": operator,
                "operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator),
                "reason": "Optional parameter not provided",
                "weight": weight,
                "skipped": True
            }

        # Required but not provided? → Fail
        if actual_value is None:
            return {
                "passed": False,
                "actual_value": None,
                "expected_value": expected_value,
                "parameter": param_key,
                "operator": operator,
                "operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator),
                "reason": reason or "Required parameter missing",
                "weight": weight
            }

        # Apply operator
        operator_func = cls.OPERATORS.get(operator)
        if not operator_func:
            return {
                "passed": False,
                "parameter": param_key,
                "error": f"Unknown operator: {operator}"
            }

        try:
            passed = operator_func(actual_value, expected_value)
        except Exception as e:
            # e.g. type mismatch between actual and expected value
            logger.error(f"[RULE-ENGINE] Error evaluating rule {param_key}: {e}")
            return {
                "passed": False,
                "parameter": param_key,
                "error": f"Evaluation error: {str(e)}"
            }

        return {
            "passed": passed,
            "actual_value": actual_value,
            "expected_value": expected_value,
            "parameter": param_key,
            "operator": operator,
            "operator_symbol": cls.OPERATOR_SYMBOLS.get(operator, operator),
            "reason": reason,
            "weight": weight
        }

    @classmethod
    def evaluate_rule_set(
        cls,
        rule_set: Dict,
        activity: Dict,
        parameters_registry: Dict[str, Dict]
    ) -> Dict:
        """
        Evaluates a complete rule set (e.g., minimum_requirements).

        Skipped (optional, unset) and errored rules are excluded from the
        weighted score. A disabled rule set always passes with score 1.0.

        Args:
            rule_set: {
                "enabled": bool,
                "pass_strategy": str,   # all_must_pass | weighted_score | at_least_n
                "pass_threshold": float,
                "rules": [...]
            }
            activity: Activity data
            parameters_registry: Parameter configurations

        Returns:
            {
                "enabled": bool,
                "passed": bool,
                "score": float (0-1),
                "rule_results": [...],
                "pass_strategy": str,
                "pass_threshold": float,
                "failed_rules": [...]
            }
        """
        if not rule_set.get("enabled", False):
            return {
                "enabled": False,
                "passed": True,
                "score": 1.0,
                "rule_results": [],
                "failed_rules": []
            }

        rules = rule_set.get("rules", [])
        pass_strategy = rule_set.get("pass_strategy", "weighted_score")
        pass_threshold = rule_set.get("pass_threshold", 0.6)

        rule_results = []
        failed_rules = []
        total_weight = 0
        passed_weight = 0

        # Evaluate each rule
        for rule in rules:
            result = cls.evaluate_rule(rule, activity, parameters_registry)
            rule_results.append(result)

            # Optional-and-missing rules don't count toward the score.
            if result.get("skipped"):
                continue

            # Misconfigured rules are logged but don't count either.
            if result.get("error"):
                logger.warning(f"[RULE-ENGINE] Rule error: {result['error']}")
                continue

            weight = result.get("weight", 1)
            total_weight += weight

            if result["passed"]:
                passed_weight += weight
            else:
                failed_rules.append(result)

        # No countable rules -> neutral perfect score.
        score = passed_weight / total_weight if total_weight > 0 else 1.0

        # Apply pass strategy
        if pass_strategy == "all_must_pass":
            passed = all(
                r["passed"] for r in rule_results
                if not r.get("skipped") and not r.get("error")
            )
        elif pass_strategy == "weighted_score":
            passed = score >= pass_threshold
        elif pass_strategy == "at_least_n":
            n = rule_set.get("at_least_n", 1)
            passed_count = sum(
                1 for r in rule_results
                if r["passed"] and not r.get("skipped")
            )
            passed = passed_count >= n
        else:
            passed = False
            logger.warning(f"[RULE-ENGINE] Unknown pass strategy: {pass_strategy}")

        return {
            "enabled": True,
            "passed": passed,
            "score": round(score, 2),
            "rule_results": rule_results,
            "failed_rules": failed_rules,
            "pass_strategy": pass_strategy,
            "pass_threshold": pass_threshold
        }
class IntensityZoneEvaluator:
    """
    Evaluates heart rate zones and time distribution.
    """

    @staticmethod
    def evaluate(
        zone_config: Dict,
        activity: Dict,
        user_profile: Dict
    ) -> Dict:
        """
        Evaluates which HR zone the activity was in.

        The dominant zone is the first configured zone whose
        "avg_hr_percent" range rule contains the activity's average HR
        expressed as a percentage of the user's max HR.

        Args:
            zone_config: intensity_zones configuration
            activity: Activity data (with hr_avg)
            user_profile: User profile (with hf_max)

        Returns:
            {
                "enabled": bool,
                "dominant_zone": str,
                "avg_hr_percent": float,
                "zone_color": str,
                "zone_effect": str,
                "duration_quality": float (0-1),
                "recommendation": str
            }
        """
        if not zone_config.get("enabled", False):
            return {"enabled": False}

        avg_hr = activity.get("hr_avg")
        user_max_hr = user_profile.get("hf_max", 180)  # Default 180 if not set

        # NOTE: truthiness check also treats hr_avg == 0 as missing data.
        if not avg_hr or not user_max_hr:
            return {
                "enabled": True,
                "dominant_zone": "unknown",
                "avg_hr_percent": None,
                "recommendation": "Herzfrequenz-Daten fehlen"
            }

        avg_hr_percent = (avg_hr / user_max_hr) * 100

        # Find the first zone whose HR-percent range matches.
        zones = zone_config.get("zones", [])
        dominant_zone = None

        for zone in zones:
            zone_rules = zone.get("rules", [])
            for rule in zone_rules:
                if rule["parameter"] == "avg_hr_percent":
                    min_percent, max_percent = rule["value"]
                    if min_percent <= avg_hr_percent <= max_percent:
                        dominant_zone = zone
                        break
            if dominant_zone:
                break

        if not dominant_zone:
            return {
                "enabled": True,
                "dominant_zone": "out_of_range",
                "avg_hr_percent": round(avg_hr_percent, 1),
                "recommendation": "Herzfrequenz außerhalb definierter Zonen"
            }

        # Duration quality: fraction of the zone's target duration achieved,
        # capped at 1.0 (longer than target is not penalized or rewarded).
        duration = activity.get("duration_min", 0)
        target_duration = dominant_zone.get("target_duration_min", 30)
        duration_quality = min(duration / target_duration, 1.0) if target_duration > 0 else 1.0

        recommendation = f"Training in Zone '{dominant_zone['name']}' (Effekt: {dominant_zone['effect']})."
        if duration < target_duration:
            recommendation += f" Für optimale Wirkung: {target_duration}min empfohlen."

        return {
            "enabled": True,
            "dominant_zone": dominant_zone.get("id"),
            "dominant_zone_name": dominant_zone.get("name"),
            "avg_hr_percent": round(avg_hr_percent, 1),
            "zone_color": dominant_zone.get("color"),
            "zone_effect": dominant_zone.get("effect"),
            "duration_quality": round(duration_quality, 2),
            "target_duration_min": target_duration,
            "actual_duration_min": duration,
            "recommendation": recommendation
        }
class TrainingEffectsEvaluator:
    """
    Evaluates which abilities are trained by the activity.
    """

    # Secondary abilities contribute at 70% of the quality factor.
    SECONDARY_FACTOR = 0.7

    @staticmethod
    def evaluate(
        effects_config: Dict,
        activity: Dict,
        intensity_zone_result: Optional[Dict] = None
    ) -> Dict:
        """
        Evaluates training effects (abilities trained).

        Uses the profile's default_effects; primary abilities count fully,
        secondary abilities at 70%. The quality factor is currently a
        constant 1.0 (simplified; intensity_zone_result is accepted for
        future use but not yet consumed).

        Args:
            effects_config: training_effects configuration
            activity: Activity data
            intensity_zone_result: Result from intensity zone evaluation

        Returns:
            {
                "enabled": bool,
                "abilities_trained": [...],
                "total_training_load": float
            }
        """
        if not effects_config.get("enabled", False):
            return {"enabled": False}

        abilities_trained = []

        # Use default effects if no conditional matching
        default_effects = effects_config.get("default_effects", {})
        primary_abilities = default_effects.get("primary_abilities", [])
        secondary_abilities = default_effects.get("secondary_abilities", [])

        # Calculate quality factor (simplified for now)
        quality_factor = 1.0

        # Primary and secondary abilities share the same result shape;
        # they differ only in the quality scaling factor and type label.
        ability_groups = (
            (primary_abilities, 1.0, "primary"),
            (secondary_abilities, TrainingEffectsEvaluator.SECONDARY_FACTOR, "secondary"),
        )
        for abilities, factor, ability_type in ability_groups:
            for ability in abilities:
                quality = quality_factor * factor
                abilities_trained.append({
                    "category": ability["category"],
                    "ability": ability["ability"],
                    "intensity": ability["intensity"],
                    "quality": quality,
                    "contribution": ability["intensity"] * quality,
                    "type": ability_type
                })

        total_training_load = sum(a["contribution"] for a in abilities_trained)

        return {
            "enabled": True,
            "abilities_trained": abilities_trained,
            "total_training_load": round(total_training_load, 2),
            "metabolic_focus": effects_config.get("metabolic_focus", []),
            "muscle_groups": effects_config.get("muscle_groups", [])
        }