fix: defensive evaluation import to prevent startup crash (#15)
All checks were successful
Deploy Development / deploy (push) Successful in 49s
Build Test / lint-backend (push) Successful in 1s
Build Test / build-frontend (push) Successful in 13s

Problem: Backend crashed on startup due to evaluation import failure
Solution: Wrap evaluation_helper import in try/except

Changes:
- Import evaluation_helper with error handling
- Add EVALUATION_AVAILABLE flag
- All evaluation calls now check flag before executing
- System remains functional even if evaluation system unavailable

This prevents backend crashes if:
- Migrations haven't run yet
- Dependencies are missing
- Import errors occur

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
Lars 2026-03-23 10:59:23 +01:00
parent e11953736d
commit edd15dd556

View File

@ -16,7 +16,15 @@ from auth import require_auth, check_feature_access, increment_feature_usage
from models import ActivityEntry from models import ActivityEntry
from routers.profiles import get_pid from routers.profiles import get_pid
from feature_logger import log_feature_usage from feature_logger import log_feature_usage
from evaluation_helper import evaluate_and_save_activity
# Evaluation import with error handling (Phase 1.2).
# If the evaluation system cannot be imported (migrations not run yet,
# missing dependencies, import-time errors), the backend must stay up;
# every evaluation call site checks EVALUATION_AVAILABLE before calling.
try:
    from evaluation_helper import evaluate_and_save_activity
    EVALUATION_AVAILABLE = True
except Exception as e:
    # BUG FIX: the module-level `logger` is only assigned further down in
    # this file, so referencing it here raised NameError on import failure —
    # crashing startup, which is exactly what this guard exists to prevent.
    # Fetch a logger directly instead.
    logging.getLogger(__name__).warning(f"[AUTO-EVAL] Evaluation system not available: {e}")
    EVALUATION_AVAILABLE = False
    # Keep the name bound so accidental direct calls fail loudly with
    # TypeError('NoneType' is not callable) rather than NameError.
    evaluate_and_save_activity = None
router = APIRouter(prefix="/api/activity", tags=["activity"]) router = APIRouter(prefix="/api/activity", tags=["activity"])
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -66,24 +74,25 @@ def create_activity(e: ActivityEntry, x_profile_id: Optional[str]=Header(default
d['rpe'],d['source'],d['notes'])) d['rpe'],d['source'],d['notes']))
# Phase 1.2: Auto-evaluation after INSERT # Phase 1.2: Auto-evaluation after INSERT
# Load the activity data to evaluate if EVALUATION_AVAILABLE:
cur.execute(""" # Load the activity data to evaluate
SELECT id, profile_id, date, training_type_id, duration_min, cur.execute("""
hr_avg, hr_max, distance_km, kcal_active, kcal_resting, SELECT id, profile_id, date, training_type_id, duration_min,
rpe, pace_min_per_km, cadence, elevation_gain hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
FROM activity_log rpe, pace_min_per_km, cadence, elevation_gain
WHERE id = %s FROM activity_log
""", (eid,)) WHERE id = %s
activity_row = cur.fetchone() """, (eid,))
if activity_row: activity_row = cur.fetchone()
activity_dict = dict(activity_row) if activity_row:
training_type_id = activity_dict.get("training_type_id") activity_dict = dict(activity_row)
if training_type_id: training_type_id = activity_dict.get("training_type_id")
try: if training_type_id:
evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid) try:
logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT") evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
except Exception as eval_error: logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT")
logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}") except Exception as eval_error:
logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}")
# Phase 2: Increment usage counter (always for new entries) # Phase 2: Increment usage counter (always for new entries)
increment_feature_usage(pid, 'activity_entries') increment_feature_usage(pid, 'activity_entries')
@ -102,24 +111,25 @@ def update_activity(eid: str, e: ActivityEntry, x_profile_id: Optional[str]=Head
list(d.values())+[eid,pid]) list(d.values())+[eid,pid])
# Phase 1.2: Auto-evaluation after UPDATE # Phase 1.2: Auto-evaluation after UPDATE
# Load the updated activity data to evaluate if EVALUATION_AVAILABLE:
cur.execute(""" # Load the updated activity data to evaluate
SELECT id, profile_id, date, training_type_id, duration_min, cur.execute("""
hr_avg, hr_max, distance_km, kcal_active, kcal_resting, SELECT id, profile_id, date, training_type_id, duration_min,
rpe, pace_min_per_km, cadence, elevation_gain hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
FROM activity_log rpe, pace_min_per_km, cadence, elevation_gain
WHERE id = %s FROM activity_log
""", (eid,)) WHERE id = %s
activity_row = cur.fetchone() """, (eid,))
if activity_row: activity_row = cur.fetchone()
activity_dict = dict(activity_row) if activity_row:
training_type_id = activity_dict.get("training_type_id") activity_dict = dict(activity_row)
if training_type_id: training_type_id = activity_dict.get("training_type_id")
try: if training_type_id:
evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid) try:
logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE") evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
except Exception as eval_error: logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE")
logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}") except Exception as eval_error:
logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}")
return {"id":eid} return {"id":eid}
@ -257,28 +267,29 @@ def bulk_categorize_activities(
updated_count = cur.rowcount updated_count = cur.rowcount
# Phase 1.2: Auto-evaluation after bulk categorization # Phase 1.2: Auto-evaluation after bulk categorization
# Load all activities that were just updated and evaluate them if EVALUATION_AVAILABLE:
cur.execute(""" # Load all activities that were just updated and evaluate them
SELECT id, profile_id, date, training_type_id, duration_min, cur.execute("""
hr_avg, hr_max, distance_km, kcal_active, kcal_resting, SELECT id, profile_id, date, training_type_id, duration_min,
rpe, pace_min_per_km, cadence, elevation_gain hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
FROM activity_log rpe, pace_min_per_km, cadence, elevation_gain
WHERE profile_id = %s FROM activity_log
AND activity_type = %s WHERE profile_id = %s
AND training_type_id = %s AND activity_type = %s
""", (pid, activity_type, training_type_id)) AND training_type_id = %s
""", (pid, activity_type, training_type_id))
activities_to_evaluate = cur.fetchall() activities_to_evaluate = cur.fetchall()
evaluated_count = 0 evaluated_count = 0
for activity_row in activities_to_evaluate: for activity_row in activities_to_evaluate:
activity_dict = dict(activity_row) activity_dict = dict(activity_row)
try: try:
evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid) evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid)
evaluated_count += 1 evaluated_count += 1
except Exception as eval_error: except Exception as eval_error:
logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}") logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}")
logger.info(f"[AUTO-EVAL] Evaluated {evaluated_count}/{updated_count} bulk-categorized activities") logger.info(f"[AUTO-EVAL] Evaluated {evaluated_count}/{updated_count} bulk-categorized activities")
# Save mapping for future imports (upsert) # Save mapping for future imports (upsert)
cur.execute(""" cur.execute("""
@ -369,7 +380,7 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional
skipped += 1 # Count as skipped (not newly inserted) skipped += 1 # Count as skipped (not newly inserted)
# Phase 1.2: Auto-evaluation after CSV import UPDATE # Phase 1.2: Auto-evaluation after CSV import UPDATE
if training_type_id: if EVALUATION_AVAILABLE and training_type_id:
try: try:
# Build activity dict for evaluation # Build activity dict for evaluation
activity_dict = { activity_dict = {
@ -408,7 +419,7 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional
inserted+=1 inserted+=1
# Phase 1.2: Auto-evaluation after CSV import INSERT # Phase 1.2: Auto-evaluation after CSV import INSERT
if training_type_id: if EVALUATION_AVAILABLE and training_type_id:
try: try:
# Build activity dict for evaluation # Build activity dict for evaluation
activity_dict = { activity_dict = {