Phase 1.2: guard auto-evaluation behind an import check so the activity router
still loads when evaluation_helper is unavailable. The except clause must not
use the module-level `logger` (it is defined further down the module), so it
builds its own logger via logging.getLogger(__name__).

diff --git a/backend/routers/activity.py b/backend/routers/activity.py
index b718134..a7a2e64 100644
--- a/backend/routers/activity.py
+++ b/backend/routers/activity.py
@@ -16,7 +16,15 @@ from auth import require_auth, check_feature_access, increment_feature_usage
 from models import ActivityEntry
 from routers.profiles import get_pid
 from feature_logger import log_feature_usage
-from evaluation_helper import evaluate_and_save_activity
+
+# Evaluation import with error handling (Phase 1.2)
+try:
+    from evaluation_helper import evaluate_and_save_activity
+    EVALUATION_AVAILABLE = True
+except Exception as e:
+    logging.getLogger(__name__).warning(f"[AUTO-EVAL] Evaluation system not available: {e}")  # `logger` is not defined yet here
+    EVALUATION_AVAILABLE = False
+    evaluate_and_save_activity = None
 
 router = APIRouter(prefix="/api/activity", tags=["activity"])
 logger = logging.getLogger(__name__)
@@ -66,24 +74,25 @@ def create_activity(e: ActivityEntry, x_profile_id: Optional[str]=Header(default
                          d['rpe'],d['source'],d['notes']))
 
     # Phase 1.2: Auto-evaluation after INSERT
-    # Load the activity data to evaluate
-    cur.execute("""
-        SELECT id, profile_id, date, training_type_id, duration_min,
-               hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
-               rpe, pace_min_per_km, cadence, elevation_gain
-        FROM activity_log
-        WHERE id = %s
-    """, (eid,))
-    activity_row = cur.fetchone()
-    if activity_row:
-        activity_dict = dict(activity_row)
-        training_type_id = activity_dict.get("training_type_id")
-        if training_type_id:
-            try:
-                evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
-                logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT")
-            except Exception as eval_error:
-                logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}")
+    if EVALUATION_AVAILABLE:
+        # Load the activity data to evaluate
+        cur.execute("""
+            SELECT id, profile_id, date, training_type_id, duration_min,
+                   hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
+                   rpe, pace_min_per_km, cadence, elevation_gain
+            FROM activity_log
+            WHERE id = %s
+        """, (eid,))
+        activity_row = cur.fetchone()
+        if activity_row:
+            activity_dict = dict(activity_row)
+            training_type_id = activity_dict.get("training_type_id")
+            if training_type_id:
+                try:
+                    evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
+                    logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT")
+                except Exception as eval_error:
+                    logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}")
 
     # Phase 2: Increment usage counter (always for new entries)
     increment_feature_usage(pid, 'activity_entries')
@@ -102,24 +111,25 @@ def update_activity(eid: str, e: ActivityEntry, x_profile_id: Optional[str]=Head
                 list(d.values())+[eid,pid])
 
     # Phase 1.2: Auto-evaluation after UPDATE
-    # Load the updated activity data to evaluate
-    cur.execute("""
-        SELECT id, profile_id, date, training_type_id, duration_min,
-               hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
-               rpe, pace_min_per_km, cadence, elevation_gain
-        FROM activity_log
-        WHERE id = %s
-    """, (eid,))
-    activity_row = cur.fetchone()
-    if activity_row:
-        activity_dict = dict(activity_row)
-        training_type_id = activity_dict.get("training_type_id")
-        if training_type_id:
-            try:
-                evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
-                logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE")
-            except Exception as eval_error:
-                logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}")
+    if EVALUATION_AVAILABLE:
+        # Load the updated activity data to evaluate
+        cur.execute("""
+            SELECT id, profile_id, date, training_type_id, duration_min,
+                   hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
+                   rpe, pace_min_per_km, cadence, elevation_gain
+            FROM activity_log
+            WHERE id = %s
+        """, (eid,))
+        activity_row = cur.fetchone()
+        if activity_row:
+            activity_dict = dict(activity_row)
+            training_type_id = activity_dict.get("training_type_id")
+            if training_type_id:
+                try:
+                    evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
+                    logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE")
+                except Exception as eval_error:
+                    logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}")
 
     return {"id":eid}
 
@@ -257,28 +267,29 @@ def bulk_categorize_activities(
     updated_count = cur.rowcount
 
     # Phase 1.2: Auto-evaluation after bulk categorization
-    # Load all activities that were just updated and evaluate them
-    cur.execute("""
-        SELECT id, profile_id, date, training_type_id, duration_min,
-               hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
-               rpe, pace_min_per_km, cadence, elevation_gain
-        FROM activity_log
-        WHERE profile_id = %s
-          AND activity_type = %s
-          AND training_type_id = %s
-    """, (pid, activity_type, training_type_id))
+    if EVALUATION_AVAILABLE:
+        # Load all activities that were just updated and evaluate them
+        cur.execute("""
+            SELECT id, profile_id, date, training_type_id, duration_min,
+                   hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
+                   rpe, pace_min_per_km, cadence, elevation_gain
+            FROM activity_log
+            WHERE profile_id = %s
+              AND activity_type = %s
+              AND training_type_id = %s
+        """, (pid, activity_type, training_type_id))
-
-    activities_to_evaluate = cur.fetchall()
-    evaluated_count = 0
-    for activity_row in activities_to_evaluate:
-        activity_dict = dict(activity_row)
-        try:
-            evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid)
-            evaluated_count += 1
-        except Exception as eval_error:
-            logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}")
+
+        activities_to_evaluate = cur.fetchall()
+        evaluated_count = 0
+        for activity_row in activities_to_evaluate:
+            activity_dict = dict(activity_row)
+            try:
+                evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid)
+                evaluated_count += 1
+            except Exception as eval_error:
+                logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}")
-
-    logger.info(f"[AUTO-EVAL] Evaluated {evaluated_count}/{updated_count} bulk-categorized activities")
+
+        logger.info(f"[AUTO-EVAL] Evaluated {evaluated_count}/{updated_count} bulk-categorized activities")
 
     # Save mapping for future imports (upsert)
     cur.execute("""
@@ -369,7 +380,7 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional
                     skipped += 1  # Count as skipped (not newly inserted)
 
                     # Phase 1.2: Auto-evaluation after CSV import UPDATE
-                    if training_type_id:
+                    if EVALUATION_AVAILABLE and training_type_id:
                         try:
                             # Build activity dict for evaluation
                             activity_dict = {
@@ -408,7 +419,7 @@ async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional
                     inserted+=1
 
                     # Phase 1.2: Auto-evaluation after CSV import INSERT
-                    if training_type_id:
+                    if EVALUATION_AVAILABLE and training_type_id:
                         try:
                             # Build activity dict for evaluation
                             activity_dict = {