Implemented global quality_filter_level in user profiles for consistent data filtering across all views (Dashboard, History, Charts, KI-Pipeline). Backend changes: - Migration 016: Add quality_filter_level column to profiles table - quality_filter.py: Centralized helper functions for SQL filtering - insights.py: Apply global filter in _get_profile_data() - activity.py: Apply global filter in list_activity() Frontend changes: - SettingsPage.jsx: Add Datenqualität section with 4-level selector - History.jsx: Use global quality filter from profile context Filter levels: all, quality (good+excellent+acceptable), very_good (good+excellent), excellent (only excellent) Closes #31 Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
461 lines
20 KiB
Python
461 lines
20 KiB
Python
"""
|
|
Activity Tracking Endpoints for Mitai Jinkendo
|
|
|
|
Handles workout/activity logging, statistics, and Apple Health CSV import.
|
|
"""
|
|
import csv
|
|
import io
|
|
import uuid
|
|
import logging
|
|
from typing import Optional
|
|
|
|
from fastapi import APIRouter, HTTPException, UploadFile, File, Header, Depends
|
|
|
|
from db import get_db, get_cursor, r2d
|
|
from auth import require_auth, check_feature_access, increment_feature_usage
|
|
from models import ActivityEntry
|
|
from routers.profiles import get_pid
|
|
from feature_logger import log_feature_usage
|
|
from quality_filter import get_quality_filter_sql
|
|
|
|
# Evaluation import with error handling (Phase 1.2).
# The evaluation system is optional: if the helper module is missing or broken,
# the router still loads and all auto-evaluation steps become no-ops.
try:
    from evaluation_helper import evaluate_and_save_activity
    EVALUATION_AVAILABLE = True
except Exception as e:
    # BUG FIX: the module-level `logger` is defined *below* this block, so
    # referencing it here raised NameError whenever the import failed.
    # Obtain a logger locally instead.
    logging.getLogger(__name__).warning(f"[AUTO-EVAL] Evaluation system not available: {e}")
    EVALUATION_AVAILABLE = False
    evaluate_and_save_activity = None
|
|
|
|
# Router mounted by the main app; all endpoints below live under /api/activity.
router = APIRouter(prefix="/api/activity", tags=["activity"])
# Module-level logger named after this module, per logging convention.
logger = logging.getLogger(__name__)
|
|
|
|
|
|
@router.get("")
def list_activity(limit: int=200, x_profile_id: Optional[str]=Header(default=None), session: dict=Depends(require_auth)):
    """Return the current profile's activity entries, newest first.

    The profile's global quality filter (Issue #31) is applied so that this
    list stays consistent with the Dashboard/History/Chart views. At most
    `limit` rows are returned, ordered by date and start time descending.
    """
    pid = get_pid(x_profile_id)
    with get_db() as conn:
        cur = get_cursor(conn)

        # Issue #31: load the profile first to derive its quality-filter clause.
        cur.execute("SELECT * FROM profiles WHERE id=%s", (pid,))
        filter_clause = get_quality_filter_sql(r2d(cur.fetchone()))

        query = f"""
            SELECT * FROM activity_log
            WHERE profile_id=%s
            {filter_clause}
            ORDER BY date DESC, start_time DESC
            LIMIT %s
        """
        cur.execute(query, (pid, limit))
        return list(map(r2d, cur.fetchall()))
|
|
|
|
|
|
@router.post("")
def create_activity(e: ActivityEntry, x_profile_id: Optional[str]=Header(default=None), session: dict=Depends(require_auth)):
    """Create new activity entry.

    Enforces the per-profile 'activity_entries' quota before inserting, then
    auto-evaluates the new entry (Phase 1.2) if a training type is already
    assigned, and finally increments the quota usage counter.

    Raises:
        HTTPException(403): when the profile's activity-entry quota is exhausted.
    Returns:
        dict: {"id": new entry id, "date": the entry's date}.
    """
    pid = get_pid(x_profile_id)

    # Phase 4: Check feature access and ENFORCE
    access = check_feature_access(pid, 'activity_entries')
    log_feature_usage(pid, 'activity_entries', access, 'create')

    if not access['allowed']:
        logger.warning(
            f"[FEATURE-LIMIT] User {pid} blocked: "
            f"activity_entries {access['reason']} (used: {access['used']}, limit: {access['limit']})"
        )
        raise HTTPException(
            status_code=403,
            detail=f"Limit erreicht: Du hast das Kontingent für Aktivitätseinträge überschritten ({access['used']}/{access['limit']}). "
            f"Bitte kontaktiere den Admin oder warte bis zum nächsten Reset."
        )

    eid = str(uuid.uuid4())
    d = e.model_dump()
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute("""INSERT INTO activity_log
            (id,profile_id,date,start_time,end_time,activity_type,duration_min,kcal_active,kcal_resting,
            hr_avg,hr_max,distance_km,rpe,source,notes,created)
            VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,CURRENT_TIMESTAMP)""",
            (eid,pid,d['date'],d['start_time'],d['end_time'],d['activity_type'],d['duration_min'],
             d['kcal_active'],d['kcal_resting'],d['hr_avg'],d['hr_max'],d['distance_km'],
             d['rpe'],d['source'],d['notes']))

        # Phase 1.2: Auto-evaluation after INSERT
        if EVALUATION_AVAILABLE:
            # Load the activity data to evaluate — re-read from the DB so the
            # evaluator sees stored values (incl. DB-side defaults), not the
            # raw request payload.
            cur.execute("""
                SELECT id, profile_id, date, training_type_id, duration_min,
                       hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
                       rpe, pace_min_per_km, cadence, elevation_gain
                FROM activity_log
                WHERE id = %s
            """, (eid,))
            activity_row = cur.fetchone()
            if activity_row:
                activity_dict = dict(activity_row)
                training_type_id = activity_dict.get("training_type_id")
                # Evaluation only runs once a training type is assigned.
                if training_type_id:
                    try:
                        evaluate_and_save_activity(cur, eid, activity_dict, training_type_id, pid)
                        logger.info(f"[AUTO-EVAL] Evaluated activity {eid} on INSERT")
                    except Exception as eval_error:
                        # Evaluation is best-effort; never fail the create for it.
                        logger.error(f"[AUTO-EVAL] Failed to evaluate activity {eid}: {eval_error}")

    # Phase 2: Increment usage counter (always for new entries)
    increment_feature_usage(pid, 'activity_entries')

    return {"id":eid,"date":e.date}
|
|
|
|
|
|
@router.put("/{eid}")
def update_activity(eid: str, e: ActivityEntry, x_profile_id: Optional[str]=Header(default=None), session: dict=Depends(require_auth)):
    """Overwrite an existing activity entry and re-run auto-evaluation.

    All fields of the payload model are written; the row is matched by both
    id and profile ownership. If the evaluation system is available and the
    stored row has a training type, the entry is re-evaluated (Phase 1.2).
    """
    pid = get_pid(x_profile_id)
    with get_db() as conn:
        payload = e.model_dump()
        cur = get_cursor(conn)
        assignments = ', '.join(f'{k}=%s' for k in payload)
        cur.execute(f"UPDATE activity_log SET {assignments} WHERE id=%s AND profile_id=%s",
                    list(payload.values()) + [eid, pid])

        # Phase 1.2: Auto-evaluation after UPDATE
        if EVALUATION_AVAILABLE:
            # Re-read the stored row so evaluation uses the persisted values.
            cur.execute("""
                SELECT id, profile_id, date, training_type_id, duration_min,
                       hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
                       rpe, pace_min_per_km, cadence, elevation_gain
                FROM activity_log
                WHERE id = %s
            """, (eid,))
            stored = cur.fetchone()
            if stored:
                stored = dict(stored)
                tt_id = stored.get("training_type_id")
                if tt_id:
                    try:
                        evaluate_and_save_activity(cur, eid, stored, tt_id, pid)
                        logger.info(f"[AUTO-EVAL] Re-evaluated activity {eid} on UPDATE")
                    except Exception as eval_error:
                        # Best-effort: the update itself must still succeed.
                        logger.error(f"[AUTO-EVAL] Failed to re-evaluate activity {eid}: {eval_error}")

    return {"id":eid}
|
|
|
|
|
|
@router.delete("/{eid}")
def delete_activity(eid: str, x_profile_id: Optional[str]=Header(default=None), session: dict=Depends(require_auth)):
    """Remove an activity entry; the row must belong to the current profile."""
    pid = get_pid(x_profile_id)
    with get_db() as conn:
        get_cursor(conn).execute(
            "DELETE FROM activity_log WHERE id=%s AND profile_id=%s", (eid, pid))
    return {"ok":True}
|
|
|
|
|
|
@router.get("/stats")
def activity_stats(x_profile_id: Optional[str]=Header(default=None), session: dict=Depends(require_auth)):
    """Aggregate statistics over the 30 most recent activity entries.

    Returns:
        dict: {"count": rows considered, "total_kcal": rounded active kcal,
               "total_min": rounded minutes, "by_type": per-activity-type
               {"count", "kcal", "min"} breakdown}.
    """
    # NOTE(review): unlike list_activity, this endpoint does NOT apply the
    # global quality filter from Issue #31 — confirm whether that is intended.
    pid = get_pid(x_profile_id)
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute(
            "SELECT * FROM activity_log WHERE profile_id=%s ORDER BY date DESC LIMIT 30", (pid,))
        rows = [r2d(r) for r in cur.fetchall()]
    if not rows:
        return {"count": 0, "total_kcal": 0, "total_min": 0, "by_type": {}}

    # Missing values are treated as 0 (columns may be NULL).
    total_kcal = sum(float(r.get('kcal_active') or 0) for r in rows)
    total_min = sum(float(r.get('duration_min') or 0) for r in rows)

    # Idiom: defaultdict replaces the manual setdefault-per-row grouping.
    by_type = defaultdict(lambda: {'count': 0, 'kcal': 0, 'min': 0})
    for r in rows:
        bucket = by_type[r['activity_type']]
        bucket['count'] += 1
        bucket['kcal'] += float(r.get('kcal_active') or 0)
        bucket['min'] += float(r.get('duration_min') or 0)

    return {"count": len(rows), "total_kcal": round(total_kcal),
            "total_min": round(total_min), "by_type": dict(by_type)}
|
|
|
|
|
|
def get_training_type_for_activity(activity_type: str, profile_id: Optional[str] = None):
    """
    Map activity_type to training_type_id using database mappings.

    Priority:
    1. User-specific mapping (profile_id)
    2. Global mapping (profile_id = NULL)
    3. No mapping found → returns (None, None, None)

    Returns: (training_type_id, category, subcategory) or (None, None, None)
    """
    with get_db() as conn:
        cur = get_cursor(conn)

        # The user-specific and global lookups differed only in the profile
        # predicate; run them as one prioritized sequence instead of two
        # copy-pasted queries. Only the WHERE fragment varies — never user data.
        lookups = []
        if profile_id:
            lookups.append(("m.profile_id = %s", (activity_type, profile_id)))
        lookups.append(("m.profile_id IS NULL", (activity_type,)))

        for profile_clause, params in lookups:
            cur.execute(f"""
                SELECT m.training_type_id, t.category, t.subcategory
                FROM activity_type_mappings m
                JOIN training_types t ON m.training_type_id = t.id
                WHERE m.activity_type = %s AND {profile_clause}
                LIMIT 1
            """, params)
            row = cur.fetchone()
            if row:
                return (row['training_type_id'], row['category'], row['subcategory'])

    return (None, None, None)
|
|
|
|
|
|
@router.get("/uncategorized")
def list_uncategorized_activities(x_profile_id: Optional[str]=Header(default=None), session: dict=Depends(require_auth)):
    """Summarize activities still lacking a training type, per activity_type.

    Each row carries the activity_type, how many entries share it, and the
    first/last dates it occurred — ordered by frequency, most common first.
    """
    pid = get_pid(x_profile_id)
    summary_sql = """
        SELECT activity_type, COUNT(*) as count,
               MIN(date) as first_date, MAX(date) as last_date
        FROM activity_log
        WHERE profile_id=%s AND training_type_id IS NULL
        GROUP BY activity_type
        ORDER BY count DESC
    """
    with get_db() as conn:
        cur = get_cursor(conn)
        cur.execute(summary_sql, (pid,))
        return list(map(r2d, cur.fetchall()))
|
|
|
|
|
|
@router.post("/bulk-categorize")
def bulk_categorize_activities(
    data: dict,
    x_profile_id: Optional[str]=Header(default=None),
    session: dict=Depends(require_auth)
):
    """
    Bulk update training type for activities.

    Also saves the mapping to activity_type_mappings for future imports.

    Body: {
        "activity_type": "Running",
        "training_type_id": 1,
        "training_category": "cardio",
        "training_subcategory": "running"
    }

    Raises:
        HTTPException(400): if activity_type or training_type_id is missing.
    Returns:
        dict: {"updated": rows updated, "activity_type": ..., "mapping_saved": True}
    """
    pid = get_pid(x_profile_id)
    activity_type = data.get('activity_type')
    training_type_id = data.get('training_type_id')
    training_category = data.get('training_category')
    training_subcategory = data.get('training_subcategory')

    if not activity_type or not training_type_id:
        raise HTTPException(400, "activity_type and training_type_id required")

    with get_db() as conn:
        cur = get_cursor(conn)

        # Update existing activities — only rows still uncategorized
        # (training_type_id IS NULL), so prior assignments are preserved.
        cur.execute("""
            UPDATE activity_log
            SET training_type_id = %s,
                training_category = %s,
                training_subcategory = %s
            WHERE profile_id = %s
              AND activity_type = %s
              AND training_type_id IS NULL
        """, (training_type_id, training_category, training_subcategory, pid, activity_type))
        updated_count = cur.rowcount

        # Phase 1.2: Auto-evaluation after bulk categorization
        if EVALUATION_AVAILABLE:
            # Load all activities that were just updated and evaluate them.
            # NOTE(review): this SELECT matches *every* activity of this type
            # with this training_type_id — including rows categorized in
            # earlier calls, not only those updated above. evaluated_count can
            # therefore exceed updated_count; confirm re-evaluation is intended.
            cur.execute("""
                SELECT id, profile_id, date, training_type_id, duration_min,
                       hr_avg, hr_max, distance_km, kcal_active, kcal_resting,
                       rpe, pace_min_per_km, cadence, elevation_gain
                FROM activity_log
                WHERE profile_id = %s
                  AND activity_type = %s
                  AND training_type_id = %s
            """, (pid, activity_type, training_type_id))

            activities_to_evaluate = cur.fetchall()
            evaluated_count = 0
            for activity_row in activities_to_evaluate:
                activity_dict = dict(activity_row)
                try:
                    evaluate_and_save_activity(cur, activity_dict["id"], activity_dict, training_type_id, pid)
                    evaluated_count += 1
                except Exception as eval_error:
                    # Best-effort: one failing row must not abort the batch.
                    logger.warning(f"[AUTO-EVAL] Failed to evaluate bulk-categorized activity {activity_dict['id']}: {eval_error}")

            logger.info(f"[AUTO-EVAL] Evaluated {evaluated_count}/{updated_count} bulk-categorized activities")

        # Save mapping for future imports (upsert keyed on activity_type+profile)
        cur.execute("""
            INSERT INTO activity_type_mappings (activity_type, training_type_id, profile_id, source, updated_at)
            VALUES (%s, %s, %s, 'bulk', CURRENT_TIMESTAMP)
            ON CONFLICT (activity_type, profile_id)
            DO UPDATE SET
                training_type_id = EXCLUDED.training_type_id,
                source = 'bulk',
                updated_at = CURRENT_TIMESTAMP
        """, (activity_type, training_type_id, pid))

        logger.info(f"[MAPPING] Saved bulk mapping: {activity_type} → training_type_id {training_type_id} (profile {pid})")

    return {"updated": updated_count, "activity_type": activity_type, "mapping_saved": True}
|
|
|
|
|
|
@router.post("/import-csv")
async def import_activity_csv(file: UploadFile=File(...), x_profile_id: Optional[str]=Header(default=None), session: dict=Depends(require_auth)):
    """Import Apple Health workout CSV with automatic training type mapping.

    Rows are deduplicated by (profile_id, date, start_time): an existing row
    is updated in place (counted as "skipped"), otherwise a new one is
    inserted. When the evaluation system is available and a training type was
    mapped, each written activity is auto-evaluated (Phase 1.2, best-effort).

    Raises:
        HTTPException(400): if the uploaded file is empty.
    Returns:
        dict: {"inserted": int, "skipped": int, "message": str}.
    """
    pid = get_pid(x_profile_id)
    raw = await file.read()
    try:
        text = raw.decode('utf-8')
    except UnicodeDecodeError:
        # Legacy export fallback; latin-1 decoding never fails.
        text = raw.decode('latin-1')
    if text.startswith('\ufeff'):  # strip UTF-8 BOM
        text = text[1:]
    if not text.strip():
        raise HTTPException(400, "Leere Datei")

    # FIX: these helpers were re-defined on every CSV row inside the loop;
    # they are pure, so hoist them out. Bare `except:` narrowed to the
    # exceptions float() actually raises.
    def kj_to_kcal(v):
        """Convert a kJ string to whole kcal; None for empty/invalid input."""
        try:
            return round(float(v) / 4.184) if v else None
        except (TypeError, ValueError):
            return None

    def to_float(v):
        """Parse a numeric string rounded to 1 decimal; None if empty/invalid."""
        try:
            return round(float(v), 1) if v else None
        except (TypeError, ValueError):
            return None

    def parse_duration(dur):
        """Parse an 'H:MM:SS' duration into minutes (1 decimal); None on failure."""
        if not dur:
            return None
        try:
            p = dur.split(':')
            return round(int(p[0]) * 60 + int(p[1]) + int(p[2]) / 60, 1)
        except (ValueError, IndexError):
            return None

    reader = csv.DictReader(io.StringIO(text))
    inserted = skipped = 0
    with get_db() as conn:
        cur = get_cursor(conn)
        for row in reader:
            wtype = row.get('Workout Type', '').strip()
            start = row.get('Start', '').strip()
            if not wtype or not start:
                continue  # not a usable workout row
            # ISO timestamp prefix; slicing a str never raises, so the
            # original try/except around it was dead code.
            date = start[:10]
            duration_min = parse_duration(row.get('Duration', '').strip())

            # Parse each metric once per row (previously the same conversions
            # ran twice: for the SQL params and again for the eval payload).
            kcal_active = kj_to_kcal(row.get('Aktive Energie (kJ)', ''))
            kcal_resting = kj_to_kcal(row.get('Ruheeinträge (kJ)', ''))
            hr_avg = to_float(row.get('Durchschn. Herzfrequenz (count/min)', ''))
            hr_max = to_float(row.get('Max. Herzfrequenz (count/min)', ''))
            distance_km = to_float(row.get('Distanz (km)', ''))

            # Map activity_type to training_type_id using database mappings
            training_type_id, training_category, training_subcategory = \
                get_training_type_for_activity(wtype, pid)

            # Evaluation payload shared by both branches; only "id" differs.
            base_eval = {
                "profile_id": pid,
                "date": date,
                "training_type_id": training_type_id,
                "duration_min": duration_min,
                "hr_avg": hr_avg,
                "hr_max": hr_max,
                "distance_km": distance_km,
                "kcal_active": kcal_active,
                "kcal_resting": kcal_resting,
                "rpe": None,
                "pace_min_per_km": None,
                "cadence": None,
                "elevation_gain": None,
            }

            try:
                # Duplicate detection by (profile_id, date, start_time)
                cur.execute("""
                    SELECT id FROM activity_log
                    WHERE profile_id = %s AND date = %s AND start_time = %s
                """, (pid, date, start))
                existing = cur.fetchone()

                if existing:
                    # Update existing entry (e.g., to add training type mapping)
                    existing_id = existing['id']
                    cur.execute("""
                        UPDATE activity_log
                        SET end_time = %s,
                            activity_type = %s,
                            duration_min = %s,
                            kcal_active = %s,
                            kcal_resting = %s,
                            hr_avg = %s,
                            hr_max = %s,
                            distance_km = %s,
                            training_type_id = %s,
                            training_category = %s,
                            training_subcategory = %s
                        WHERE id = %s
                    """, (
                        row.get('End', ''), wtype, duration_min,
                        kcal_active, kcal_resting,
                        hr_avg, hr_max, distance_km,
                        training_type_id, training_category, training_subcategory,
                        existing_id
                    ))
                    skipped += 1  # Count as skipped (not newly inserted)

                    # Phase 1.2: Auto-evaluation after CSV import UPDATE
                    if EVALUATION_AVAILABLE and training_type_id:
                        try:
                            evaluate_and_save_activity(cur, existing_id, {**base_eval, "id": existing_id}, training_type_id, pid)
                            logger.debug(f"[AUTO-EVAL] Re-evaluated updated activity {existing_id}")
                        except Exception as eval_error:
                            logger.warning(f"[AUTO-EVAL] Failed to re-evaluate updated activity {existing_id}: {eval_error}")
                else:
                    # Insert new entry
                    new_id = str(uuid.uuid4())
                    cur.execute("""INSERT INTO activity_log
                        (id,profile_id,date,start_time,end_time,activity_type,duration_min,kcal_active,kcal_resting,
                        hr_avg,hr_max,distance_km,source,training_type_id,training_category,training_subcategory,created)
                        VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,'apple_health',%s,%s,%s,CURRENT_TIMESTAMP)""",
                        (new_id, pid, date, start, row.get('End', ''), wtype, duration_min,
                         kcal_active, kcal_resting,
                         hr_avg, hr_max, distance_km,
                         training_type_id, training_category, training_subcategory))
                    inserted += 1

                    # Phase 1.2: Auto-evaluation after CSV import INSERT
                    if EVALUATION_AVAILABLE and training_type_id:
                        try:
                            evaluate_and_save_activity(cur, new_id, {**base_eval, "id": new_id}, training_type_id, pid)
                            logger.debug(f"[AUTO-EVAL] Evaluated imported activity {new_id}")
                        except Exception as eval_error:
                            logger.warning(f"[AUTO-EVAL] Failed to evaluate imported activity {new_id}: {eval_error}")
            except Exception as e:
                # A malformed row must not abort the whole import.
                logger.warning(f"Import row failed: {e}")
                skipped += 1
    return {"inserted":inserted,"skipped":skipped,"message":f"{inserted} Trainings importiert"}
|