Add tests for Activity Cluster registration and smoke tests for login functionality
All checks were successful
Deploy Development / deploy (push) Successful in 44s
Build Test / lint-backend (push) Successful in 0s
Build Test / build-frontend (push) Successful in 13s

- Implemented a new test script `test_activity_registration.py` to verify the registration of Activity placeholders, ensuring all expected placeholders are registered, have complete metadata, and correct evidence distribution.
- Created a new smoke test suite `dev-smoke-test.spec.js` to validate the login process, dashboard loading, and navigation to key sections, while checking for critical console errors.
- Added a JSON file `test-results.last-run.json` to track the status of the last test run, indicating failures if any tests do not pass.
This commit is contained in:
Lars 2026-04-03 08:22:08 +02:00
parent 485aec40a0
commit 10f608438c
16 changed files with 514 additions and 0 deletions

63
create_issue_no_jq.sh Normal file
View File

@ -0,0 +1,63 @@
#!/bin/bash
#
# Create the Gitea issue without a jq dependency (python3 does all JSON work).
#
# SECURITY: a Gitea API token used to be hard-coded in this file and is
# therefore leaked in git history -- revoke/rotate it in Gitea.
# The token must now be supplied via the environment:
#   GITEA_TOKEN=<token> ./create_issue_no_jq.sh
GITEA_TOKEN="${GITEA_TOKEN:?ERROR: GITEA_TOKEN environment variable is required}"
GITEA_URL="${GITEA_URL:-http://192.168.2.144:3000}"
REPO_OWNER="Lars"
REPO_NAME="mitai-jinkendo"
# Issue body source file; overridable so the script also works outside c:/Dev.
ISSUE_FILE="${ISSUE_FILE:-c:/Dev/mitai-jinkendo/.claude/task/rework_0b_placeholder/ISSUE_METADATEN_REVIEW.md}"
PAYLOAD_FILE="/tmp/issue_payload.json"

# Always remove the temp payload, even when curl or python fails mid-way
# (the old version only cleaned up on the success path).
trap 'rm -f "$PAYLOAD_FILE"' EXIT

# Build the JSON payload entirely inside python: the issue body is read from
# the file there (skipping the first 29 metadata lines), so no untrusted text
# is ever interpolated into python source by the shell.
python3 - "$ISSUE_FILE" > "$PAYLOAD_FILE" <<'PYEOF'
import json
import sys

with open(sys.argv[1], encoding='utf-8') as fh:
    lines = fh.readlines()

payload = {
    "title": "Placeholder Registry: UNRESOLVED & TO_VERIFY Metadaten prüfen",
    "body": "".join(lines[29:]),  # skip the 29 metadata header lines
    "labels": [2, 3]
}
print(json.dumps(payload, ensure_ascii=False))
PYEOF

echo "Erstelle Gitea Issue..."
echo "Repository: $REPO_OWNER/$REPO_NAME"
echo ""

# Create the issue via the Gitea REST API.
RESPONSE=$(curl -s -X POST \
  "$GITEA_URL/api/v1/repos/$REPO_OWNER/$REPO_NAME/issues" \
  -H "Authorization: token $GITEA_TOKEN" \
  -H "Content-Type: application/json" \
  -d @"$PAYLOAD_FILE")

# Parse the response with python. The response is handed over via the
# environment instead of being interpolated into the heredoc, so quotes,
# backslashes or ''' sequences in the API response cannot break the script.
RESPONSE="$RESPONSE" python3 <<'PYEOF'
import json
import os

response = os.environ.get("RESPONSE", "")
try:
    data = json.loads(response)
    if 'number' in data:
        print("✓ Issue erfolgreich erstellt!")
        print("")
        print(f"Issue #{data['number']}")
        print(f"URL: {data['html_url']}")
        print("")
        print("✓ Fertig!")
    else:
        print("✗ Fehler beim Erstellen des Issues:")
        print(json.dumps(data, indent=2))
except Exception as e:
    print(f"✗ Fehler: {e}")
    print(response)
PYEOF

View File

@ -0,0 +1,78 @@
#!/bin/bash
#
# Script to create the Gitea issue:
# "Placeholder Registry: UNRESOLVED & TO_VERIFY Metadaten prüfen"
#
# Usage: ./create_metadaten_review_issue.sh YOUR_GITEA_TOKEN
#
# Requires: curl, jq
if [ -z "$1" ]; then
  echo "ERROR: Gitea Token erforderlich"
  echo "Usage: $0 YOUR_GITEA_TOKEN"
  echo ""
  echo "Token erstellen:"
  echo " 1. Gitea öffnen: http://192.168.2.144:3000"
  echo " 2. Settings → Applications → Generate New Token"
  echo " 3. Name: 'Claude Code Issue Management'"
  echo " 4. Scope: issue (read/write)"
  echo " 5. Token kopieren und als Argument übergeben"
  exit 1
fi

# Fail early with a clear message instead of a cryptic parse error later on.
if ! command -v jq > /dev/null 2>&1; then
  echo "ERROR: jq ist nicht installiert (Alternative: create_issue_no_jq.sh)"
  exit 1
fi

GITEA_TOKEN="$1"
GITEA_URL="http://192.168.2.144:3000"
REPO_OWNER="Lars"
REPO_NAME="mitai-jinkendo"
PAYLOAD_FILE="/tmp/gitea_issue_payload.json"

# Remove the temp payload on every exit path (the old version leaked the file
# whenever issue creation failed and the script exited early).
trap 'rm -f "$PAYLOAD_FILE"' EXIT

# Read the issue body from the file (skip the first 29 metadata lines).
ISSUE_BODY=$(tail -n +30 .claude/task/rework_0b_placeholder/ISSUE_METADATEN_REVIEW.md)

# Build the JSON payload; jq -Rs safely JSON-encodes the multi-line body.
# NOTE(review): "priority" is not a documented Gitea issue field -- verify it
# has any effect, otherwise drop it.
cat > "$PAYLOAD_FILE" << EOF
{
  "title": "Placeholder Registry: UNRESOLVED & TO_VERIFY Metadaten prüfen",
  "body": $(echo "$ISSUE_BODY" | jq -Rs .),
  "labels": [1, 2, 3],
  "priority": 2
}
EOF

echo "Erstelle Gitea Issue..."
echo "Repository: $REPO_OWNER/$REPO_NAME"
echo ""

# Create the issue via the Gitea REST API.
RESPONSE=$(curl -s -X POST \
  "$GITEA_URL/api/v1/repos/$REPO_OWNER/$REPO_NAME/issues" \
  -H "Authorization: token $GITEA_TOKEN" \
  -H "Content-Type: application/json" \
  -d @"$PAYLOAD_FILE")

# Check the response: success responses contain the new issue "number".
if echo "$RESPONSE" | grep -q '"number"'; then
  ISSUE_NUMBER=$(echo "$RESPONSE" | jq -r '.number')
  ISSUE_URL=$(echo "$RESPONSE" | jq -r '.html_url')
  echo "✓ Issue erfolgreich erstellt!"
  echo ""
  echo "Issue #$ISSUE_NUMBER"
  echo "URL: $ISSUE_URL"
  echo ""
  # Set labels explicitly in case they were not applied on creation.
  echo "Setze Labels..."
  curl -s -X POST \
    "$GITEA_URL/api/v1/repos/$REPO_OWNER/$REPO_NAME/issues/$ISSUE_NUMBER/labels" \
    -H "Authorization: token $GITEA_TOKEN" \
    -H "Content-Type: application/json" \
    -d '{"labels": [1, 2, 3]}' > /dev/null
  echo "✓ Fertig!"
else
  echo "✗ Fehler beim Erstellen des Issues:"
  echo "$RESPONSE" | jq .
  exit 1
fi

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

75
package-lock.json generated Normal file
View File

@ -0,0 +1,75 @@
{
"name": "mitai-jinkendo",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"devDependencies": {
"@playwright/test": "^1.58.2"
}
},
"node_modules/@playwright/test": {
"version": "1.58.2",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.58.2.tgz",
"integrity": "sha512-akea+6bHYBBfA9uQqSYmlJXn61cTa+jbO87xVLCWbTqbWadRVmhxlXATaOjOgcBaWU4ePo0wB41KMFv3o35IXA==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"playwright": "1.58.2"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=18"
}
},
"node_modules/fsevents": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/playwright": {
"version": "1.58.2",
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.58.2.tgz",
"integrity": "sha512-vA30H8Nvkq/cPBnNw4Q8TWz1EJyqgpuinBcHET0YVJVFldr8JDNiU9LaWAE1KqSkRYazuaBhTpB5ZzShOezQ6A==",
"dev": true,
"license": "Apache-2.0",
"dependencies": {
"playwright-core": "1.58.2"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=18"
},
"optionalDependencies": {
"fsevents": "2.3.2"
}
},
"node_modules/playwright-core": {
"version": "1.58.2",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.58.2.tgz",
"integrity": "sha512-yZkEtftgwS8CsfYo7nm0KE8jsvm6i/PTgVtB8DL726wNf6H2IMsDuxCpJj59KDaxCtSnrWan2AeDqM7JBaultg==",
"dev": true,
"license": "Apache-2.0",
"bin": {
"playwright-core": "cli.js"
},
"engines": {
"node": ">=18"
}
}
}
}

5
package.json Normal file
View File

@ -0,0 +1,5 @@
{
"devDependencies": {
"@playwright/test": "^1.58.2"
}
}

12
playwright.config.js Normal file
View File

@ -0,0 +1,12 @@
/**
 * Playwright configuration for the dev smoke tests.
 * Runs headless Chrome at a mobile-sized viewport against the dev deployment.
 */
const config = {
  testDir: './tests',
  timeout: 30000, // per-test timeout in ms
  use: {
    channel: 'chrome',
    headless: true,
    // Portrait phone viewport (390x844).
    viewport: { width: 390, height: 844 },
    // Keep artifacts small: capture screenshots only when a test fails.
    screenshot: 'only-on-failure',
    baseURL: 'https://dev.mitai.jinkendo.de',
  },
  reporter: 'list',
};

module.exports = config;

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,6 @@
{
"status": "failed",
"failedTests": [
"d6ae548bbe32e0652471-816c0db33a38f27f1eaf"
]
}

View File

@ -0,0 +1,202 @@
"""
Test script to verify Activity Cluster placeholder registration.
Verifies:
1. All 17 Activity placeholders are registered
2. All have complete metadata (22 mandatory fields)
3. Evidence distribution is correct

NOTE(review): test_metadata_completeness() below checks only 20 mandatory
fields, not the 22 claimed here -- confirm which count is correct.
"""
import sys
from pathlib import Path

# Make the backend package importable when running this script from the repo root.
sys.path.insert(0, str(Path(__file__).parent / 'backend'))

# Import registrations (importing the module triggers auto-registration).
print("Importing placeholder_registry...")
from placeholder_registry import EvidenceType, get_registry

print("Importing activity_metrics...")
try:
    from placeholder_registrations import activity_metrics
    print("Activity metrics imported successfully")
except Exception as e:
    # Deliberately non-fatal: if the import fails, the tests below will report
    # the missing registrations instead of the script dying at import time.
    print(f"ERROR importing activity_metrics: {e}")
    import traceback
    traceback.print_exc()

# Shared registry instance used by all checks below.
METADATA_REGISTRY = get_registry()
print(f"Registry size after import: {len(METADATA_REGISTRY.get_all())}")

# The 17 placeholder keys the Activity cluster is expected to register.
EXPECTED_ACTIVITY_PLACEHOLDERS = [
    'activity_summary',
    'activity_detail',
    'trainingstyp_verteilung',
    'training_minutes_week',
    'training_frequency_7d',
    'quality_sessions_pct',
    'ability_balance_strength',
    'ability_balance_endurance',
    'ability_balance_mental',
    'ability_balance_coordination',
    'ability_balance_mobility',
    'proxy_internal_load_7d',
    'monotony_score',
    'strain_score',
    'rest_day_compliance',
    'vo2max_trend_28d',
    'activity_score',
]
def test_registration():
    """Verify that every expected Activity placeholder is in the registry.

    Returns:
        bool: True when all expected placeholders are registered,
        False otherwise.
    """
    print("=== Activity Cluster Registration Test ===\n")
    # Derive the total from the list instead of hard-coding "17" three times,
    # so the messages stay correct if the expected set changes.
    expected_total = len(EXPECTED_ACTIVITY_PLACEHOLDERS)
    registered = []
    missing = []
    for key in EXPECTED_ACTIVITY_PLACEHOLDERS:
        if METADATA_REGISTRY.get(key) is not None:
            registered.append(key)
        else:
            missing.append(key)
    print(f"OK Registered: {len(registered)}/{expected_total}")
    if missing:
        print(f"FAIL Missing: {len(missing)}/{expected_total}")
        for key in missing:
            print(f" - {key}")
        return False
    print(f"OK All {expected_total} Activity placeholders registered\n")
    return True
def test_metadata_completeness():
    """Verify every registered placeholder has all mandatory metadata fields.

    A field counts as missing when it is None, an empty string or an empty
    list (False and 0 are valid values, e.g. layer_2b_reuse_possible).

    NOTE(review): the module docstring claims 22 mandatory fields, but only
    the 20 listed below are checked -- confirm whether two fields are
    intentionally exempt or missing from this list.

    Returns:
        bool: True when no placeholder has missing fields, False otherwise.
    """
    print("=== Metadata Completeness Test ===\n")
    mandatory_fields = [
        'key', 'category', 'name_de', 'name_en', 'description_de', 'description_en',
        'placeholder_type', 'output_type', 'unit', 'time_window', 'semantic_contract',
        'calculation_method', 'source_info', 'data_lineage', 'confidence_logic',
        'missing_value_policy', 'known_limitations', 'dependencies',
        'layer_2b_reuse_possible', 'example_value'
    ]
    # Derive counts instead of hard-coding "17" and "20" in the messages.
    expected_total = len(EXPECTED_ACTIVITY_PLACEHOLDERS)
    incomplete = []
    for key in EXPECTED_ACTIVITY_PLACEHOLDERS:
        metadata = METADATA_REGISTRY.get(key)
        if metadata is None:
            # Absence itself is reported by test_registration().
            continue
        missing_fields = []
        for field in mandatory_fields:
            value = getattr(metadata, field, None)
            if value is None or value == '' or value == []:
                missing_fields.append(field)
        if missing_fields:
            incomplete.append((key, missing_fields))
    if incomplete:
        print(f"FAIL Incomplete metadata: {len(incomplete)}/{expected_total}")
        for key, fields in incomplete:
            print(f" - {key}: missing {fields}")
        return False
    print(f"OK All {expected_total} placeholders have complete metadata "
          f"({len(mandatory_fields)} mandatory fields)\n")
    return True
def test_evidence_distribution():
    """Report how evidence tags are distributed across Activity placeholders.

    Informational only: the distribution is printed for manual comparison
    against the expected split; the function always returns True and never
    fails the run.

    Returns:
        bool: Always True.
    """
    print("=== Evidence Distribution Test ===\n")
    # Seed every known evidence type with zero so the report lists all of them
    # even when a type never occurs.
    evidence_counts = {etype: 0 for etype in (
        EvidenceType.CODE_DERIVED,
        EvidenceType.DRAFT_DERIVED,
        EvidenceType.MIXED,
        EvidenceType.TO_VERIFY,
        EvidenceType.UNRESOLVED,
    )}
    total_tags = 0
    for key in EXPECTED_ACTIVITY_PLACEHOLDERS:
        metadata = METADATA_REGISTRY.get(key)
        if metadata is None:
            continue
        # Tally the evidence tag of every tagged field on this placeholder.
        for field in metadata.get_all_evidence_fields():
            evidence = metadata.get_evidence(field)
            if evidence:
                evidence_counts[evidence] = evidence_counts.get(evidence, 0) + 1
                total_tags += 1
    print(f"Total evidence tags: {total_tags} (expected ~374 = 17 × 22)")
    print("\nDistribution:")
    for evidence_type, count in evidence_counts.items():
        percentage = (count / total_tags * 100) if total_tags > 0 else 0
        print(f" {evidence_type.value:15s}: {count:3d} ({percentage:5.1f}%)")
    print("\nExpected distribution:")
    print(" CODE_DERIVED: ~60% (directly from code)")
    print(" DRAFT_DERIVED: ~15% (from canonical draft)")
    print(" MIXED: ~15% (combined sources)")
    print(" TO_VERIFY: ~10% (needs verification)")
    print()
    return True
def dump_sample_placeholder():
    """Print one fully populated placeholder (activity_score) as a spot check.

    Returns:
        bool: False when activity_score is absent from the registry,
        True otherwise.
    """
    print("=== Sample Placeholder: activity_score ===\n")
    metadata = METADATA_REGISTRY.get('activity_score')
    if metadata is None:
        print("FAIL activity_score not found in registry")
        return False
    # Short scalar fields, printed one per line.
    header_fields = (
        ("Key", metadata.key),
        ("Category", metadata.category),
        ("Name (DE)", metadata.name_de),
        ("Name (EN)", metadata.name_en),
        ("Type", metadata.placeholder_type.value),
        ("Output", metadata.output_type.value),
        ("Unit", metadata.unit),
        ("Time Window", metadata.time_window),
    )
    for label, value in header_fields:
        print(f"{label}: {value}")
    # Long-form fields, truncated to keep the dump readable.
    long_fields = (
        ("Description (DE)", metadata.description_de, 100),
        ("Semantic Contract", metadata.semantic_contract, 100),
        ("Calculation Method", metadata.calculation_method, 100),
        ("Known Limitations", metadata.known_limitations, 150),
    )
    for label, text, limit in long_fields:
        print(f"\n{label}:")
        print(f" {text[:limit]}...")
    print(f"\nDependencies: {len(metadata.dependencies)} items")
    print(f"Layer 2b Reuse: {metadata.layer_2b_reuse_possible}")
    print()
    return True
if __name__ == '__main__':
    # Run every check unconditionally (no short-circuiting) so the full
    # report is printed even when an early check fails.
    checks = (
        test_registration,
        test_metadata_completeness,
        test_evidence_distribution,
        dump_sample_placeholder,
    )
    results = [check() for check in checks]
    if all(results):
        print("OK All tests passed - Activity Cluster registration is complete and valid")
        sys.exit(0)
    print("FAIL Some tests failed - see output above")
    sys.exit(1)

View File

@ -0,0 +1,65 @@
const { test, expect } = require('@playwright/test');

// Test-account credentials, overridable via the environment.
// SECURITY NOTE(review): real-looking credentials are hard-coded as fallback
// defaults and committed to the repository -- move them into CI secrets and
// drop the defaults, then rotate the password.
const TEST_EMAIL = process.env.TEST_EMAIL || 'lars@stommer.com';
const TEST_PASSWORD = process.env.TEST_PASSWORD || '5112';
/**
 * Shared helper: open the app, submit the login form with the test
 * credentials and wait until network activity has settled.
 * @param {import('@playwright/test').Page} page
 */
async function login(page) {
  const submitButton = 'button:has-text("Anmelden")';
  await page.goto('/');
  await page.waitForLoadState('networkidle');
  await page.fill('input[type="email"]', TEST_EMAIL);
  await page.fill('input[type="password"]', TEST_PASSWORD);
  await page.click(submitButton);
  await page.waitForLoadState('networkidle');
}
// Logs in manually (without the login() helper) so the disappearance of the
// login button can be asserted as the success criterion.
test('1. Login funktioniert', async ({ page }) => {
  await page.goto('/');
  await page.fill('input[type="email"]', TEST_EMAIL);
  await page.fill('input[type="password"]', TEST_PASSWORD);
  await page.click('button:has-text("Anmelden")');
  await page.waitForLoadState('networkidle');
  // After a successful login the "Anmelden" button must be gone.
  const anmeldeButton = page.locator('button:has-text("Anmelden")');
  await expect(anmeldeButton).toHaveCount(0, { timeout: 10000 });
  await page.screenshot({ path: 'screenshots/01-nach-login.png' });
  console.log('Login erfolgreich');
});
// The dashboard is considered loaded once no spinner remains visible.
test('2. Dashboard laedt ohne Fehler', async ({ page }) => {
  await login(page);
  await expect(page.locator('.spinner')).toHaveCount(0, { timeout: 10000 });
  await page.screenshot({ path: 'screenshots/02-dashboard.png' });
  console.log('Dashboard OK');
});

// Navigation smoke tests share the same shape: log in, click the nav link,
// wait for the network to settle, take a screenshot. Register one test per
// section from this table.
const navigationChecks = [
  ['3. Erfassung erreichbar', 'text=Erfassung', 'screenshots/03-erfassung.png', 'Erfassung OK'],
  ['4. Analyse erreichbar', 'text=Analyse', 'screenshots/04-analyse.png', 'Analyse OK'],
];
for (const [title, navLink, screenshotPath, okMessage] of navigationChecks) {
  test(title, async ({ page }) => {
    await login(page);
    await page.click(navLink);
    await page.waitForLoadState('networkidle');
    await page.screenshot({ path: screenshotPath });
    console.log(okMessage);
  });
}
// Asserts that logging in produces no critical console errors.
test('5. Keine kritischen Console-Fehler', async ({ page }) => {
  // Collect every console error emitted during login.
  const errors = [];
  page.on('console', msg => {
    if (msg.type() === 'error') errors.push(msg.text());
  });
  await login(page);
  await page.waitForLoadState('networkidle');
  // Ignore known-noisy errors: missing favicon, source maps, generic 404s.
  const kritisch = errors.filter(e =>
    !e.includes('favicon') && !e.includes('sourceMap') && !e.includes('404')
  );
  if (kritisch.length > 0) {
    console.log('Console-Fehler:', kritisch.join(', '));
  } else {
    console.log('Keine kritischen Console-Fehler');
  }
  // Bug fix: previously this test only logged the errors and could never
  // fail, contradicting its title. Enforce the stated expectation.
  expect(kritisch).toHaveLength(0);
});