import math
import os
import sys

import pytest

# Ensure project root is importable when tests run in isolated environments.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), ".."))

from app.services.normalization import apply_normalization


@pytest.mark.parametrize(
    ("nm", "rataan", "sb", "expected"),
    [
        (500, 500, 100, 500),
        (600, 500, 100, 600),
        (400, 500, 100, 400),
        (1000, 500, 100, 1000),
        (0, 500, 100, 0),
        (500, 600, 80, 375),
    ],
)
def test_apply_normalization_nominal_cases(nm: int, rataan: float, sb: float, expected: int):
    """A raw NM score maps to the expected normalized value for valid inputs."""
    assert apply_normalization(nm, rataan, sb) == expected


@pytest.mark.parametrize("nm", [-1, 1001, 1500, -100])
def test_apply_normalization_rejects_invalid_nm(nm: int):
    """Scores outside the valid NM range are rejected with ValueError."""
    with pytest.raises(ValueError):
        apply_normalization(nm, 500, 100)


@pytest.mark.parametrize("sb", [0, -1, -100.0])
def test_apply_normalization_returns_default_when_sd_non_positive(sb: float):
    """A non-positive standard deviation falls back to the default score of 500."""
    assert apply_normalization(500, 500, sb) == 500


def test_dynamic_normalization_distribution_behaves_as_expected():
    """Normalizing a cohort recenters its distribution near mean 500 / SD 100."""
    raw_scores = [450, 480, 500, 520, 550, 480, 510, 490, 530, 470]
    cohort_size = len(raw_scores)

    cohort_mean = sum(raw_scores) / cohort_size
    cohort_std = math.sqrt(
        sum((score - cohort_mean) ** 2 for score in raw_scores) / cohort_size
    )

    normalized = [apply_normalization(score, cohort_mean, cohort_std) for score in raw_scores]

    normalized_mean = sum(normalized) / cohort_size
    normalized_std = math.sqrt(
        sum((score - normalized_mean) ** 2 for score in normalized) / cohort_size
    )

    # Rounding in apply_normalization introduces small drift; these bounds are tight.
    assert abs(normalized_mean - 500) <= 5
    assert abs(normalized_std - 100) <= 5


def test_incremental_population_stats_match_batch_stats():
    """Running-sum (Welford-style sum/sum-of-squares) stats agree with batch stats."""
    scores = [500, 550, 450, 600, 400]

    running_count = 0
    running_sum = 0.0
    running_sq_sum = 0.0
    for score in scores:
        running_count += 1
        running_sum += score
        running_sq_sum += score * score

    incremental_mean = running_sum / running_count
    # Population variance via E[X^2] - E[X]^2; clamp tiny negative float error to 0.
    incremental_variance = (running_sq_sum / running_count) - incremental_mean**2
    incremental_std = math.sqrt(max(0.0, incremental_variance))

    batch_mean = sum(scores) / len(scores)
    batch_std = math.sqrt(
        sum((score - batch_mean) ** 2 for score in scores) / len(scores)
    )

    assert incremental_mean == pytest.approx(batch_mean, rel=0, abs=1e-10)
    assert incremental_std == pytest.approx(batch_std, rel=0, abs=1e-10)