first commit

This commit is contained in:
Dwindi Ramadhana
2026-03-21 23:32:59 +07:00
commit cf193d7ea0
57 changed files with 17871 additions and 0 deletions

65
app/schemas/__init__.py Normal file
View File

@@ -0,0 +1,65 @@
"""
Pydantic schemas package.
"""
from app.schemas.ai import (
AIGeneratePreviewRequest,
AIGeneratePreviewResponse,
AISaveRequest,
AISaveResponse,
AIStatsResponse,
GeneratedQuestion,
)
from app.schemas.session import (
SessionCompleteRequest,
SessionCompleteResponse,
SessionCreateRequest,
SessionResponse,
UserAnswerInput,
UserAnswerOutput,
)
from app.schemas.tryout import (
NormalizationUpdateRequest,
NormalizationUpdateResponse,
TryoutConfigBrief,
TryoutConfigResponse,
TryoutStatsResponse,
)
from app.schemas.wordpress import (
SyncStatsResponse,
SyncUsersResponse,
UserListResponse,
VerifySessionRequest,
VerifySessionResponse,
WordPressUserResponse,
)
# Public API of the schemas package.  Names are grouped by source module and
# kept alphabetical within each group, mirroring the import blocks above.
__all__ = [
    # AI schemas
    "AIGeneratePreviewRequest",
    "AIGeneratePreviewResponse",
    "AISaveRequest",
    "AISaveResponse",
    "AIStatsResponse",
    "GeneratedQuestion",
    # Session schemas
    "SessionCompleteRequest",
    "SessionCompleteResponse",
    "SessionCreateRequest",
    "SessionResponse",
    "UserAnswerInput",
    "UserAnswerOutput",
    # Tryout schemas
    "NormalizationUpdateRequest",
    "NormalizationUpdateResponse",
    "TryoutConfigBrief",
    "TryoutConfigResponse",
    "TryoutStatsResponse",
    # WordPress schemas
    "SyncStatsResponse",
    "SyncUsersResponse",
    "UserListResponse",
    "VerifySessionRequest",
    "VerifySessionResponse",
    "WordPressUserResponse",
]

102
app/schemas/ai.py Normal file
View File

@@ -0,0 +1,102 @@
"""
Pydantic schemas for AI generation endpoints.
Request/response models for admin AI generation playground.
"""
from typing import Dict, Literal, Optional
from pydantic import BaseModel, Field, field_validator
class AIGeneratePreviewRequest(BaseModel):
    """Request body for generating a question preview in the admin playground."""

    basis_item_id: int = Field(
        ..., description="ID of the basis item (must be sedang level)"
    )
    target_level: Literal["mudah", "sulit"] = Field(
        ..., description="Target difficulty level for generated question"
    )
    # Per-request model override; falls back to the default below when omitted.
    ai_model: str = Field(
        default="qwen/qwen-2.5-coder-32b-instruct",
        description="AI model to use for generation",
    )
class AIGeneratePreviewResponse(BaseModel):
    """Result of a preview generation attempt.

    Question fields are optional: presumably populated only when ``success``
    is True, with ``error`` carrying the failure reason otherwise — confirm
    against the generation endpoint.
    """

    success: bool = Field(..., description="Whether generation was successful")
    stem: Optional[str] = None
    options: Optional[Dict[str, str]] = None
    correct: Optional[str] = None
    explanation: Optional[str] = None
    ai_model: Optional[str] = None
    basis_item_id: Optional[int] = None
    target_level: Optional[str] = None
    error: Optional[str] = None
    # True when the result came from a cache hit rather than a fresh generation.
    cached: bool = False
class AISaveRequest(BaseModel):
    """Payload for persisting an AI-generated question into a tryout slot."""

    stem: str = Field(..., description="Question stem")
    options: Dict[str, str] = Field(..., description="Answer options (A, B, C, D)")
    correct: str = Field(..., description="Correct answer (A/B/C/D)")
    explanation: Optional[str] = None
    tryout_id: str = Field(..., description="Tryout identifier")
    website_id: int = Field(..., description="Website identifier")
    basis_item_id: int = Field(..., description="Basis item ID")
    slot: int = Field(..., description="Question slot position")
    level: Literal["mudah", "sedang", "sulit"] = Field(
        ..., description="Difficulty level"
    )
    ai_model: str = Field(
        default="qwen/qwen-2.5-coder-32b-instruct",
        description="AI model used for generation",
    )

    @field_validator("correct")
    @classmethod
    def validate_correct(cls, v: str) -> str:
        # Normalize once so lowercase input is accepted and stored uppercase.
        answer = v.upper()
        if answer not in ("A", "B", "C", "D"):
            raise ValueError("Correct answer must be A, B, C, or D")
        return answer

    @field_validator("options")
    @classmethod
    def validate_options(cls, v: Dict[str, str]) -> Dict[str, str]:
        # All four option letters must be present (extra keys are tolerated).
        missing = {"A", "B", "C", "D"} - set(v.keys())
        if missing:
            raise ValueError("Options must contain keys A, B, C, D")
        return v
class AISaveResponse(BaseModel):
    """Outcome of persisting an AI-generated question."""

    success: bool = Field(..., description="Whether save was successful")
    # ID of the stored item when the save succeeded; None on failure.
    item_id: Optional[int] = None
    error: Optional[str] = None
class AIStatsResponse(BaseModel):
    """Aggregate statistics about AI question generation and its cache."""

    total_ai_items: int = Field(..., description="Total AI-generated items")
    items_by_model: Dict[str, int] = Field(
        default_factory=dict, description="Items count by AI model"
    )
    cache_hit_rate: float = Field(
        default=0.0, description="Cache hit rate (0.0 to 1.0)"
    )
    total_cache_hits: int = Field(default=0, description="Total cache hits")
    total_requests: int = Field(default=0, description="Total generation requests")
class GeneratedQuestion(BaseModel):
    """A single AI-generated multiple-choice question."""

    stem: str
    options: Dict[str, str]
    correct: str
    explanation: Optional[str] = None

    @field_validator("correct")
    @classmethod
    def validate_correct(cls, v: str) -> str:
        # Accept lowercase input but always store the uppercase letter.
        letter = v.upper()
        if letter in {"A", "B", "C", "D"}:
            return letter
        raise ValueError("Correct answer must be A, B, C, or D")

264
app/schemas/report.py Normal file
View File

@@ -0,0 +1,264 @@
"""
Pydantic schemas for Report API endpoints.
"""
from datetime import datetime
from typing import Any, Dict, List, Literal, Optional
from pydantic import BaseModel, Field
# =============================================================================
# Student Performance Report Schemas
# =============================================================================
class StudentPerformanceRecordOutput(BaseModel):
    """Individual student performance record output."""

    session_id: str
    wp_user_id: str
    tryout_id: str
    # NM = Nilai Mentah (raw score), NN = Nilai Nasional (normalized score);
    # None when not available for this session.
    NM: Optional[int] = None
    NN: Optional[int] = None
    # IRT ability estimate; theta_se is presumably its standard error — confirm.
    theta: Optional[float] = None
    theta_se: Optional[float] = None
    total_benar: int
    time_spent: int  # Total time in seconds
    start_time: Optional[datetime] = None
    end_time: Optional[datetime] = None
    scoring_mode_used: str
    # Normalization parameters that were applied for this session.
    rataan_used: Optional[float] = None
    sb_used: Optional[float] = None
class AggregatePerformanceStatsOutput(BaseModel):
    """Aggregate statistics for student performance output."""

    tryout_id: str
    participant_count: int
    # Descriptive statistics over NM (raw score) across participants.
    avg_nm: Optional[float] = None
    std_nm: Optional[float] = None
    min_nm: Optional[int] = None
    max_nm: Optional[int] = None
    median_nm: Optional[float] = None
    # Statistics over NN (normalized score) and IRT theta.
    avg_nn: Optional[float] = None
    std_nn: Optional[float] = None
    avg_theta: Optional[float] = None
    pass_rate: float  # Percentage with NN >= 500
    avg_time_spent: float  # Average time in seconds
class StudentPerformanceReportOutput(BaseModel):
    """Complete student performance report output.

    Combines aggregate statistics with per-student records; the record list
    may be empty (e.g. when only aggregate data was requested — confirm
    against the report service).
    """

    generated_at: datetime
    tryout_id: str
    website_id: int
    # Date bounds the report was filtered by, if any — shape unvalidated here.
    date_range: Optional[Dict[str, str]] = None
    aggregate: AggregatePerformanceStatsOutput
    # default_factory instead of a bare `= []` literal, matching the
    # convention used elsewhere in this module (see information_values).
    individual_records: List[StudentPerformanceRecordOutput] = Field(
        default_factory=list
    )
class StudentPerformanceReportRequest(BaseModel):
    """Request schema for student performance report."""

    tryout_id: str = Field(..., description="Tryout identifier")
    website_id: int = Field(..., description="Website identifier")
    date_start: Optional[datetime] = Field(None, description="Filter by start date")
    date_end: Optional[datetime] = Field(None, description="Filter by end date")
    # "individual" = per-student records, "aggregate" = summary only,
    # "both" = include both sections of the report.
    format_type: Literal["individual", "aggregate", "both"] = Field(
        default="both", description="Report format"
    )
# =============================================================================
# Item Analysis Report Schemas
# =============================================================================
class ItemAnalysisRecordOutput(BaseModel):
    """Item analysis record output for a single item."""

    item_id: int
    slot: int
    level: str
    # CTT (classical test theory) statistics: p-value, weight, category.
    ctt_p: Optional[float] = None
    ctt_bobot: Optional[float] = None
    ctt_category: Optional[str] = None
    # IRT difficulty estimate (b) and its standard error.
    irt_b: Optional[float] = None
    irt_se: Optional[float] = None
    calibrated: bool
    calibration_sample_size: int
    correctness_rate: float
    item_total_correlation: Optional[float] = None
    # Assumed mapping of theta -> item information value — TODO confirm
    # against the report service that populates it.
    information_values: Dict[float, float] = Field(default_factory=dict)
    optimal_theta_range: str = "N/A"
class ItemAnalysisReportOutput(BaseModel):
    """Complete item analysis report output."""

    generated_at: datetime
    tryout_id: str
    website_id: int
    total_items: int
    items: List[ItemAnalysisRecordOutput]
    # Free-form summary keyed by metric name; shape is not validated here.
    summary: Dict[str, Any]
class ItemAnalysisReportRequest(BaseModel):
    """Request schema for item analysis report."""

    tryout_id: str = Field(..., description="Tryout identifier")
    website_id: int = Field(..., description="Website identifier")
    filter_by: Optional[Literal["difficulty", "calibrated", "discrimination"]] = Field(
        None, description="Filter items by category"
    )
    # Only meaningful together with filter_by="difficulty".
    difficulty_level: Optional[Literal["mudah", "sedang", "sulit"]] = Field(
        None, description="Filter by difficulty level (only when filter_by='difficulty')"
    )
# =============================================================================
# Calibration Status Report Schemas
# =============================================================================
class CalibrationItemStatusOutput(BaseModel):
    """Calibration status for a single item output."""

    item_id: int
    slot: int
    level: str
    # Number of responses collected toward calibrating this item.
    sample_size: int
    calibrated: bool
    # IRT difficulty and standard error; None until the item is calibrated.
    irt_b: Optional[float] = None
    irt_se: Optional[float] = None
    ctt_p: Optional[float] = None
class CalibrationStatusReportOutput(BaseModel):
    """Complete calibration status report output.

    NOTE(review): both ``items_awaiting_calibration`` and ``items`` expose
    item status lists — confirm the intended distinction (uncalibrated subset
    vs. full list?) with the report service.
    """

    generated_at: datetime
    tryout_id: str
    website_id: int
    total_items: int
    calibrated_items: int
    calibration_percentage: float
    items_awaiting_calibration: List[CalibrationItemStatusOutput]
    avg_calibration_sample_size: float
    # Human-readable estimate; None when it cannot be computed.
    estimated_time_to_90_percent: Optional[str] = None
    ready_for_irt_rollout: bool
    items: List[CalibrationItemStatusOutput]
class CalibrationStatusReportRequest(BaseModel):
    """Request schema for calibration status report (one tryout per request)."""

    tryout_id: str = Field(..., description="Tryout identifier")
    website_id: int = Field(..., description="Website identifier")
# =============================================================================
# Tryout Comparison Report Schemas
# =============================================================================
class TryoutComparisonRecordOutput(BaseModel):
    """Tryout comparison data point output."""

    tryout_id: str
    # Grouping attributes; presumably only the one matching the request's
    # group_by axis is populated — confirm with the report service.
    date: Optional[str] = None
    subject: Optional[str] = None
    participant_count: int
    avg_nm: Optional[float] = None
    avg_nn: Optional[float] = None
    avg_theta: Optional[float] = None
    std_nm: Optional[float] = None
    calibration_percentage: float
class TryoutComparisonReportOutput(BaseModel):
    """Complete tryout comparison report output."""

    generated_at: datetime
    # Axis the tryouts were grouped on (mirrors the request's group_by).
    comparison_type: Literal["date", "subject"]
    tryouts: List[TryoutComparisonRecordOutput]
    # Free-form analysis sections; shapes are not validated here.
    trends: Optional[Dict[str, Any]] = None
    normalization_impact: Optional[Dict[str, Any]] = None
class TryoutComparisonReportRequest(BaseModel):
    """Request schema for tryout comparison report."""

    # min_length=2: a comparison needs at least two tryouts.
    tryout_ids: List[str] = Field(..., min_length=2, description="List of tryout IDs to compare")
    website_id: int = Field(..., description="Website identifier")
    group_by: Literal["date", "subject"] = Field(
        default="date", description="Group comparison by date or subject"
    )
# =============================================================================
# Report Scheduling Schemas
# =============================================================================
class ReportScheduleRequest(BaseModel):
    """Request schema for scheduling a report."""

    report_type: Literal["student_performance", "item_analysis", "calibration_status", "tryout_comparison"] = Field(
        ..., description="Type of report to generate"
    )
    schedule: Literal["daily", "weekly", "monthly"] = Field(
        ..., description="Schedule frequency"
    )
    tryout_ids: List[str] = Field(..., description="List of tryout IDs for the report")
    website_id: int = Field(..., description="Website identifier")
    # Email addresses; note they are not validated as emails here.
    recipients: List[str] = Field(..., description="List of email addresses to send report to")
    export_format: Literal["csv", "xlsx", "pdf"] = Field(
        default="xlsx", description="Export format for the report"
    )
class ReportScheduleOutput(BaseModel):
    """Output schema for scheduled report."""

    schedule_id: str
    report_type: str
    schedule: str
    tryout_ids: List[str]
    website_id: int
    recipients: List[str]
    # NOTE(review): named `format` here but `export_format` on the request —
    # consider aligning; renaming is an interface change, so left as-is.
    format: str
    created_at: datetime
    # Run bookkeeping; None until the schedule has fired / been computed.
    last_run: Optional[datetime] = None
    next_run: Optional[datetime] = None
    is_active: bool
class ReportScheduleResponse(BaseModel):
    """Response schema for schedule creation."""

    schedule_id: str
    # Human-readable status message for the client.
    message: str
    next_run: Optional[datetime] = None
# =============================================================================
# Export Schemas
# =============================================================================
class ExportRequest(BaseModel):
    """Request schema for exporting a report."""

    schedule_id: str = Field(..., description="Schedule ID to generate report for")
    export_format: Literal["csv", "xlsx", "pdf"] = Field(
        default="xlsx", description="Export format"
    )
class ExportResponse(BaseModel):
    """Response schema for export request."""

    file_path: str
    file_name: str
    format: str
    generated_at: datetime
    # Optional URL clients can use to fetch the generated file.
    download_url: Optional[str] = None

108
app/schemas/session.py Normal file
View File

@@ -0,0 +1,108 @@
"""
Pydantic schemas for Session API endpoints.
"""
from datetime import datetime
from typing import List, Literal, Optional
from pydantic import BaseModel, Field
class UserAnswerInput(BaseModel):
    """Input schema for a single user answer."""

    item_id: int = Field(..., description="Item/question ID")
    # Length-bounded but otherwise free-form; correctness is judged server-side.
    response: str = Field(..., min_length=1, max_length=10, description="User's answer (A, B, C, D)")
    time_spent: int = Field(default=0, ge=0, description="Time spent on this question (seconds)")
class SessionCompleteRequest(BaseModel):
    """Request schema for completing a session (submits all answers at once)."""

    end_time: datetime = Field(..., description="Session end timestamp")
    user_answers: List[UserAnswerInput] = Field(..., description="List of user answers")
class UserAnswerOutput(BaseModel):
    """Output schema for a single user answer."""

    id: int
    item_id: int
    response: str
    is_correct: bool
    time_spent: int
    # Weight (bobot) credited for this answer under the scoring mode used.
    bobot_earned: float
    scoring_mode_used: str

    # Allow construction directly from ORM objects (pydantic v2).
    model_config = {"from_attributes": True}
class SessionCompleteResponse(BaseModel):
    """Response schema for completed session with CTT scores."""

    id: int
    session_id: str
    wp_user_id: str
    website_id: int
    tryout_id: str
    start_time: datetime
    end_time: Optional[datetime]
    is_completed: bool
    scoring_mode_used: str
    # CTT scores
    total_benar: int = Field(description="Total correct answers")
    total_bobot_earned: float = Field(description="Total weight earned")
    NM: Optional[int] = Field(description="Nilai Mentah (raw score) [0, 1000]")
    NN: Optional[int] = Field(description="Nilai Nasional (normalized score) [0, 1000]")
    # Normalization metadata
    rataan_used: Optional[float] = Field(description="Mean value used for normalization")
    sb_used: Optional[float] = Field(description="Standard deviation used for normalization")
    # User answers
    user_answers: List[UserAnswerOutput]

    # Allow construction directly from ORM objects (pydantic v2).
    model_config = {"from_attributes": True}
class SessionCreateRequest(BaseModel):
    """Request schema for creating a new session."""

    # Session ID is supplied by the caller, not generated server-side.
    session_id: str = Field(..., description="Unique session identifier")
    wp_user_id: str = Field(..., description="WordPress user ID")
    website_id: int = Field(..., description="Website identifier")
    tryout_id: str = Field(..., description="Tryout identifier")
    scoring_mode: Literal["ctt", "irt", "hybrid"] = Field(
        default="ctt", description="Scoring mode for this session"
    )
class SessionResponse(BaseModel):
    """Response schema for session data (both CTT and IRT score fields)."""

    id: int
    session_id: str
    wp_user_id: str
    website_id: int
    tryout_id: str
    start_time: datetime
    end_time: Optional[datetime]
    is_completed: bool
    scoring_mode_used: str
    # CTT scores (populated after completion)
    total_benar: int
    total_bobot_earned: float
    NM: Optional[int]
    NN: Optional[int]
    # IRT scores (populated after completion)
    theta: Optional[float]
    theta_se: Optional[float]
    # Normalization metadata
    rataan_used: Optional[float]
    sb_used: Optional[float]

    # Allow construction directly from ORM objects (pydantic v2).
    model_config = {"from_attributes": True}

97
app/schemas/tryout.py Normal file
View File

@@ -0,0 +1,97 @@
"""
Pydantic schemas for Tryout API endpoints.
"""
from datetime import datetime
from typing import List, Literal, Optional
from pydantic import BaseModel, Field
class TryoutConfigResponse(BaseModel):
    """Response schema for tryout configuration.

    ``current_stats`` uses a string forward reference to TryoutStatsResponse
    (defined below); it is resolved by the ``model_rebuild()`` call at the
    bottom of this module.
    """

    id: int
    website_id: int
    tryout_id: str
    name: str
    description: Optional[str]
    # Scoring configuration
    scoring_mode: Literal["ctt", "irt", "hybrid"]
    selection_mode: Literal["fixed", "adaptive", "hybrid"]
    normalization_mode: Literal["static", "dynamic", "hybrid"]
    # Normalization settings
    min_sample_for_dynamic: int
    static_rataan: float
    static_sb: float
    # AI generation
    ai_generation_enabled: bool
    # Hybrid mode settings
    hybrid_transition_slot: Optional[int]
    # IRT settings
    min_calibration_sample: int
    theta_estimation_method: Literal["mle", "map", "eap"]
    fallback_to_ctt_on_error: bool
    # Current stats
    current_stats: Optional["TryoutStatsResponse"]
    # Timestamps
    created_at: datetime
    updated_at: datetime

    # Allow construction directly from ORM objects (pydantic v2).
    model_config = {"from_attributes": True}
class TryoutStatsResponse(BaseModel):
    """Response schema for tryout statistics."""

    participant_count: int
    # Mean (rataan) and standard deviation (sb) of raw scores; None until
    # statistics have been calculated.
    rataan: Optional[float]
    sb: Optional[float]
    min_nm: Optional[int]
    max_nm: Optional[int]
    last_calculated: Optional[datetime]

    # Allow construction directly from ORM objects (pydantic v2).
    model_config = {"from_attributes": True}
class TryoutConfigBrief(BaseModel):
    """Brief tryout config for list responses."""

    tryout_id: str
    name: str
    # Modes are plain strings here (not Literals) since this is output-only.
    scoring_mode: str
    selection_mode: str
    normalization_mode: str
    participant_count: Optional[int] = None

    # Allow construction directly from ORM objects (pydantic v2).
    model_config = {"from_attributes": True}
class NormalizationUpdateRequest(BaseModel):
    """Request schema for updating normalization settings.

    All fields are optional — presumably partial-update semantics where only
    supplied values change; confirm against the endpoint.
    """

    normalization_mode: Optional[Literal["static", "dynamic", "hybrid"]] = None
    # Static mean must be non-negative; static SD must be strictly positive.
    static_rataan: Optional[float] = Field(None, ge=0)
    static_sb: Optional[float] = Field(None, gt=0)
class NormalizationUpdateResponse(BaseModel):
    """Response schema for normalization update."""

    tryout_id: str
    normalization_mode: str
    static_rataan: float
    static_sb: float
    # Participant count at which normalization switches to dynamic mode.
    will_switch_to_dynamic_at: int
    current_participant_count: int
# Resolve the "TryoutStatsResponse" string forward reference inside
# TryoutConfigResponse now that the class is defined above.
TryoutConfigResponse.model_rebuild()

86
app/schemas/wordpress.py Normal file
View File

@@ -0,0 +1,86 @@
"""
Pydantic schemas for WordPress Integration API endpoints.
"""
from datetime import datetime
from typing import Any, List, Optional
from pydantic import BaseModel, Field
class VerifySessionRequest(BaseModel):
    """Request schema for verifying WordPress session."""

    wp_user_id: str = Field(..., description="WordPress user ID")
    token: str = Field(..., description="WordPress JWT authentication token")
    website_id: int = Field(..., description="Website identifier")
class WordPressUserResponse(BaseModel):
    """Response schema for WordPress user data."""

    # `id` is the local row ID; `wp_user_id` is the remote WordPress ID.
    id: int = Field(..., description="Local database user ID")
    wp_user_id: str = Field(..., description="WordPress user ID")
    website_id: int = Field(..., description="Website identifier")
    created_at: datetime = Field(..., description="User creation timestamp")
    updated_at: datetime = Field(..., description="User last update timestamp")

    # Allow construction directly from ORM objects (pydantic v2).
    model_config = {"from_attributes": True}
class VerifySessionResponse(BaseModel):
    """Response schema for session verification."""

    valid: bool = Field(..., description="Whether the session is valid")
    user: Optional[WordPressUserResponse] = Field(
        default=None, description="User data if session is valid"
    )
    error: Optional[str] = Field(
        default=None, description="Error message if session is invalid"
    )
    # Raw payload from the WordPress API; its shape is not validated here.
    wp_user_info: Optional[dict[str, Any]] = Field(
        default=None, description="WordPress user info from API"
    )
class SyncUsersRequest(BaseModel):
    """Request schema for user synchronization (optional body).

    Intentionally empty: the sync endpoint takes no request parameters.
    """

    pass
class SyncStatsResponse(BaseModel):
    """Response schema for user synchronization statistics."""

    inserted: int = Field(..., description="Number of users inserted")
    updated: int = Field(..., description="Number of users updated")
    total: int = Field(..., description="Total users processed")
    errors: int = Field(default=0, description="Number of errors during sync")
class SyncUsersResponse(BaseModel):
    """Response schema for user synchronization."""

    synced: SyncStatsResponse = Field(..., description="Synchronization statistics")
    website_id: int = Field(..., description="Website identifier")
    message: str = Field(default="Sync completed", description="Status message")
class UserListResponse(BaseModel):
    """Response schema for paginated user list."""

    users: List[WordPressUserResponse] = Field(..., description="List of users")
    total: int = Field(..., description="Total number of users")
    # 1-based pagination metadata.
    page: int = Field(default=1, description="Current page number")
    page_size: int = Field(default=50, description="Number of users per page")
    total_pages: int = Field(default=1, description="Total number of pages")
class WordPressErrorDetail(BaseModel):
    """Detail schema for WordPress errors.

    NOTE(review): unlike the other schemas here, this one is not re-exported
    from app/schemas/__init__.py — confirm whether that is intentional.
    """

    code: str = Field(..., description="Error code")
    message: str = Field(..., description="Error message")
    details: Optional[dict[str, Any]] = Field(
        default=None, description="Additional error details"
    )