"""
Pydantic schemas for AI generation endpoints.

Request/response models for admin AI generation playground.
"""
from typing import Dict, Literal, Optional

from pydantic import BaseModel, Field, field_validator


class AIGeneratePreviewRequest(BaseModel):
    """Request payload for generating a preview question from a basis item."""

    # The source question the generation is derived from.
    basis_item_id: int = Field(
        ..., description="ID of the basis item (must be sedang level)"
    )
    # Only the two non-"sedang" difficulties are valid generation targets.
    target_level: Literal["mudah", "sulit"] = Field(
        ..., description="Target difficulty level for generated question"
    )
    # Model identifier passed through to the generation backend.
    ai_model: str = Field(
        default="qwen/qwen-2.5-coder-32b-instruct",
        description="AI model to use for generation",
    )


class AIGeneratePreviewResponse(BaseModel):
    """Result of a preview generation request.

    All payload fields are optional: presumably they are filled in on
    success and left as None on failure (see the `error` field) — the
    producing endpoint defines the exact contract.
    """

    success: bool = Field(..., description="Whether generation was successful")
    # Generated question payload.
    stem: Optional[str] = None
    options: Optional[Dict[str, str]] = None
    correct: Optional[str] = None
    explanation: Optional[str] = None
    # Echo of the generation parameters used.
    ai_model: Optional[str] = None
    basis_item_id: Optional[int] = None
    target_level: Optional[str] = None
    # Human-readable error message when generation did not succeed.
    error: Optional[str] = None
    # True when the result came from a cache rather than a fresh generation.
    cached: bool = False


class AISaveRequest(BaseModel):
    """Payload for persisting an AI-generated question into a tryout slot."""

    stem: str = Field(..., description="Question stem")
    options: Dict[str, str] = Field(
        ..., description="Answer options (A, B, C, D)"
    )
    correct: str = Field(..., description="Correct answer (A/B/C/D)")
    explanation: Optional[str] = None
    tryout_id: str = Field(..., description="Tryout identifier")
    website_id: int = Field(..., description="Website identifier")
    basis_item_id: int = Field(..., description="Basis item ID")
    slot: int = Field(..., description="Question slot position")
    level: Literal["mudah", "sedang", "sulit"] = Field(
        ..., description="Difficulty level"
    )
    ai_model: str = Field(
        default="qwen/qwen-2.5-coder-32b-instruct",
        description="AI model used for generation",
    )

    @field_validator("correct")
    @classmethod
    def validate_correct(cls, v: str) -> str:
        """Reject any answer key outside A-D and normalize it to uppercase."""
        answer = v.upper()
        if answer not in {"A", "B", "C", "D"}:
            raise ValueError("Correct answer must be A, B, C, or D")
        return answer

    @field_validator("options")
    @classmethod
    def validate_options(cls, v: Dict[str, str]) -> Dict[str, str]:
        """Require all four answer keys to be present (extra keys allowed)."""
        if not {"A", "B", "C", "D"} <= v.keys():
            raise ValueError("Options must contain keys A, B, C, D")
        return v


class AISaveResponse(BaseModel):
    """Outcome of saving an AI-generated question."""

    success: bool = Field(..., description="Whether save was successful")
    # ID of the newly created item; None when the save failed.
    item_id: Optional[int] = None
    # Human-readable error message when the save did not succeed.
    error: Optional[str] = None


class AIStatsResponse(BaseModel):
    """Aggregate statistics about AI-generated items and generation caching."""

    total_ai_items: int = Field(..., description="Total AI-generated items")
    items_by_model: Dict[str, int] = Field(
        default_factory=dict, description="Items count by AI model"
    )
    cache_hit_rate: float = Field(
        default=0.0, description="Cache hit rate (0.0 to 1.0)"
    )
    total_cache_hits: int = Field(default=0, description="Total cache hits")
    total_requests: int = Field(default=0, description="Total generation requests")


class GeneratedQuestion(BaseModel):
    """Question parsed from the AI model's output.

    Applies the same field validation as AISaveRequest so that a malformed
    generation (bad answer key, or an options dict missing one of A-D) is
    rejected when it is parsed rather than surfacing later at save time.
    """

    stem: str
    options: Dict[str, str]
    correct: str
    explanation: Optional[str] = None

    @field_validator("correct")
    @classmethod
    def validate_correct(cls, v: str) -> str:
        """Reject any answer key outside A-D and normalize it to uppercase."""
        if v.upper() not in ["A", "B", "C", "D"]:
            raise ValueError("Correct answer must be A, B, C, or D")
        return v.upper()

    # Fix: previously only `correct` was validated, so a question whose
    # options dict lacked the answer keys entirely passed schema validation.
    # Mirrors AISaveRequest.validate_options (same rule, same message).
    @field_validator("options")
    @classmethod
    def validate_options(cls, v: Dict[str, str]) -> Dict[str, str]:
        """Require all four answer keys to be present (extra keys allowed)."""
        required_keys = {"A", "B", "C", "D"}
        if not required_keys.issubset(set(v.keys())):
            raise ValueError("Options must contain keys A, B, C, D")
        return v