"""
AI Generation Router.

Admin endpoints for AI question generation playground.
"""
|
|
|
|
import logging
|
|
from typing import Annotated
|
|
|
|
from fastapi import APIRouter, Depends, HTTPException, status
|
|
from sqlalchemy import and_, select
|
|
from sqlalchemy.ext.asyncio import AsyncSession
|
|
|
|
from app.database import get_db
|
|
from app.models.item import Item
|
|
from app.schemas.ai import (
|
|
AIGeneratePreviewRequest,
|
|
AIGeneratePreviewResponse,
|
|
AISaveRequest,
|
|
AISaveResponse,
|
|
AIStatsResponse,
|
|
)
|
|
from app.services.ai_generation import (
|
|
generate_question,
|
|
get_ai_stats,
|
|
save_ai_question,
|
|
validate_ai_model,
|
|
)
|
|
|
|
logger = logging.getLogger(__name__)
|
|
|
|
router = APIRouter(prefix="/admin/ai", tags=["admin", "ai-generation"])
|
|
|
|
|
|
@router.post(
    "/generate-preview",
    response_model=AIGeneratePreviewResponse,
    summary="Preview AI-generated question",
    description="""
Generate a question preview using AI without saving to database.

This is an admin playground endpoint for testing AI generation quality.
Admins can retry unlimited times until satisfied with the result.

Requirements:
- basis_item_id must reference an existing item at 'sedang' level
- target_level must be 'mudah' or 'sulit'
- ai_model must be a supported OpenRouter model
""",
    responses={
        200: {"description": "Question generated successfully (preview mode)"},
        400: {"description": "Invalid request (wrong level, unsupported model)"},
        404: {"description": "Basis item not found"},
        500: {"description": "AI generation failed"},
    },
)
async def generate_preview(
    request: AIGeneratePreviewRequest,
    db: Annotated[AsyncSession, Depends(get_db)],
) -> AIGeneratePreviewResponse:
    """
    Generate AI question preview (no database save).

    - **basis_item_id**: ID of the sedang-level question to base generation on
    - **target_level**: Target difficulty (mudah/sulit)
    - **ai_model**: OpenRouter model to use (default: qwen/qwen-2.5-coder-32b-instruct)

    Raises:
        HTTPException: 400 if the model is unsupported or the basis item is
            not 'sedang' level; 404 if the basis item does not exist.

    Returns:
        AIGeneratePreviewResponse: success=True with the generated question,
        or success=False with an error message when generation fails
        (generation failures are reported in-band, not as HTTP errors).
    """
    # Reject unsupported models before doing any database work.
    if not validate_ai_model(request.ai_model):
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Unsupported AI model: {request.ai_model}. "
            f"Supported models: qwen/qwen-2.5-coder-32b-instruct, meta-llama/llama-3.3-70b-instruct",
        )

    # Fetch the basis item the generation will be derived from.
    result = await db.execute(
        select(Item).where(Item.id == request.basis_item_id)
    )
    basis_item = result.scalar_one_or_none()

    if not basis_item:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Basis item not found: {request.basis_item_id}",
        )

    # Only 'sedang' (medium) items may serve as a generation basis.
    if basis_item.level != "sedang":
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail=f"Basis item must be 'sedang' level, got: {basis_item.level}",
        )

    # Generate the question; any failure is returned as a soft error so the
    # admin playground can display it and allow retries.
    try:
        generated = await generate_question(
            basis_item=basis_item,
            target_level=request.target_level,
            ai_model=request.ai_model,
        )

        if not generated:
            return AIGeneratePreviewResponse(
                success=False,
                error="AI generation failed. Please check logs or try again.",
                ai_model=request.ai_model,
                basis_item_id=request.basis_item_id,
                target_level=request.target_level,
            )

        return AIGeneratePreviewResponse(
            success=True,
            stem=generated.stem,
            options=generated.options,
            correct=generated.correct,
            explanation=generated.explanation,
            ai_model=request.ai_model,
            basis_item_id=request.basis_item_id,
            target_level=request.target_level,
            cached=False,
        )

    except Exception as e:
        # logger.exception preserves the traceback; lazy %-args avoid eager
        # f-string formatting (both fixes over the original logger.error(f"...")).
        logger.exception("AI preview generation failed: %s", e)
        return AIGeneratePreviewResponse(
            success=False,
            error=f"AI generation error: {str(e)}",
            ai_model=request.ai_model,
            basis_item_id=request.basis_item_id,
            target_level=request.target_level,
        )
|
|
|
|
|
|
@router.post(
    "/generate-save",
    response_model=AISaveResponse,
    summary="Save AI-generated question",
    description="""
Save an AI-generated question to the database.

This endpoint creates a new Item record with:
- generated_by='ai'
- ai_model from request
- basis_item_id linking to original question
- calibrated=False (will be calculated later)
""",
    responses={
        200: {"description": "Question saved successfully"},
        400: {"description": "Invalid request data"},
        404: {"description": "Basis item or tryout not found"},
        409: {"description": "Item already exists at this slot/level"},
        500: {"description": "Database save failed"},
    },
)
async def generate_save(
    request: AISaveRequest,
    db: Annotated[AsyncSession, Depends(get_db)],
) -> AISaveResponse:
    """
    Save AI-generated question to database.

    - **stem**: Question text
    - **options**: Dict with A, B, C, D options
    - **correct**: Correct answer (A/B/C/D)
    - **explanation**: Answer explanation (optional)
    - **tryout_id**: Tryout identifier
    - **website_id**: Website identifier
    - **basis_item_id**: Original item ID this was generated from
    - **slot**: Question slot position
    - **level**: Difficulty level
    - **ai_model**: AI model used for generation

    Raises:
        HTTPException: 404 if the basis item is missing, 409 if an item
            already occupies the same (tryout, website, slot, level) slot,
            500 if the save itself fails.
    """
    # Guard: the referenced basis item must exist.
    basis_lookup = await db.execute(
        select(Item).where(Item.id == request.basis_item_id)
    )
    if basis_lookup.scalar_one_or_none() is None:
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=f"Basis item not found: {request.basis_item_id}",
        )

    # Guard: reject duplicates — one item per (tryout, website, slot, level).
    slot_filter = and_(
        Item.tryout_id == request.tryout_id,
        Item.website_id == request.website_id,
        Item.slot == request.slot,
        Item.level == request.level,
    )
    duplicate_lookup = await db.execute(select(Item).where(slot_filter))
    if duplicate_lookup.scalar_one_or_none() is not None:
        raise HTTPException(
            status_code=status.HTTP_409_CONFLICT,
            detail=f"Item already exists at slot={request.slot}, level={request.level} "
            f"for tryout={request.tryout_id}",
        )

    # Local import mirrors the original placement (likely avoids an import
    # cycle at module load time).
    from app.schemas.ai import GeneratedQuestion

    question_payload = GeneratedQuestion(
        stem=request.stem,
        options=request.options,
        correct=request.correct,
        explanation=request.explanation,
    )

    # Persist via the service layer; a falsy item_id signals a save failure.
    new_item_id = await save_ai_question(
        generated_data=question_payload,
        tryout_id=request.tryout_id,
        website_id=request.website_id,
        basis_item_id=request.basis_item_id,
        slot=request.slot,
        level=request.level,
        ai_model=request.ai_model,
        db=db,
    )

    if not new_item_id:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to save AI-generated question",
        )

    return AISaveResponse(
        success=True,
        item_id=new_item_id,
    )
|
|
|
|
|
|
@router.get(
    "/stats",
    response_model=AIStatsResponse,
    summary="Get AI generation statistics",
    description="""
Get statistics about AI-generated questions.

Returns:
- Total AI-generated items count
- Items count by model
- Cache hit rate (placeholder)
""",
)
async def get_stats(
    db: Annotated[AsyncSession, Depends(get_db)],
) -> AIStatsResponse:
    """
    Get AI generation statistics.

    Delegates to the ai_generation service and repackages the expected
    keys into the response schema.
    """
    stats = await get_ai_stats(db)

    # Pick exactly the fields the schema expects; a missing key raises
    # KeyError, same as direct indexing would.
    fields = (
        "total_ai_items",
        "items_by_model",
        "cache_hit_rate",
        "total_cache_hits",
        "total_requests",
    )
    return AIStatsResponse(**{name: stats[name] for name in fields})
|
|
|
|
|
|
@router.get(
    "/models",
    summary="List supported AI models",
    description="Returns list of supported AI models for question generation.",
)
async def list_models() -> dict:
    """
    List supported AI models.

    Returns a static catalog of the OpenRouter models this service
    accepts for question generation.
    """
    # (id, display name, short description) for each supported model.
    catalog = (
        (
            "qwen/qwen-2.5-coder-32b-instruct",
            "Qwen 2.5 Coder 32B",
            "Fast and efficient model for question generation",
        ),
        (
            "meta-llama/llama-3.3-70b-instruct",
            "Llama 3.3 70B",
            "High-quality model with better reasoning",
        ),
    )
    return {
        "models": [
            {"id": model_id, "name": display_name, "description": blurb}
            for model_id, display_name, blurb in catalog
        ]
    }