KPI-management/app/api/llm.py
"""
LLM API
"""
from typing import Optional, List
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import StreamingResponse
from pydantic import BaseModel
from app.api.deps import get_current_user
from app.models.employee import Employee
from app.services.llm_service import llm_service
router = APIRouter(prefix="/api/llm", tags=["LLM"])
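# All routes in this router depend on get_current_user, so every LLM endpoint
# requires an authenticated user (JWT bearer token).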
class ChatMessage(BaseModel):
    """A single chat message."""

    role: str  # "system", "user", or "assistant"
    content: str


class ChatRequest(BaseModel):
    """Chat completion request."""

    messages: List[ChatMessage]
    model: Optional[str] = None  # falls back to the service's default model
    temperature: float = 0.7
    stream: bool = False


class ChatResponse(BaseModel):
    """Chat completion response."""

    content: str
    model: str


class SimpleAskRequest(BaseModel):
    """Single-question request."""

    question: str
    system_prompt: Optional[str] = "You are a helpful assistant."
    model: Optional[str] = None


class KPIAnalyzeRequest(BaseModel):
    """KPI analysis request."""

    kpi_data: dict
@router.get("/models")
def list_models(current_user: Employee = Depends(get_current_user)):
    """
    List the available LLM models.
    """
    models = llm_service.list_models()
    return {"models": models}
@router.post("/chat", response_model=ChatResponse)
def chat(
    data: ChatRequest,
    current_user: Employee = Depends(get_current_user),
):
    """
    Non-streaming chat completion.
    """
    if data.stream:
        raise HTTPException(
            status_code=400,
            detail="Use the /chat/stream endpoint for streaming requests",
        )
    messages = [{"role": m.role, "content": m.content} for m in data.messages]
    content = llm_service.chat(
        messages=messages,
        model=data.model,
        temperature=data.temperature,
    )
    return ChatResponse(
        content=content,
        model=data.model or llm_service.default_model,
    )
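# Example request body for POST /api/llm/chat (contents are illustrative;
# omit "model" to fall back to llm_service.default_model):
#   {
#     "messages": [
#       {"role": "system", "content": "You are a helpful assistant."},
#       {"role": "user", "content": "Summarize this quarter's KPI trend."}
#     ],
#     "temperature": 0.7
#   }
# Response: {"content": "<completion text>", "model": "<model used>"}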
@router.post("/chat/stream")
async def chat_stream(
    data: ChatRequest,
    current_user: Employee = Depends(get_current_user),
):
    """
    Streaming chat completion, delivered as server-sent events (SSE).
    """
    messages = [{"role": m.role, "content": m.content} for m in data.messages]

    def generate():
        for chunk in llm_service.chat_stream(
            messages=messages,
            model=data.model,
            temperature=data.temperature,
        ):
            yield f"data: {chunk}\n\n"
        yield "data: [DONE]\n\n"

    return StreamingResponse(
        generate(),
        media_type="text/event-stream",
    )
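# Minimal client sketch for consuming the stream (URL and token are
# illustrative). Note this framing assumes chunks contain no raw newlines,
# since an embedded "\n" would break the single-line "data: ..." SSE format:
#
#   import requests
#
#   with requests.post(
#       "http://localhost:8000/api/llm/chat/stream",
#       json={"messages": [{"role": "user", "content": "Hello"}]},
#       headers={"Authorization": "Bearer <token>"},
#       stream=True,
#   ) as resp:
#       for line in resp.iter_lines(decode_unicode=True):
#           if line == "data: [DONE]":
#               break
#           if line.startswith("data: "):
#               print(line[len("data: "):], end="", flush=True)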
@router.post("/ask")
def simple_ask(
    data: SimpleAskRequest,
    current_user: Employee = Depends(get_current_user),
):
    """
    Simple one-shot question answering.
    """
    response = llm_service.simple_ask(
        question=data.question,
        system_prompt=data.system_prompt,
        model=data.model,
    )
    return {"answer": response}


@router.post("/analyze-kpi")
def analyze_kpi(
    data: KPIAnalyzeRequest,
    current_user: Employee = Depends(get_current_user),
):
    """
    Analyze KPI data with the LLM.
    """
    analysis = llm_service.analyze_kpi(data.kpi_data)
    return {"analysis": analysis}