Initial commit: KPI Management System Backend
Features:
- FastAPI backend with JWT authentication
- MySQL database with SQLAlchemy ORM
- KPI workflow: draft → pending → approved → evaluation → completed
- Ollama LLM API integration for AI features
- Gitea API integration for version control
- Complete API endpoints for KPI, dashboard, notifications

Tables: KPI_D_* prefix naming convention

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
173
app/services/llm_service.py
Normal file
173
app/services/llm_service.py
Normal file
@@ -0,0 +1,173 @@
|
||||
"""
|
||||
Ollama LLM API 服務
|
||||
"""
|
||||
import json
|
||||
from typing import Optional, List, Generator
|
||||
|
||||
import requests
|
||||
|
||||
from app.core.config import settings
|
||||
|
||||
|
||||
class LLMService:
    """Client for the Ollama server's OpenAI-compatible REST API.

    Design note: all failures (network errors, HTTP errors, malformed
    response bodies) are deliberately converted into in-band
    ``"Error: ..."`` strings rather than raised, so callers can display
    them directly without exception handling. Callers must therefore not
    treat a returned string starting with ``"Error:"`` as model output.
    """

    def __init__(self) -> None:
        # Base URL and default model name come from application settings.
        self.api_url = settings.OLLAMA_API_URL
        self.default_model = settings.OLLAMA_DEFAULT_MODEL

    def list_models(self) -> List[str]:
        """List the model IDs available on the Ollama server.

        Returns:
            A list of model ID strings, or a single-element list
            containing an ``"Error: ..."`` message if the request fails.
        """
        try:
            response = requests.get(f"{self.api_url}/v1/models", timeout=10)
            response.raise_for_status()
            models = response.json()
            # OpenAI-compatible shape: {"data": [{"id": ...}, ...]}
            return [m["id"] for m in models.get("data", [])]
        except Exception as e:
            # Error reported in-band (see class docstring).
            return [f"Error: {str(e)}"]

    def chat(
        self,
        messages: List[dict],
        model: Optional[str] = None,
        temperature: float = 0.7,
        max_tokens: Optional[int] = None,
    ) -> str:
        """Send a non-streaming chat-completion request.

        Args:
            messages: Conversation messages, e.g.
                ``[{"role": "user", "content": "..."}]``.
            model: Model name; defaults to the configured default model.
            temperature: Sampling temperature (0-1).
            max_tokens: Optional cap on generated tokens; omitted from the
                request when ``None``.

        Returns:
            The assistant's reply content, or an ``"Error: ..."`` string
            on failure.
        """
        chat_request = {
            "model": model or self.default_model,
            "messages": messages,
            "temperature": temperature,
        }
        # Fix: the original used a truthiness check (`if max_tokens:`),
        # which silently dropped an explicit max_tokens=0. Only omit the
        # field when the caller did not supply a value.
        if max_tokens is not None:
            chat_request["max_tokens"] = max_tokens

        try:
            response = requests.post(
                f"{self.api_url}/v1/chat/completions",
                json=chat_request,
                timeout=60,
            )
            response.raise_for_status()
            result = response.json()
            return result["choices"][0]["message"]["content"]
        except Exception as e:
            # Broad catch is intentional: covers transport failures as
            # well as KeyError/IndexError on a malformed response body.
            return f"Error: {str(e)}"

    def chat_stream(
        self,
        messages: List[dict],
        model: Optional[str] = None,
        temperature: float = 0.7,
    ) -> Generator[str, None, None]:
        """Send a streaming chat-completion request.

        Args:
            messages: Conversation messages.
            model: Model name; defaults to the configured default model.
            temperature: Sampling temperature.

        Yields:
            Incremental chunks of the assistant's reply, or a single
            ``"Error: ..."`` string if the request fails.
        """
        chat_request = {
            "model": model or self.default_model,
            "messages": messages,
            "temperature": temperature,
            "stream": True,
        }

        try:
            response = requests.post(
                f"{self.api_url}/v1/chat/completions",
                json=chat_request,
                stream=True,
                timeout=120,
            )
            response.raise_for_status()

            # Parse server-sent events: each payload line looks like
            # b"data: {...json...}", terminated by a "data: [DONE]" line.
            for line in response.iter_lines():
                if line:
                    if line.startswith(b"data: "):
                        data_str = line[6:].decode("utf-8")
                        if data_str.strip() != "[DONE]":
                            try:
                                data = json.loads(data_str)
                                if "choices" in data:
                                    delta = data["choices"][0].get("delta", {})
                                    if "content" in delta:
                                        yield delta["content"]
                            except json.JSONDecodeError:
                                # Skip keep-alive / partial frames silently.
                                continue
        except Exception as e:
            yield f"Error: {str(e)}"

    def simple_ask(
        self,
        question: str,
        system_prompt: str = "You are a helpful assistant.",
        model: Optional[str] = None,
    ) -> str:
        """Ask a single question with an optional system prompt.

        Args:
            question: The user's question.
            system_prompt: System prompt prepended to the conversation.
            model: Model name; defaults to the configured default model.

        Returns:
            The assistant's reply (see :meth:`chat` for error semantics).
        """
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": question},
        ]
        return self.chat(messages, model=model)

    def analyze_kpi(self, kpi_data: dict) -> str:
        """Analyze KPI data with the LLM.

        Args:
            kpi_data: KPI-related data; must be JSON-serializable.

        Returns:
            The AI-generated analysis (in Traditional Chinese, per the
            prompt), or an ``"Error: ..."`` string on failure.
        """
        # Prompts are intentionally kept in Traditional Chinese — they are
        # runtime behavior, instructing the model to answer in that language.
        system_prompt = """你是一位專業的 KPI 分析師。
請根據提供的 KPI 數據,給出專業的分析和建議。
回應請使用繁體中文,並保持專業且易懂。"""

        question = f"""請分析以下 KPI 數據:

{json.dumps(kpi_data, ensure_ascii=False, indent=2)}

請提供:
1. 數據摘要
2. 表現評估
3. 改善建議"""

        return self.simple_ask(question, system_prompt)
||||
|
||||
|
||||
# Module-level singleton: import and share this instance across the app.
llm_service = LLMService()
|
||||
Reference in New Issue
Block a user