feat: Add multi-LLM provider support with DeepSeek integration
Major Features:
- ✨ Multi-LLM provider support (DeepSeek, Ollama, OpenAI, Custom)
- 🤖 Admin panel LLM configuration management UI
- 🔄 Dynamic provider switching without restart
- 🧪 Built-in API connection testing
- 🔒 Secure API key management

Backend Changes:
- Add routes/llmConfig.js: Complete LLM config CRUD API
- Update routes/analyze.js: Use database LLM configuration
- Update server.js: Add LLM config routes
- Add scripts/add-deepseek-config.js: DeepSeek setup script

Frontend Changes:
- Update src/pages/AdminPage.jsx: Add LLM Config tab + modal
- Update src/services/api.js: Add LLM config API methods
- Provider presets for DeepSeek, Ollama, OpenAI
- Test connection feature in config modal

Configuration:
- Update .env.example: Add DeepSeek API configuration
- Update package.json: Add llm:add-deepseek script

Documentation:
- Add docs/LLM_CONFIGURATION_GUIDE.md: Complete guide
- Add DEEPSEEK_INTEGRATION.md: Integration summary
- Quick setup instructions for DeepSeek

API Endpoints:
- GET /api/llm-config: List all configurations
- GET /api/llm-config/active: Get active configuration
- POST /api/llm-config: Create configuration
- PUT /api/llm-config/:id: Update configuration
- PUT /api/llm-config/:id/activate: Activate configuration
- DELETE /api/llm-config/:id: Delete configuration
- POST /api/llm-config/test: Test API connection

Database:
- Uses existing llm_configs table
- Only one config active at a time
- Fallback to Ollama if no database config

Security:
- Admin-only access to LLM configuration
- API keys never returned in GET requests
- Audit logging for all config changes
- Cannot delete active configuration

DeepSeek Model:
- Model: deepseek-chat
- High-quality 5 Why analysis
- Excellent Chinese language support
- Cost-effective pricing

🤖 Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
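For context, the endpoints above are consumed through client helpers added to src/services/api.js. That file is not shown on this page, so the following is only a minimal sketch of what such helpers could look like; the `request` wrapper and the method names are assumptions, not the project's actual code:

// Hypothetical client helpers for the endpoints above; `request` is an assumed fetch wrapper
async function request(method, url, body) {
  const res = await fetch(url, {
    method,
    headers: { 'Content-Type': 'application/json' },
    ...(body && { body: JSON.stringify(body) })
  });
  return res.json();
}

export const llmConfigApi = {
  list: () => request('GET', '/api/llm-config'),
  getActive: () => request('GET', '/api/llm-config/active'),
  create: (cfg) => request('POST', '/api/llm-config', cfg),
  update: (id, cfg) => request('PUT', `/api/llm-config/${id}`, cfg),
  activate: (id) => request('PUT', `/api/llm-config/${id}/activate`),
  remove: (id) => request('DELETE', `/api/llm-config/${id}`),
  test: (cfg) => request('POST', '/api/llm-config/test', cfg)
};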
routes/analyze.js

@@ -4,10 +4,37 @@ import Analysis from '../models/Analysis.js';
 import AuditLog from '../models/AuditLog.js';
 import { asyncHandler } from '../middleware/errorHandler.js';
 import { requireAuth } from '../middleware/auth.js';
-import { ollamaConfig } from '../config.js';
+import { ollamaConfig, query } from '../config.js';
 
 const router = express.Router();
 
+/**
+ * Fetch the active LLM configuration from the database
+ */
+async function getActiveLLMConfig() {
+  const [config] = await query(
+    `SELECT provider_name, api_endpoint, api_key, model_name, temperature, max_tokens, timeout_seconds
+     FROM llm_configs
+     WHERE is_active = 1
+     LIMIT 1`
+  );
+
+  // If no database config exists, fall back to the Ollama settings from environment variables
+  if (!config) {
+    return {
+      provider_name: 'Ollama',
+      api_endpoint: ollamaConfig.apiUrl,
+      api_key: null,
+      model_name: ollamaConfig.model,
+      temperature: ollamaConfig.temperature,
+      max_tokens: ollamaConfig.maxTokens,
+      timeout_seconds: ollamaConfig.timeout / 1000
+    };
+  }
+
+  return config;
+}
+
 /**
  * POST /api/analyze
  * Run the 5 Why analysis
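Note that the fallback object mirrors the llm_configs row shape exactly, so downstream code never branches on where the config came from. A hedged illustration of the resolved fallback; the concrete values depend on the local .env and are examples only:

// Example of the resolved fallback config (values illustrative, not the project's actual .env)
const fallback = {
  provider_name: 'Ollama',
  api_endpoint: 'http://localhost:11434', // ollamaConfig.apiUrl
  api_key: null,                          // local Ollama needs no key
  model_name: 'llama3',                   // ollamaConfig.model
  temperature: 0.7,
  max_tokens: 6000,
  timeout_seconds: 120                    // ollamaConfig.timeout / 1000, i.e. ms converted to seconds
};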
@@ -27,6 +54,9 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
   const startTime = Date.now();
 
   try {
+    // Get the active LLM configuration
+    const llmConfig = await getActiveLLMConfig();
+
     // Create the analysis record
     const analysis = await Analysis.create({
       user_id: userId,
@@ -128,11 +158,11 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
   ]
 }`;
 
-    // Call the Ollama API
+    // Call the LLM API (supports DeepSeek, Ollama, etc.)
     const response = await axios.post(
-      `${ollamaConfig.apiUrl}/v1/chat/completions`,
+      `${llmConfig.api_endpoint}/v1/chat/completions`,
       {
-        model: ollamaConfig.model,
+        model: llmConfig.model_name,
         messages: [
           {
             role: 'system',
@@ -143,21 +173,22 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
           content: prompt
         }
       ],
-      temperature: ollamaConfig.temperature,
-      max_tokens: ollamaConfig.maxTokens,
+      temperature: llmConfig.temperature,
+      max_tokens: llmConfig.max_tokens,
       stream: false
     },
     {
-      timeout: ollamaConfig.timeout,
+      timeout: llmConfig.timeout_seconds * 1000,
       headers: {
-        'Content-Type': 'application/json'
+        'Content-Type': 'application/json',
+        ...(llmConfig.api_key && { 'Authorization': `Bearer ${llmConfig.api_key}` })
       }
     }
   );
 
     // Handle the response
     if (!response.data || !response.data.choices || !response.data.choices[0]) {
-      throw new Error('Invalid response from Ollama API');
+      throw new Error(`Invalid response from ${llmConfig.provider_name} API`);
     }
 
     const content = response.data.choices[0].message.content;
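The `...(condition && { key })` spread used for the Authorization header above is a compact idiom for optional object entries; a standalone illustration:

// Standalone illustration of the conditional-spread idiom
const apiKey = null; // e.g. no key configured for local Ollama
const headers = {
  'Content-Type': 'application/json',
  // A falsy left-hand side spreads as nothing, so the header is simply omitted
  ...(apiKey && { Authorization: `Bearer ${apiKey}` })
};
console.log(headers); // { 'Content-Type': 'application/json' }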
@@ -226,6 +257,9 @@ router.post('/translate', requireAuth, asyncHandler(async (req, res) => {
   }
 
   try {
+    // Get the active LLM configuration
+    const llmConfig = await getActiveLLMConfig();
+
     // Fetch the analysis result
     const analysis = await Analysis.findById(analysisId);
 
@@ -261,9 +295,9 @@ ${JSON.stringify(analysis.analysis_result, null, 2)}
 }`;
 
     const response = await axios.post(
-      `${ollamaConfig.apiUrl}/v1/chat/completions`,
+      `${llmConfig.api_endpoint}/v1/chat/completions`,
       {
-        model: ollamaConfig.model,
+        model: llmConfig.model_name,
         messages: [
           {
             role: 'system',
@@ -275,11 +309,15 @@ ${JSON.stringify(analysis.analysis_result, null, 2)}
         }
       ],
       temperature: 0.3,
-      max_tokens: ollamaConfig.maxTokens,
+      max_tokens: llmConfig.max_tokens,
       stream: false
     },
     {
-      timeout: ollamaConfig.timeout
+      timeout: llmConfig.timeout_seconds * 1000,
+      headers: {
+        'Content-Type': 'application/json',
+        ...(llmConfig.api_key && { 'Authorization': `Bearer ${llmConfig.api_key}` })
+      }
     }
   );
 
routes/llmConfig.js (new file, 305 lines)

@@ -0,0 +1,305 @@
import express from 'express';
import { query } from '../config.js';
import { asyncHandler } from '../middleware/errorHandler.js';
import { requireAuth, requireAdmin } from '../middleware/auth.js';
import AuditLog from '../models/AuditLog.js';

const router = express.Router();

/**
 * GET /api/llm-config
 * List the current LLM configurations (visible to all users)
 */
router.get('/', requireAuth, asyncHandler(async (req, res) => {
  const configs = await query(
    `SELECT id, provider_name, model_name, is_active, created_at, updated_at
     FROM llm_configs
     ORDER BY is_active DESC, created_at DESC`
  );

  res.json({
    success: true,
    data: configs
  });
}));

/**
 * GET /api/llm-config/active
 * Get the currently active LLM configuration
 */
router.get('/active', requireAuth, asyncHandler(async (req, res) => {
  const [config] = await query(
    `SELECT id, provider_name, api_endpoint, model_name, temperature, max_tokens, timeout_seconds
     FROM llm_configs
     WHERE is_active = 1
     LIMIT 1`
  );

  if (!config) {
    return res.status(404).json({
      success: false,
      error: 'No active LLM configuration found'
    });
  }

  res.json({
    success: true,
    data: config
  });
}));
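A hedged sketch of consuming the active-config endpoint from the browser; the response shapes follow the handler above, and session-based auth is assumed:

// Sketch: read the active configuration
const res = await fetch('/api/llm-config/active');
const body = await res.json();
// 200: { success: true, data: { id, provider_name, api_endpoint, model_name, temperature, max_tokens, timeout_seconds } }
// 404: { success: false, error: 'No active LLM configuration found' }

Note that api_key is deliberately absent from both SELECT lists, which is what backs the commit's claim that API keys are never returned in GET requests.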
/**
 * POST /api/llm-config
 * Create an LLM configuration (admin only)
 */
router.post('/', requireAdmin, asyncHandler(async (req, res) => {
  const {
    provider_name,
    api_endpoint,
    api_key,
    model_name,
    temperature,
    max_tokens,
    timeout_seconds
  } = req.body;

  // Validate required fields
  if (!provider_name || !api_endpoint || !model_name) {
    return res.status(400).json({
      success: false,
      error: 'Please fill in all required fields'
    });
  }

  const result = await query(
    `INSERT INTO llm_configs
     (provider_name, api_endpoint, api_key, model_name, temperature, max_tokens, timeout_seconds)
     VALUES (?, ?, ?, ?, ?, ?, ?)`,
    [
      provider_name,
      api_endpoint,
      api_key || null,
      model_name,
      temperature || 0.7,
      max_tokens || 6000,
      timeout_seconds || 120
    ]
  );

  // Write an audit log entry
  await AuditLog.logCreate(
    req.session.userId,
    'llm_config',
    result.insertId,
    { provider_name, model_name },
    req.ip,
    req.get('user-agent')
  );

  res.json({
    success: true,
    message: 'LLM configuration created',
    data: { id: result.insertId }
  });
}));
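For illustration, a request that would register the DeepSeek preset through this endpoint; the field names come from the handler above, while the endpoint URL and key are placeholders:

// Sketch: create a DeepSeek configuration (URL and key are placeholders)
await fetch('/api/llm-config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    provider_name: 'DeepSeek',
    api_endpoint: 'https://api.deepseek.com',
    api_key: 'sk-xxxx',
    model_name: 'deepseek-chat',
    temperature: 0.7,
    max_tokens: 6000,
    timeout_seconds: 120
  })
});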
/**
 * PUT /api/llm-config/:id
 * Update an LLM configuration (admin only)
 */
router.put('/:id', requireAdmin, asyncHandler(async (req, res) => {
  const configId = parseInt(req.params.id);
  const {
    provider_name,
    api_endpoint,
    api_key,
    model_name,
    temperature,
    max_tokens,
    timeout_seconds
  } = req.body;

  // Validate required fields
  if (!provider_name || !api_endpoint || !model_name) {
    return res.status(400).json({
      success: false,
      error: 'Please fill in all required fields'
    });
  }

  // Check that the configuration exists
  const [existing] = await query('SELECT id FROM llm_configs WHERE id = ?', [configId]);
  if (!existing) {
    return res.status(404).json({
      success: false,
      error: 'LLM configuration not found'
    });
  }

  await query(
    `UPDATE llm_configs
     SET provider_name = ?, api_endpoint = ?, api_key = ?, model_name = ?,
         temperature = ?, max_tokens = ?, timeout_seconds = ?, updated_at = NOW()
     WHERE id = ?`,
    [
      provider_name,
      api_endpoint,
      api_key || null,
      model_name,
      temperature || 0.7,
      max_tokens || 6000,
      timeout_seconds || 120,
      configId
    ]
  );

  // Write an audit log entry
  await AuditLog.logUpdate(
    req.session.userId,
    'llm_config',
    configId,
    {},
    { provider_name, model_name },
    req.ip,
    req.get('user-agent')
  );

  res.json({
    success: true,
    message: 'LLM configuration updated'
  });
}));
/**
 * PUT /api/llm-config/:id/activate
 * Activate a specific LLM configuration (admin only)
 */
router.put('/:id/activate', requireAdmin, asyncHandler(async (req, res) => {
  const configId = parseInt(req.params.id);

  // Check that the configuration exists
  const [existing] = await query('SELECT id, provider_name FROM llm_configs WHERE id = ?', [configId]);
  if (!existing) {
    return res.status(404).json({
      success: false,
      error: 'LLM configuration not found'
    });
  }

  // Deactivate all configurations first
  await query('UPDATE llm_configs SET is_active = 0');

  // Activate the specified configuration
  await query('UPDATE llm_configs SET is_active = 1, updated_at = NOW() WHERE id = ?', [configId]);

  // Write an audit log entry
  await AuditLog.logUpdate(
    req.session.userId,
    'llm_config',
    configId,
    { is_active: 0 },
    { is_active: 1 },
    req.ip,
    req.get('user-agent')
  );

  res.json({
    success: true,
    message: `Activated the ${existing.provider_name} configuration`
  });
}));
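The deactivate-all-then-activate pair above is what enforces the single-active invariant noted in the commit message. A usage sketch from an admin client; the config id is hypothetical:

// Sketch: activate configuration 3, then confirm which provider is live
await fetch('/api/llm-config/3/activate', { method: 'PUT' });
const { data } = await (await fetch('/api/llm-config/active')).json();
console.log(data.provider_name); // e.g. 'DeepSeek'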
/**
 * DELETE /api/llm-config/:id
 * Delete an LLM configuration (admin only)
 */
router.delete('/:id', requireAdmin, asyncHandler(async (req, res) => {
  const configId = parseInt(req.params.id);

  // Check whether this is the active configuration
  const [existing] = await query('SELECT is_active FROM llm_configs WHERE id = ?', [configId]);
  if (!existing) {
    return res.status(404).json({
      success: false,
      error: 'LLM configuration not found'
    });
  }

  if (existing.is_active) {
    return res.status(400).json({
      success: false,
      error: 'Cannot delete the active configuration'
    });
  }

  await query('DELETE FROM llm_configs WHERE id = ?', [configId]);

  // Write an audit log entry
  await AuditLog.logDelete(
    req.session.userId,
    'llm_config',
    configId,
    {},
    req.ip,
    req.get('user-agent')
  );

  res.json({
    success: true,
    message: 'LLM configuration deleted'
  });
}));
/**
 * POST /api/llm-config/test
 * Test an LLM configuration's connectivity (admin only)
 */
router.post('/test', requireAdmin, asyncHandler(async (req, res) => {
  const { api_endpoint, api_key, model_name } = req.body;

  if (!api_endpoint || !model_name) {
    return res.status(400).json({
      success: false,
      error: 'Please provide an API endpoint and a model name'
    });
  }

  try {
    const axios = (await import('axios')).default;

    const response = await axios.post(
      `${api_endpoint}/v1/chat/completions`,
      {
        model: model_name,
        messages: [
          { role: 'user', content: 'Hello' }
        ],
        max_tokens: 10
      },
      {
        timeout: 10000,
        headers: {
          'Content-Type': 'application/json',
          ...(api_key && { 'Authorization': `Bearer ${api_key}` })
        }
      }
    );

    if (response.data && response.data.choices) {
      res.json({
        success: true,
        message: 'LLM API connection test succeeded'
      });
    } else {
      throw new Error('Invalid API response format');
    }
  } catch (error) {
    res.status(500).json({
      success: false,
      error: 'LLM API connection test failed',
      message: error.message
    });
  }
}));
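A sketch of what the admin UI's test-connection feature would send to this endpoint before saving a config; all values are placeholders:

// Sketch: probe a candidate configuration (values are placeholders)
const result = await fetch('/api/llm-config/test', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    api_endpoint: 'http://localhost:11434',
    api_key: null,
    model_name: 'llama3'
  })
}).then(r => r.json());
console.log(result.message); // 'LLM API connection test succeeded', or the upstream error detail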
export default router;
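The commit message says server.js gains these routes; a minimal sketch of the wiring, assuming a standard Express setup (only the import path comes from this commit):

// Sketch: mounting the router in server.js (surrounding app setup assumed)
import express from 'express';
import llmConfigRouter from './routes/llmConfig.js';

const app = express();
app.use(express.json());
app.use('/api/llm-config', llmConfigRouter);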