Files
5why-analyzer/routes/llmConfig.js
donald 957003bc7c feat: Add multi-LLM provider support with DeepSeek integration
Major Features:
- Multi-LLM provider support (DeepSeek, Ollama, OpenAI, Custom)
- 🤖 Admin panel LLM configuration management UI
- 🔄 Dynamic provider switching without restart
- 🧪 Built-in API connection testing
- 🔒 Secure API key management

Backend Changes:
- Add routes/llmConfig.js: Complete LLM config CRUD API
- Update routes/analyze.js: Use database LLM configuration
- Update server.js: Add LLM config routes
- Add scripts/add-deepseek-config.js: DeepSeek setup script

Frontend Changes:
- Update src/pages/AdminPage.jsx: Add LLM Config tab + modal
- Update src/services/api.js: Add LLM config API methods
- Provider presets for DeepSeek, Ollama, OpenAI (sketched after this list)
- Test connection feature in config modal
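A plausible shape for those presets (the default endpoints are the providers' OpenAI-compatible base URLs; model names other than deepseek-chat are illustrative assumptions, not taken from the diff):

// Sketch only: preset defaults offered in the admin config modal.
// Model names other than deepseek-chat are assumptions.
export const PROVIDER_PRESETS = {
  DeepSeek: { api_endpoint: 'https://api.deepseek.com', model_name: 'deepseek-chat' },
  Ollama: { api_endpoint: 'http://localhost:11434', model_name: 'llama3' },
  OpenAI: { api_endpoint: 'https://api.openai.com', model_name: 'gpt-4o-mini' }
};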

Configuration:
- Update .env.example: Add DeepSeek API configuration (example shape below)
- Update package.json: Add llm:add-deepseek script
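The exact keys in .env.example are not shown here; a plausible shape for what scripts/add-deepseek-config.js might read when seeding the database (variable names are assumptions; the defaults mirror routes/llmConfig.js):

// Sketch only: hypothetical environment variables, e.g.
//   DEEPSEEK_API_KEY=sk-...
//   DEEPSEEK_API_ENDPOINT=https://api.deepseek.com
const deepseekConfig = {
  provider_name: 'DeepSeek',
  api_endpoint: process.env.DEEPSEEK_API_ENDPOINT || 'https://api.deepseek.com',
  api_key: process.env.DEEPSEEK_API_KEY,
  model_name: 'deepseek-chat',
  temperature: 0.7,
  max_tokens: 6000,
  timeout_seconds: 120
};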

Documentation:
- Add docs/LLM_CONFIGURATION_GUIDE.md: Complete guide
- Add DEEPSEEK_INTEGRATION.md: Integration summary
- Quick setup instructions for DeepSeek

API Endpoints (see the usage sketch after this list):
- GET /api/llm-config: List all configurations
- GET /api/llm-config/active: Get active configuration
- POST /api/llm-config: Create configuration
- PUT /api/llm-config/:id: Update configuration
- PUT /api/llm-config/:id/activate: Activate configuration
- DELETE /api/llm-config/:id: Delete configuration
- POST /api/llm-config/test: Test API connection
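A hedged end-to-end example of the admin flow against these endpoints, using Node 18+ global fetch (BASE_URL, the port, and the session-cookie handling are assumptions; field names match routes/llmConfig.js below):

// Sketch only: create, activate, and test a DeepSeek configuration.
// Requires an admin session cookie; BASE_URL and the port are placeholders.
const BASE_URL = process.env.BASE_URL || 'http://localhost:3000';
const headers = {
  'Content-Type': 'application/json',
  Cookie: process.env.SESSION_COOKIE || ''
};

// 1. POST /api/llm-config – create a configuration
const created = await fetch(`${BASE_URL}/api/llm-config`, {
  method: 'POST',
  headers,
  body: JSON.stringify({
    provider_name: 'DeepSeek',
    api_endpoint: 'https://api.deepseek.com',
    api_key: process.env.DEEPSEEK_API_KEY,
    model_name: 'deepseek-chat'
  })
}).then((r) => r.json());

// 2. PUT /api/llm-config/:id/activate – deactivates every other configuration
await fetch(`${BASE_URL}/api/llm-config/${created.data.id}/activate`, { method: 'PUT', headers });

// 3. POST /api/llm-config/test – verify connectivity before relying on the config
const test = await fetch(`${BASE_URL}/api/llm-config/test`, {
  method: 'POST',
  headers,
  body: JSON.stringify({
    api_endpoint: 'https://api.deepseek.com',
    api_key: process.env.DEEPSEEK_API_KEY,
    model_name: 'deepseek-chat'
  })
}).then((r) => r.json());
console.log(test.message);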

Database:
- Uses existing llm_configs table
- Only one config active at a time
- Fallback to Ollama if no database config (see the sketch after this list)
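A minimal sketch of how routes/analyze.js might resolve the configuration with that fallback (the Ollama defaults and environment variable names are assumptions, not taken from the diff):

// Sketch only: resolve the active LLM config, falling back to a local Ollama instance.
import { query } from '../config.js';

export async function resolveLlmConfig() {
  const [config] = await query(
    `SELECT provider_name, api_endpoint, api_key, model_name,
            temperature, max_tokens, timeout_seconds
     FROM llm_configs
     WHERE is_active = 1
     LIMIT 1`
  );
  if (config) return config;
  // No active row in llm_configs: fall back to Ollama (values below are assumed defaults)
  return {
    provider_name: 'Ollama',
    api_endpoint: process.env.OLLAMA_ENDPOINT || 'http://localhost:11434',
    api_key: null,
    model_name: process.env.OLLAMA_MODEL || 'llama3',
    temperature: 0.7,
    max_tokens: 6000,
    timeout_seconds: 120
  };
}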

Security:
- Admin-only access to LLM configuration
- API keys never returned in GET requests
- Audit logging for all config changes
- Cannot delete active configuration

DeepSeek Model:
- Model: deepseek-chat
- High-quality 5 Why analysis
- Excellent Chinese language support
- Cost-effective pricing

🤖 Generated with Claude Code
Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-06 00:33:10 +08:00

306 lines
6.8 KiB
JavaScript

import express from 'express';
import { query } from '../config.js';
import { asyncHandler } from '../middleware/errorHandler.js';
import { requireAuth, requireAdmin } from '../middleware/auth.js';
import AuditLog from '../models/AuditLog.js';
const router = express.Router();
/**
 * GET /api/llm-config
 * Get the current LLM configurations (visible to all authenticated users)
 */
router.get('/', requireAuth, asyncHandler(async (req, res) => {
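  // api_key is deliberately omitted from this SELECT so keys are never returned to clients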
  const configs = await query(
    `SELECT id, provider_name, model_name, is_active, created_at, updated_at
     FROM llm_configs
     ORDER BY is_active DESC, created_at DESC`
  );
  res.json({
    success: true,
    data: configs
  });
}));
/**
 * GET /api/llm-config/active
 * Get the currently active LLM configuration
 */
router.get('/active', requireAuth, asyncHandler(async (req, res) => {
  const [config] = await query(
    `SELECT id, provider_name, api_endpoint, model_name, temperature, max_tokens, timeout_seconds
     FROM llm_configs
     WHERE is_active = 1
     LIMIT 1`
  );
  if (!config) {
    return res.status(404).json({
      success: false,
      error: '未找到啟用的 LLM 配置'
    });
  }
  res.json({
    success: true,
    data: config
  });
}));
/**
 * POST /api/llm-config
 * Create a new LLM configuration (admin only)
 */
router.post('/', requireAdmin, asyncHandler(async (req, res) => {
  const {
    provider_name,
    api_endpoint,
    api_key,
    model_name,
    temperature,
    max_tokens,
    timeout_seconds
  } = req.body;
  // Validate required fields
  if (!provider_name || !api_endpoint || !model_name) {
    return res.status(400).json({
      success: false,
      error: '請填寫所有必填欄位'
    });
  }
  const result = await query(
    `INSERT INTO llm_configs
     (provider_name, api_endpoint, api_key, model_name, temperature, max_tokens, timeout_seconds)
     VALUES (?, ?, ?, ?, ?, ?, ?)`,
    [
      provider_name,
      api_endpoint,
      api_key || null,
      model_name,
      temperature || 0.7,
      max_tokens || 6000,
      timeout_seconds || 120
    ]
  );
  // Write an audit log entry
  await AuditLog.logCreate(
    req.session.userId,
    'llm_config',
    result.insertId,
    { provider_name, model_name },
    req.ip,
    req.get('user-agent')
  );
  res.json({
    success: true,
    message: '已新增 LLM 配置',
    data: { id: result.insertId }
  });
}));
/**
 * PUT /api/llm-config/:id
 * Update an LLM configuration (admin only)
 */
router.put('/:id', requireAdmin, asyncHandler(async (req, res) => {
  const configId = parseInt(req.params.id, 10);
  const {
    provider_name,
    api_endpoint,
    api_key,
    model_name,
    temperature,
    max_tokens,
    timeout_seconds
  } = req.body;
  // Validate required fields
  if (!provider_name || !api_endpoint || !model_name) {
    return res.status(400).json({
      success: false,
      error: '請填寫所有必填欄位'
    });
  }
  // Check that the configuration exists
  const [existing] = await query('SELECT id FROM llm_configs WHERE id = ?', [configId]);
  if (!existing) {
    return res.status(404).json({
      success: false,
      error: '找不到此 LLM 配置'
    });
  }
  await query(
    `UPDATE llm_configs
     SET provider_name = ?, api_endpoint = ?, api_key = ?, model_name = ?,
         temperature = ?, max_tokens = ?, timeout_seconds = ?, updated_at = NOW()
     WHERE id = ?`,
    [
      provider_name,
      api_endpoint,
      api_key || null,
      model_name,
      temperature || 0.7,
      max_tokens || 6000,
      timeout_seconds || 120,
      configId
    ]
  );
  // Write an audit log entry
  await AuditLog.logUpdate(
    req.session.userId,
    'llm_config',
    configId,
    {},
    { provider_name, model_name },
    req.ip,
    req.get('user-agent')
  );
  res.json({
    success: true,
    message: '已更新 LLM 配置'
  });
}));
/**
 * PUT /api/llm-config/:id/activate
 * Activate a specific LLM configuration (admin only)
 */
router.put('/:id/activate', requireAdmin, asyncHandler(async (req, res) => {
  const configId = parseInt(req.params.id, 10);
  // Check that the configuration exists
  const [existing] = await query('SELECT id, provider_name FROM llm_configs WHERE id = ?', [configId]);
  if (!existing) {
    return res.status(404).json({
      success: false,
      error: '找不到此 LLM 配置'
    });
  }
  // Deactivate every configuration first...
  await query('UPDATE llm_configs SET is_active = 0');
  // ...then activate the selected one, so only one config is active at a time
  await query('UPDATE llm_configs SET is_active = 1, updated_at = NOW() WHERE id = ?', [configId]);
  // Write an audit log entry
  await AuditLog.logUpdate(
    req.session.userId,
    'llm_config',
    configId,
    { is_active: 0 },
    { is_active: 1 },
    req.ip,
    req.get('user-agent')
  );
  res.json({
    success: true,
    message: `已啟用 ${existing.provider_name} 配置`
  });
}));
/**
 * DELETE /api/llm-config/:id
 * Delete an LLM configuration (admin only)
 */
router.delete('/:id', requireAdmin, asyncHandler(async (req, res) => {
  const configId = parseInt(req.params.id, 10);
  // Check that the configuration exists and whether it is currently active
  const [existing] = await query('SELECT is_active FROM llm_configs WHERE id = ?', [configId]);
  if (!existing) {
    return res.status(404).json({
      success: false,
      error: '找不到此 LLM 配置'
    });
  }
  if (existing.is_active) {
    return res.status(400).json({
      success: false,
      error: '無法刪除啟用中的配置'
    });
  }
  await query('DELETE FROM llm_configs WHERE id = ?', [configId]);
  // Write an audit log entry
  await AuditLog.logDelete(
    req.session.userId,
    'llm_config',
    configId,
    {},
    req.ip,
    req.get('user-agent')
  );
  res.json({
    success: true,
    message: '已刪除 LLM 配置'
  });
}));
/**
 * POST /api/llm-config/test
 * Test connectivity for an LLM configuration (admin only)
 */
router.post('/test', requireAdmin, asyncHandler(async (req, res) => {
  const { api_endpoint, api_key, model_name } = req.body;
  if (!api_endpoint || !model_name) {
    return res.status(400).json({
      success: false,
      error: '請提供 API 端點和模型名稱'
    });
  }
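  // Sends a minimal chat request to the provider's OpenAI-compatible /v1/chat/completions endpoint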
  try {
    const axios = (await import('axios')).default;
    const response = await axios.post(
      `${api_endpoint}/v1/chat/completions`,
      {
        model: model_name,
        messages: [
          { role: 'user', content: 'Hello' }
        ],
        max_tokens: 10
      },
      {
        timeout: 10000,
        headers: {
          'Content-Type': 'application/json',
          ...(api_key && { 'Authorization': `Bearer ${api_key}` })
        }
      }
    );
    if (response.data && response.data.choices) {
      res.json({
        success: true,
        message: 'LLM API 連線測試成功'
      });
    } else {
      throw new Error('Invalid API response format');
    }
  } catch (error) {
    res.status(500).json({
      success: false,
      error: 'LLM API 連線測試失敗',
      message: error.message
    });
  }
}));
export default router;
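Per the commit notes, server.js mounts this router at /api/llm-config; a minimal sketch of that wiring (variable names and middleware setup are assumptions):

// Sketch only: how server.js might register these routes.
// Session and auth middleware are omitted; requireAuth/requireAdmin expect req.session to exist.
import express from 'express';
import llmConfigRoutes from './routes/llmConfig.js';

const app = express();
app.use(express.json());
app.use('/api/llm-config', llmConfigRoutes);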