feat: Add multi-LLM provider support with DeepSeek integration

Major Features:
- Multi-LLM provider support (DeepSeek, Ollama, OpenAI, Custom)
- 🤖 Admin panel LLM configuration management UI
- 🔄 Dynamic provider switching without restart
- 🧪 Built-in API connection testing
- 🔒 Secure API key management

Backend Changes:
- Add routes/llmConfig.js: Complete LLM config CRUD API
- Update routes/analyze.js: Use database LLM configuration
- Update server.js: Add LLM config routes
- Add scripts/add-deepseek-config.js: DeepSeek setup script

Frontend Changes:
- Update src/pages/AdminPage.jsx: Add LLM Config tab + modal
- Update src/services/api.js: Add LLM config API methods
- Provider presets for DeepSeek, Ollama, OpenAI
- Test connection feature in config modal

Configuration:
- Update .env.example: Add DeepSeek API configuration
- Update package.json: Add llm:add-deepseek script
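
For reference, the npm script presumably maps onto the new setup script; a sketch of the package.json entry (reconstructed, not shown in this diff):

    "scripts": {
      "llm:add-deepseek": "node scripts/add-deepseek-config.js"
    }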

Documentation:
- Add docs/LLM_CONFIGURATION_GUIDE.md: Complete guide
- Add DEEPSEEK_INTEGRATION.md: Integration summary
- Quick setup instructions for DeepSeek

API Endpoints:
- GET /api/llm-config: List all configurations
- GET /api/llm-config/active: Get active configuration
- POST /api/llm-config: Create configuration
- PUT /api/llm-config/:id: Update configuration
- PUT /api/llm-config/:id/activate: Activate configuration
- DELETE /api/llm-config/:id: Delete configuration
- POST /api/llm-config/test: Test API connection
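
A rough usage sketch of these endpoints with axios (payload fields follow the llm_configs columns visible in the diff below; response shapes and auth handling are assumptions):

    // Sketch: driving the config API from the admin UI (admin session required)
    import axios from 'axios';

    // List configs; api_key is never included in the response
    const { data: configs } = await axios.get('/api/llm-config');

    // Verify credentials before activating (payload fields assumed)
    await axios.post('/api/llm-config/test', {
      api_endpoint: 'https://api.deepseek.com',
      api_key: 'sk-...',
      model_name: 'deepseek-chat'
    });

    // Switch providers; takes effect without a server restart
    await axios.put(`/api/llm-config/${configs[0].id}/activate`);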

Database:
- Uses existing llm_configs table
- Only one config active at a time
- Fallback to Ollama if no database config
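
A minimal sketch of the single-active invariant, assuming it is enforced at activation time in routes/llmConfig.js (the actual implementation is not shown in this diff):

    // Sketch: deactivate everything, then activate the chosen row (assumed logic;
    // the parameterized query(sql, params) signature is also an assumption)
    async function activateConfig(id) {
      await query('UPDATE llm_configs SET is_active = 0 WHERE is_active = 1');
      await query('UPDATE llm_configs SET is_active = 1 WHERE id = ?', [id]);
    }

getActiveLLMConfig() in the diff below then relies on `WHERE is_active = 1 LIMIT 1`, with the Ollama environment fallback when no row matches.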

Security:
- Admin-only access to LLM configuration
- API keys never returned in GET requests
- Audit logging for all config changes
- Cannot delete active configuration
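
A plausible shape for the key-redaction rule (the helper name and the has_api_key flag are hypothetical):

    // Sketch: strip api_key before any GET response (hypothetical helper)
    function redactConfig(config) {
      const { api_key, ...safe } = config;
      return { ...safe, has_api_key: Boolean(api_key) };
    }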

DeepSeek Model:
- Model: deepseek-chat
- High-quality 5 Why analysis
- Excellent Chinese language support
- Cost-effective pricing
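
scripts/add-deepseek-config.js presumably seeds a row along these lines (column names match the SELECT in the diff below; the endpoint URL and default values are assumptions):

    // Sketch: seed an inactive DeepSeek row in llm_configs (values illustrative)
    import { query } from '../config.js';

    await query(
      `INSERT INTO llm_configs
         (provider_name, api_endpoint, api_key, model_name, temperature, max_tokens, timeout_seconds, is_active)
       VALUES (?, ?, ?, ?, ?, ?, ?, 0)`,
      ['DeepSeek', 'https://api.deepseek.com', process.env.DEEPSEEK_API_KEY, 'deepseek-chat', 0.7, 4096, 60]
    );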

🤖 Generated with Claude Code
Co-Authored-By: Claude <noreply@anthropic.com>

routes/analyze.js

@@ -4,10 +4,37 @@ import Analysis from '../models/Analysis.js';
 import AuditLog from '../models/AuditLog.js';
 import { asyncHandler } from '../middleware/errorHandler.js';
 import { requireAuth } from '../middleware/auth.js';
-import { ollamaConfig } from '../config.js';
+import { ollamaConfig, query } from '../config.js';
 
 const router = express.Router();
 
+/**
+ * Fetch the active LLM configuration from the database
+ */
+async function getActiveLLMConfig() {
+  const [config] = await query(
+    `SELECT provider_name, api_endpoint, api_key, model_name, temperature, max_tokens, timeout_seconds
+     FROM llm_configs
+     WHERE is_active = 1
+     LIMIT 1`
+  );
+
+  // No database config: fall back to the Ollama settings from environment variables
+  if (!config) {
+    return {
+      provider_name: 'Ollama',
+      api_endpoint: ollamaConfig.apiUrl,
+      api_key: null,
+      model_name: ollamaConfig.model,
+      temperature: ollamaConfig.temperature,
+      max_tokens: ollamaConfig.maxTokens,
+      timeout_seconds: ollamaConfig.timeout / 1000
+    };
+  }
+
+  return config;
+}
+
 /**
  * POST /api/analyze
  * Run the 5 Why analysis
@@ -27,6 +54,9 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
   const startTime = Date.now();
 
   try {
+    // Get the active LLM configuration
+    const llmConfig = await getActiveLLMConfig();
+
     // Create the analysis record
     const analysis = await Analysis.create({
       user_id: userId,
@@ -128,11 +158,11 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
   ]
 }`;
 
-    // Call the Ollama API
+    // Call the LLM API (supports DeepSeek, Ollama, etc.)
     const response = await axios.post(
-      `${ollamaConfig.apiUrl}/v1/chat/completions`,
+      `${llmConfig.api_endpoint}/v1/chat/completions`,
       {
-        model: ollamaConfig.model,
+        model: llmConfig.model_name,
         messages: [
           {
             role: 'system',
@@ -143,21 +173,22 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
             content: prompt
           }
         ],
-        temperature: ollamaConfig.temperature,
-        max_tokens: ollamaConfig.maxTokens,
+        temperature: llmConfig.temperature,
+        max_tokens: llmConfig.max_tokens,
         stream: false
       },
       {
-        timeout: ollamaConfig.timeout,
+        timeout: llmConfig.timeout_seconds * 1000,
         headers: {
-          'Content-Type': 'application/json'
+          'Content-Type': 'application/json',
+          ...(llmConfig.api_key && { 'Authorization': `Bearer ${llmConfig.api_key}` })
         }
       }
     );
 
     // Handle the response
     if (!response.data || !response.data.choices || !response.data.choices[0]) {
-      throw new Error('Invalid response from Ollama API');
+      throw new Error(`Invalid response from ${llmConfig.provider_name} API`);
     }
 
     const content = response.data.choices[0].message.content;
@@ -226,6 +257,9 @@ router.post('/translate', requireAuth, asyncHandler(async (req, res) => {
   }
 
   try {
+    // Get the active LLM configuration
+    const llmConfig = await getActiveLLMConfig();
+
     // Fetch the analysis result
     const analysis = await Analysis.findById(analysisId);
@@ -261,9 +295,9 @@ ${JSON.stringify(analysis.analysis_result, null, 2)}
 }`;
 
     const response = await axios.post(
-      `${ollamaConfig.apiUrl}/v1/chat/completions`,
+      `${llmConfig.api_endpoint}/v1/chat/completions`,
       {
-        model: ollamaConfig.model,
+        model: llmConfig.model_name,
         messages: [
           {
             role: 'system',
@@ -275,11 +309,15 @@ ${JSON.stringify(analysis.analysis_result, null, 2)}
           }
         ],
         temperature: 0.3,
-        max_tokens: ollamaConfig.maxTokens,
+        max_tokens: llmConfig.max_tokens,
         stream: false
       },
       {
-        timeout: ollamaConfig.timeout
+        timeout: llmConfig.timeout_seconds * 1000,
+        headers: {
+          'Content-Type': 'application/json',
+          ...(llmConfig.api_key && { 'Authorization': `Bearer ${llmConfig.api_key}` })
+        }
       }
     );
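
Note on the header spread used above: when api_key is set (DeepSeek, OpenAI) the object spread adds a Bearer token; when it is null (the Ollama fallback) the `&&` short-circuits and the spread adds nothing. The same pattern in isolation:

    const headers = {
      'Content-Type': 'application/json',
      ...(apiKey && { Authorization: `Bearer ${apiKey}` })
    };
    // apiKey = 'sk-...' -> Authorization header sent
    // apiKey = null     -> spreading false adds no keys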