feat: Implement role-based access control (RBAC) with 3-tier authorization

- Add 3 user roles: user, admin, super_admin
- Restrict LLM config management to super_admin only
- Restrict audit logs and statistics to super_admin only
- Update AdminPage with role-based tab visibility
- Add complete 5 Why prompt from 5why-analyzer.jsx
- Add system documentation and authorization guide
- Add ErrorModal component and seed test users script

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
Author: donald
Date: 2025-12-08 19:29:28 +08:00
Parent: 957003bc7c
Commit: 66cdcacce9
11 changed files with 1791 additions and 158 deletions
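The hunks below swap requireAdmin for requireSuperAdmin on the sensitive routes. The middleware itself is imported from ../middleware/auth.js and is not shown in this view; a minimal sketch of what such a guard could look like, assuming the session stores the user's role next to userId (the role field name is an assumption), is:

// Hypothetical sketch only; the real implementation lives in ../middleware/auth.js.
export function requireSuperAdmin(req, res, next) {
  // The routes below already rely on req.session.userId, so a session-based check is assumed.
  if (!req.session || !req.session.userId) {
    return res.status(401).json({ success: false, error: 'Not authenticated' });
  }
  if (req.session.role !== 'super_admin') {
    return res.status(403).json({ success: false, error: 'super_admin role required' });
  }
  next();
}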

View File

@@ -219,7 +219,7 @@ router.get('/analyses', requireAdmin, asyncHandler(async (req, res) => {
* GET /api/admin/audit-logs
* Get audit logs
*/
router.get('/audit-logs', requireAdmin, asyncHandler(async (req, res) => {
router.get('/audit-logs', requireSuperAdmin, asyncHandler(async (req, res) => {
const page = parseInt(req.query.page) || 1;
const limit = parseInt(req.query.limit) || 50;
const filters = {
@@ -243,7 +243,7 @@ router.get('/audit-logs', requireAdmin, asyncHandler(async (req, res) => {
* GET /api/admin/statistics
* Get full statistics
*/
router.get('/statistics', requireAdmin, asyncHandler(async (req, res) => {
router.get('/statistics', requireSuperAdmin, asyncHandler(async (req, res) => {
const overallStats = await Analysis.getStatistics();
const users = await User.getAll(1, 1000);

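With this change, a session holding the plain admin role can still reach routes such as /analyses above, but is rejected on audit logs and statistics. An illustrative client-side check (the 403 status is an assumption about what requireSuperAdmin returns):

// Illustrative only; assumes the browser session cookie is sent with the request.
const res = await fetch('/api/admin/audit-logs?page=1&limit=50', { credentials: 'include' });
if (res.status === 403) {
  // Plain admin accounts land here after this commit; super_admin still gets the data.
  console.warn('audit logs now require the super_admin role');
}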
View File

@@ -13,7 +13,7 @@ const router = express.Router();
*/
async function getActiveLLMConfig() {
const [config] = await query(
`SELECT provider_name, api_endpoint, api_key, model_name, temperature, max_tokens, timeout_seconds
`SELECT provider, api_url, api_key, model_name, temperature, max_tokens, timeout
FROM llm_configs
WHERE is_active = 1
LIMIT 1`
@@ -22,13 +22,13 @@ async function getActiveLLMConfig() {
// If there is no database config, fall back to the Ollama settings from environment variables
if (!config) {
return {
provider_name: 'Ollama',
api_endpoint: ollamaConfig.apiUrl,
provider: 'Ollama',
api_url: ollamaConfig.apiUrl,
api_key: null,
model_name: ollamaConfig.model,
temperature: ollamaConfig.temperature,
max_tokens: ollamaConfig.maxTokens,
timeout_seconds: ollamaConfig.timeout / 1000
timeout: ollamaConfig.timeout
};
}
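The queries in this file now use the renamed llm_configs columns (provider, api_url, timeout, with timeout stored in milliseconds). The schema change itself is not visible in these hunks; a hypothetical migration, reusing the same query helper, might look like:

// Hypothetical migration sketch; the actual schema change ships elsewhere in this commit
// and may differ (column types and the seconds-to-milliseconds backfill are guesses).
import { query } from '../config.js';

export async function renameLlmConfigColumns() {
  await query(`ALTER TABLE llm_configs CHANGE provider_name provider VARCHAR(100) NOT NULL`);
  await query(`ALTER TABLE llm_configs CHANGE api_endpoint api_url VARCHAR(255) NOT NULL`);
  await query(`ALTER TABLE llm_configs CHANGE timeout_seconds timeout INT NOT NULL DEFAULT 120000`);
  // Existing rows stored seconds (e.g. 120); scale them up to milliseconds once.
  await query(`UPDATE llm_configs SET timeout = timeout * 1000 WHERE timeout < 1000`);
}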
@@ -52,13 +52,14 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
}
const startTime = Date.now();
let analysis = null;
try {
// Get the active LLM config
const llmConfig = await getActiveLLMConfig();
// Create the analysis record
const analysis = await Analysis.create({
analysis = await Analysis.create({
user_id: userId,
finding,
job_content: jobContent,
@@ -121,7 +122,7 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
注意:
- 5 Why 的目的不是「湊滿五個問題」,而是穿透表面症狀直達根本原因
- 若在第 3 或第 4 個 Why 就已找到真正的根本原因,可以停止(設為 null)
- 若在第 3 或第 4 個 Why 就已找到真正的根本原因,可以停止
- 每個 Why 必須標註是「已驗證事實」還是「待驗證假設」
- 最終對策必須是「永久性對策」
@@ -159,26 +160,42 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
}`;
// Call the LLM API (supports DeepSeek, Ollama, etc.)
// DeepSeek caps max_tokens at 8192, so make sure the request stays below that
const effectiveMaxTokens = Math.min(
Math.max(parseInt(llmConfig.max_tokens) || 4000, 4000),
8000 // DeepSeek 最大限制
);
const effectiveTemperature = parseFloat(llmConfig.temperature) || 0.7;
console.log('Using max_tokens:', effectiveMaxTokens, 'temperature:', effectiveTemperature);
const response = await axios.post(
`${llmConfig.api_endpoint}/v1/chat/completions`,
`${llmConfig.api_url}/v1/chat/completions`,
{
model: llmConfig.model_name,
messages: [
{
role: 'system',
content: 'You are an expert consultant specializing in 5 Why root cause analysis. You always respond in valid JSON format without any markdown code blocks.'
content: `你是 5 Why 根因分析專家。
重要規則:
1. 只回覆 JSON,不要任何其他文字
2. 不要使用 markdown 代碼塊
3. 直接以 { 開頭,以 } 結尾
4. 確保 JSON 格式正確完整
5. analyses 陣列必須包含 3 個分析角度
6. 每個角度的 whys 陣列包含 3-5 個 why`
},
{
role: 'user',
content: prompt
}
],
temperature: llmConfig.temperature,
max_tokens: llmConfig.max_tokens,
temperature: effectiveTemperature,
max_tokens: effectiveMaxTokens,
stream: false
},
{
timeout: llmConfig.timeout_seconds * 1000,
timeout: llmConfig.timeout,
headers: {
'Content-Type': 'application/json',
...(llmConfig.api_key && { 'Authorization': `Bearer ${llmConfig.api_key}` })
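For reference, the effectiveMaxTokens clamp above keeps any configured value inside a 4000-8000 window before the request is sent; a few sample inputs (illustrative only):

// Same expression as in the hunk above, applied to sample configured values.
const clampTokens = (v) => Math.min(Math.max(parseInt(v) || 4000, 4000), 8000);
clampTokens(2000);      // 4000  (raised to the floor)
clampTokens(6000);      // 6000  (left unchanged)
clampTokens('12000');   // 8000  (capped below DeepSeek's 8192 limit)
clampTokens(undefined); // 4000  (fallback when nothing is configured)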
@@ -188,12 +205,97 @@ router.post('/', requireAuth, asyncHandler(async (req, res) => {
// Handle the response
if (!response.data || !response.data.choices || !response.data.choices[0]) {
throw new Error(`Invalid response from ${llmConfig.provider_name} API`);
throw new Error(`Invalid response from ${llmConfig.provider} API`);
}
const content = response.data.choices[0].message.content;
const cleanContent = content.replace(/```json|```/g, '').trim();
const result = JSON.parse(cleanContent);
console.log('LLM Response length:', content.length);
console.log('LLM Response (first 500 chars):', content.substring(0, 500));
// Clean up the response content
let cleanContent = content
.replace(/```json\s*/gi, '')
.replace(/```\s*/g, '')
.replace(/<\|[^|]*\|>/g, '') // strip special markers such as <|channel|>
.replace(/<think>[\s\S]*?<\/think>/gi, '') // strip reasoning traces
.replace(/^[\s\S]*?(?=\{)/m, '') // strip everything before the first {
.trim();
// Locate where the JSON starts and ends
const jsonStart = cleanContent.indexOf('{');
const jsonEnd = cleanContent.lastIndexOf('}');
if (jsonStart === -1) {
console.error('No JSON found in response:', cleanContent.substring(0, 500));
throw new Error('LLM 回應格式錯誤,無法找到 JSON');
}
// Extract the JSON portion
cleanContent = cleanContent.substring(jsonStart, jsonEnd + 1);
console.log('Extracted JSON length:', cleanContent.length);
// Try to parse the JSON
let result;
try {
result = JSON.parse(cleanContent);
} catch (parseError) {
console.log('JSON parse failed:', parseError.message);
console.log('Attempting to fix JSON...');
// Try to fix common issues
let fixedContent = cleanContent
// fix unescaped newlines
.replace(/\n/g, '\\n')
.replace(/\r/g, '\\r')
.replace(/\t/g, '\\t')
// fix trailing commas
.replace(/,(\s*[\}\]])/g, '$1')
// fix missing commas
.replace(/"\s*\n\s*"/g, '",\n"')
.replace(/\}\s*\{/g, '},{')
.replace(/\]\s*\[/g, '],[');
try {
result = JSON.parse(fixedContent);
console.log('Fixed JSON parse successful');
} catch (fixError) {
// Last resort: try a more aggressive repair
console.log('Aggressive fix attempt...');
// Count brace/bracket balance, ignoring characters inside strings
let braces = 0, brackets = 0, inStr = false, escape = false;
for (const c of fixedContent) {
if (escape) { escape = false; continue; }
if (c === '\\') { escape = true; continue; }
if (c === '"') { inStr = !inStr; continue; }
if (!inStr) {
if (c === '{') braces++;
else if (c === '}') braces--;
else if (c === '[') brackets++;
else if (c === ']') brackets--;
}
}
// Append any missing closing brackets/braces
fixedContent = fixedContent.replace(/,\s*$/, '');
while (brackets > 0) { fixedContent += ']'; brackets--; }
while (braces > 0) { fixedContent += '}'; braces--; }
try {
result = JSON.parse(fixedContent);
console.log('Aggressive fix successful');
} catch (finalError) {
console.error('All JSON fix attempts failed');
console.error('Original content (first 1000):', cleanContent.substring(0, 1000));
throw new Error(`JSON 解析失敗。請重試或簡化輸入內容。`);
}
}
}
// Validate the result structure
if (!result.problemRestatement || !result.analyses || !Array.isArray(result.analyses)) {
throw new Error('LLM 回應缺少必要欄位 (problemRestatement 或 analyses)');
}
// Compute the processing time
const processingTime = Math.floor((Date.now() - startTime) / 1000);
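The bracket-balancing fallback above can be exercised on its own; a standalone sketch (the helper name is illustrative, not part of the commit) that closes a truncated LLM response:

// Standalone version of the final repair step: count unclosed braces/brackets
// outside of string literals, drop a dangling comma, and append the missing closers.
function closeTruncatedJson(text) {
  let braces = 0, brackets = 0, inStr = false, escape = false;
  for (const c of text) {
    if (escape) { escape = false; continue; }
    if (c === '\\') { escape = true; continue; }
    if (c === '"') { inStr = !inStr; continue; }
    if (!inStr) {
      if (c === '{') braces++;
      else if (c === '}') braces--;
      else if (c === '[') brackets++;
      else if (c === ']') brackets--;
    }
  }
  let fixed = text.replace(/,\s*$/, '');
  while (brackets > 0) { fixed += ']'; brackets--; }
  while (braces > 0) { fixed += '}'; braces--; }
  return fixed;
}

// A response cut off right after a completed array element becomes parseable again:
JSON.parse(closeTruncatedJson('{"analyses": [{"why": "step 1"},'));
// -> { analyses: [ { why: 'step 1' } ] }
// Note: appending all ']' before all '}' (as in the hunk above) only repairs truncations
// that do not leave an object open inside an open array.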
@@ -295,7 +397,7 @@ ${JSON.stringify(analysis.analysis_result, null, 2)}
}`;
const response = await axios.post(
`${llmConfig.api_endpoint}/v1/chat/completions`,
`${llmConfig.api_url}/v1/chat/completions`,
{
model: llmConfig.model_name,
messages: [
@@ -313,7 +415,7 @@ ${JSON.stringify(analysis.analysis_result, null, 2)}
stream: false
},
{
timeout: llmConfig.timeout_seconds * 1000,
timeout: llmConfig.timeout,
headers: {
'Content-Type': 'application/json',
...(llmConfig.api_key && { 'Authorization': `Bearer ${llmConfig.api_key}` })
@@ -322,7 +424,22 @@ ${JSON.stringify(analysis.analysis_result, null, 2)}
);
const content = response.data.choices[0].message.content;
const cleanContent = content.replace(/```json|```/g, '').trim();
// Clean the response: strip markdown code fences and special markers
let cleanContent = content
.replace(/```json\s*/gi, '')
.replace(/```\s*/g, '')
.replace(/<\|[^|]*\|>/g, '')
.replace(/^[^{]*/, '')
.trim();
// Try to extract the JSON object
const jsonMatch = cleanContent.match(/\{[\s\S]*\}/);
if (!jsonMatch) {
throw new Error('翻譯結果格式錯誤');
}
cleanContent = jsonMatch[0];
const result = JSON.parse(cleanContent);
res.json({

View File

@@ -1,7 +1,7 @@
import express from 'express';
import { query } from '../config.js';
import { asyncHandler } from '../middleware/errorHandler.js';
import { requireAuth, requireAdmin } from '../middleware/auth.js';
import { requireAuth, requireSuperAdmin } from '../middleware/auth.js';
import AuditLog from '../models/AuditLog.js';
const router = express.Router();
@@ -12,7 +12,7 @@ const router = express.Router();
*/
router.get('/', requireAuth, asyncHandler(async (req, res) => {
const configs = await query(
`SELECT id, provider_name, model_name, is_active, created_at, updated_at
`SELECT id, provider, api_url, model_name, is_active, created_at, updated_at
FROM llm_configs
ORDER BY is_active DESC, created_at DESC`
);
@@ -29,7 +29,7 @@ router.get('/', requireAuth, asyncHandler(async (req, res) => {
*/
router.get('/active', requireAuth, asyncHandler(async (req, res) => {
const [config] = await query(
`SELECT id, provider_name, api_endpoint, model_name, temperature, max_tokens, timeout_seconds
`SELECT id, provider, api_url, model_name, temperature, max_tokens, timeout
FROM llm_configs
WHERE is_active = 1
LIMIT 1`
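Clients that read the active configuration now receive the renamed fields; the shape is roughly as follows (the response wrapper and the values are illustrative, not taken from this diff):

// Illustrative shape for GET /api/llm-config/active after this commit.
const exampleActiveConfig = {
  success: true,
  data: {
    id: 1,
    provider: 'DeepSeek',                 // was provider_name
    api_url: 'https://api.deepseek.com',  // was api_endpoint
    model_name: 'deepseek-chat',
    temperature: 0.7,
    max_tokens: 6000,
    timeout: 120000                       // was timeout_seconds (120)
  }
};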
@@ -52,19 +52,19 @@ router.get('/active', requireAuth, asyncHandler(async (req, res) => {
* POST /api/llm-config
* Create an LLM config (admin only)
*/
router.post('/', requireAdmin, asyncHandler(async (req, res) => {
router.post('/', requireSuperAdmin, asyncHandler(async (req, res) => {
const {
provider_name,
api_endpoint,
provider,
api_url,
api_key,
model_name,
temperature,
max_tokens,
timeout_seconds
timeout
} = req.body;
// Validate required fields
if (!provider_name || !api_endpoint || !model_name) {
if (!provider || !api_url || !model_name) {
return res.status(400).json({
success: false,
error: '請填寫所有必填欄位'
@@ -73,16 +73,16 @@ router.post('/', requireAdmin, asyncHandler(async (req, res) => {
const result = await query(
`INSERT INTO llm_configs
(provider_name, api_endpoint, api_key, model_name, temperature, max_tokens, timeout_seconds)
(provider, api_url, api_key, model_name, temperature, max_tokens, timeout)
VALUES (?, ?, ?, ?, ?, ?, ?)`,
[
provider_name,
api_endpoint,
provider,
api_url,
api_key || null,
model_name,
temperature || 0.7,
max_tokens || 6000,
timeout_seconds || 120
timeout || 120000
]
);
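Callers creating a config must now send the renamed fields, with timeout in milliseconds; an illustrative request (endpoint from the route comment, payload fields from the destructuring and defaults above):

// Illustrative request only; requires a super_admin session cookie after this commit.
await fetch('/api/llm-config', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  credentials: 'include',
  body: JSON.stringify({
    provider: 'DeepSeek',                 // was provider_name
    api_url: 'https://api.deepseek.com',  // was api_endpoint
    api_key: 'sk-...',                    // optional
    model_name: 'deepseek-chat',
    temperature: 0.7,
    max_tokens: 6000,
    timeout: 120000                       // was timeout_seconds (120); now milliseconds
  })
});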
@@ -91,7 +91,7 @@ router.post('/', requireAdmin, asyncHandler(async (req, res) => {
req.session.userId,
'llm_config',
result.insertId,
{ provider_name, model_name },
{ provider, model_name },
req.ip,
req.get('user-agent')
);
@@ -107,20 +107,20 @@ router.post('/', requireAdmin, asyncHandler(async (req, res) => {
* PUT /api/llm-config/:id
* Update an LLM config (admin only)
*/
router.put('/:id', requireAdmin, asyncHandler(async (req, res) => {
router.put('/:id', requireSuperAdmin, asyncHandler(async (req, res) => {
const configId = parseInt(req.params.id);
const {
provider_name,
api_endpoint,
provider,
api_url,
api_key,
model_name,
temperature,
max_tokens,
timeout_seconds
timeout
} = req.body;
// Validate required fields
if (!provider_name || !api_endpoint || !model_name) {
if (!provider || !api_url || !model_name) {
return res.status(400).json({
success: false,
error: '請填寫所有必填欄位'
@@ -138,17 +138,17 @@ router.put('/:id', requireAdmin, asyncHandler(async (req, res) => {
await query(
`UPDATE llm_configs
SET provider_name = ?, api_endpoint = ?, api_key = ?, model_name = ?,
temperature = ?, max_tokens = ?, timeout_seconds = ?, updated_at = NOW()
SET provider = ?, api_url = ?, api_key = ?, model_name = ?,
temperature = ?, max_tokens = ?, timeout = ?, updated_at = NOW()
WHERE id = ?`,
[
provider_name,
api_endpoint,
provider,
api_url,
api_key || null,
model_name,
temperature || 0.7,
max_tokens || 6000,
timeout_seconds || 120,
timeout || 120000,
configId
]
);
@@ -159,7 +159,7 @@ router.put('/:id', requireAdmin, asyncHandler(async (req, res) => {
'llm_config',
configId,
{},
{ provider_name, model_name },
{ provider, model_name },
req.ip,
req.get('user-agent')
);
@@ -174,11 +174,11 @@ router.put('/:id', requireAdmin, asyncHandler(async (req, res) => {
* PUT /api/llm-config/:id/activate
* Activate a specific LLM config (admin only)
*/
router.put('/:id/activate', requireAdmin, asyncHandler(async (req, res) => {
router.put('/:id/activate', requireSuperAdmin, asyncHandler(async (req, res) => {
const configId = parseInt(req.params.id);
// Check that the config exists
const [existing] = await query('SELECT id, provider_name FROM llm_configs WHERE id = ?', [configId]);
const [existing] = await query('SELECT id, provider FROM llm_configs WHERE id = ?', [configId]);
if (!existing) {
return res.status(404).json({
success: false,
@@ -205,7 +205,7 @@ router.put('/:id/activate', requireAdmin, asyncHandler(async (req, res) => {
res.json({
success: true,
message: `已啟用 ${existing.provider_name} 配置`
message: `已啟用 ${existing.provider} 配置`
});
}));
@@ -213,7 +213,7 @@ router.put('/:id/activate', requireAdmin, asyncHandler(async (req, res) => {
* DELETE /api/llm-config/:id
* Delete an LLM config (admin only)
*/
router.delete('/:id', requireAdmin, asyncHandler(async (req, res) => {
router.delete('/:id', requireSuperAdmin, asyncHandler(async (req, res) => {
const configId = parseInt(req.params.id);
// Check whether this is the currently active config
@@ -254,10 +254,10 @@ router.delete('/:id', requireAdmin, asyncHandler(async (req, res) => {
* POST /api/llm-config/test
* Test the LLM config connection (admin only)
*/
router.post('/test', requireAdmin, asyncHandler(async (req, res) => {
const { api_endpoint, api_key, model_name } = req.body;
router.post('/test', requireSuperAdmin, asyncHandler(async (req, res) => {
const { api_url, api_key, model_name } = req.body;
if (!api_endpoint || !model_name) {
if (!api_url || !model_name) {
return res.status(400).json({
success: false,
error: '請提供 API 端點和模型名稱'
@@ -268,7 +268,7 @@ router.post('/test', requireAdmin, asyncHandler(async (req, res) => {
const axios = (await import('axios')).default;
const response = await axios.post(
`${api_endpoint}/v1/chat/completions`,
`${api_url}/v1/chat/completions`,
{
model: model_name,
messages: [