diff --git a/.claude/settings.local.json b/.claude/settings.local.json
index 29432bb..fa2cc58 100644
--- a/.claude/settings.local.json
+++ b/.claude/settings.local.json
@@ -6,7 +6,8 @@
"Bash(git config:*)",
"Bash(git add:*)",
"Bash(git commit:*)",
- "Bash(git push:*)"
+ "Bash(git push:*)",
+ "Bash(curl:*)"
],
"deny": [],
"ask": []
diff --git a/config/llm.config.js b/config/llm.config.js
index f01e432..ae752fc 100644
--- a/config/llm.config.js
+++ b/config/llm.config.js
@@ -33,8 +33,18 @@ const llmConfig = {
timeout: 30000,
},
+ // Claude Configuration
+ claude: {
+ apiKey: process.env.CLAUDE_API_KEY,
+ apiUrl: process.env.CLAUDE_API_URL || 'https://api.anthropic.com/v1',
+ model: process.env.CLAUDE_MODEL || 'claude-3-5-sonnet-20241022',
+ enabled: !!process.env.CLAUDE_API_KEY,
+ timeout: 30000,
+ version: '2023-06-01', // Anthropic API version
+ },
+
// Default LLM Provider
- defaultProvider: 'gemini',
+ defaultProvider: 'claude',
// Common Settings
maxTokens: 2000,
@@ -49,6 +59,7 @@ function getEnabledProviders() {
if (llmConfig.gemini.enabled) enabled.push('gemini');
if (llmConfig.deepseek.enabled) enabled.push('deepseek');
if (llmConfig.openai.enabled) enabled.push('openai');
+ if (llmConfig.claude.enabled) enabled.push('claude');
return enabled;
}
diff --git a/package.json b/package.json
new file mode 100644
index 0000000..ff8fc10
--- /dev/null
+++ b/package.json
@@ -0,0 +1,40 @@
+{
+ "name": "hr-performance-system",
+ "version": "1.0.0",
+ "description": "HR 績效評核系統 - 四卡循環管理平台",
+ "main": "server.js",
+ "scripts": {
+ "start": "node server.js",
+ "dev": "nodemon server.js",
+ "test": "jest",
+ "lint": "eslint .",
+ "format": "prettier --write \"**/*.{js,jsx,json,md}\""
+ },
+ "keywords": [
+ "hr",
+ "performance",
+ "management",
+ "four-card-system"
+ ],
+ "author": "Donald",
+ "license": "ISC",
+ "dependencies": {
+ "axios": "^1.6.2",
+ "cors": "^2.8.5",
+ "dotenv": "^16.3.1",
+ "express": "^4.18.2",
+ "helmet": "^7.1.0",
+ "morgan": "^1.10.0",
+ "mysql2": "^3.6.5"
+ },
+ "devDependencies": {
+ "nodemon": "^3.0.2",
+ "eslint": "^8.55.0",
+ "prettier": "^3.1.1",
+ "jest": "^29.7.0"
+ },
+ "engines": {
+ "node": ">=16.0.0",
+ "npm": ">=8.0.0"
+ }
+}
diff --git a/public/api-proxy-example.html b/public/api-proxy-example.html
new file mode 100644
index 0000000..6ad8130
--- /dev/null
+++ b/public/api-proxy-example.html
@@ -0,0 +1,504 @@
+
+
+
+
+
+ API 代理使用範例 - HR 績效系統
+
+
+
+
+
+
+
+
+
+
❌ 您遇到的錯誤
+
+
CORS 錯誤:
+
Access to fetch at 'https://api.anthropic.com/v1/messages' from origin 'http://127.0.0.1:5000' has been blocked by CORS policy
+
+
+
Storage 錯誤:
+
Access to storage is not allowed from this context
+
+
+
+
+
+
🤔 為什麼會發生這個錯誤?
+
+
原因:
+
+ - 安全限制:瀏覽器的 CORS 政策不允許直接從前端呼叫第三方 API
+ - API 金鑰暴露:在前端直接使用 API 金鑰會洩露給所有使用者
+ - Storage 限制:本地檔案 (file://) 無法使用 localStorage
+
+
+
+
+
+
+
🔄 錯誤 vs 正確的做法
+
+
+
❌ 錯誤:前端直接呼叫
+
+
+fetch('https://api.anthropic.com/v1/messages', {
+ headers: {
+ 'x-api-key': 'sk-ant-...'
+ }
+})
+
+
+
+
+
✅ 正確:透過後端代理
+
+
+fetch('http://localhost:3000/api/llm/generate', {
+ method: 'POST',
+ body: JSON.stringify({...})
+})
+
+
+
+
+
+
+
+
✅ 正確的使用方式
+
+
1. 測試 Claude API 連線
+
+
等待測試...
+
+
2. 使用 Claude 生成內容
+
+
等待生成...
+
+
3. 測試所有 LLM
+
+
等待測試...
+
+
+
+
+
📝 程式碼範例
+
+
測試連線
+
+
+async function testClaudeConnection() {
+ try {
+ const response = await fetch('http://localhost:3000/api/llm/test/claude', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' }
+ });
+
+ const result = await response.json();
+ console.log(result);
+ } catch (error) {
+ console.error(error);
+ }
+}
+
+
+
生成內容
+
+
+async function generateContent(prompt) {
+ try {
+ const response = await fetch('http://localhost:3000/api/llm/generate', {
+ method: 'POST',
+ headers: { 'Content-Type': 'application/json' },
+ body: JSON.stringify({
+ prompt: prompt,
+ provider: 'claude',
+ options: {
+ temperature: 0.7,
+ maxTokens: 2000
+ }
+ })
+ });
+
+ const result = await response.json();
+ return result.content;
+ } catch (error) {
+ console.error(error);
+ }
+}
+
+
+
+
+
+
⚙️ 設定步驟
+
+
1. 設定環境變數 (.env)
+
+CLAUDE_API_KEY=your_claude_api_key_here
+CLAUDE_API_URL=https://api.anthropic.com/v1
+CLAUDE_MODEL=claude-3-5-sonnet-20241022
+
+
+
+
+
2. 啟動後端伺服器
+
+
+npm install
+
+
+npm run dev
+
+
+npm start
+
+
+
+
+
3. 完成!
+
現在可以透過 http://localhost:3000/api/llm/* 安全地呼叫 LLM API 了
+
+
+
+
+
+
+
+
diff --git a/routes/llm.routes.js b/routes/llm.routes.js
index 504c7e7..d3dd7ce 100644
--- a/routes/llm.routes.js
+++ b/routes/llm.routes.js
@@ -35,6 +35,15 @@ router.post('/test/openai', asyncHandler(async (req, res) => {
res.json(result);
}));
+/**
+ * POST /api/llm/test/claude
+ * 測試 Claude API 連線
+ */
+router.post('/test/claude', asyncHandler(async (req, res) => {
+ const result = await llmService.testClaudeConnection();
+ res.json(result);
+}));
+
/**
* POST /api/llm/test/all
* 測試所有 LLM API 連線
diff --git a/server.js b/server.js
new file mode 100644
index 0000000..6b8c804
--- /dev/null
+++ b/server.js
@@ -0,0 +1,151 @@
+/**
+ * Express Server
+ * HR 績效評核系統後端伺服器
+ */
+
+require('dotenv').config();
+const express = require('express');
+const cors = require('cors');
+const helmet = require('helmet');
+const morgan = require('morgan');
+const path = require('path');
+
+// Import routes
+const llmRoutes = require('./routes/llm.routes');
+
+// Import error handler
+const { handleError } = require('./utils/errorHandler');
+
+// Create Express app
+const app = express();
+const PORT = process.env.PORT || 3000;
+
+// ============================================
+// Middleware
+// ============================================
+
+// Security headers
+app.use(helmet());
+
+// CORS configuration
+app.use(cors({
+  origin: process.env.FRONTEND_URL || true, // '*' is invalid when credentials:true — `true` reflects the request origin
+ methods: ['GET', 'POST', 'PUT', 'DELETE', 'PATCH'],
+ allowedHeaders: ['Content-Type', 'Authorization'],
+ credentials: true,
+}));
+
+// Body parsing
+app.use(express.json({ limit: '10mb' }));
+app.use(express.urlencoded({ extended: true, limit: '10mb' }));
+
+// Logging
+if (process.env.NODE_ENV === 'development') {
+ app.use(morgan('dev'));
+} else {
+ app.use(morgan('combined'));
+}
+
+// Static files
+app.use(express.static('public'));
+
+// ============================================
+// Routes
+// ============================================
+
+// Health check
+app.get('/health', (req, res) => {
+ res.json({
+ success: true,
+ message: 'HR Performance System API is running',
+ timestamp: new Date().toISOString(),
+ environment: process.env.NODE_ENV,
+ });
+});
+
+// API routes
+app.use('/api/llm', llmRoutes);
+
+// Root endpoint
+app.get('/', (req, res) => {
+ res.json({
+ name: 'HR Performance System API',
+ version: '1.0.0',
+ description: '四卡循環績效管理系統',
+ endpoints: {
+ health: '/health',
+ llm: '/api/llm',
+ },
+ });
+});
+
+// ============================================
+// Error Handling
+// ============================================
+
+// 404 handler
+app.use((req, res, next) => {
+ res.status(404).json({
+ success: false,
+ error: {
+ statusCode: 404,
+ message: `Cannot ${req.method} ${req.path}`,
+ timestamp: new Date().toISOString(),
+ path: req.path,
+ },
+ });
+});
+
+// Global error handler
+app.use(handleError);
+
+// ============================================
+// Server Start
+// ============================================
+
+const server = app.listen(PORT, () => {
+ console.log('='.repeat(50));
+ console.log('🚀 HR Performance System API Server');
+ console.log('='.repeat(50));
+ console.log(`📡 Server running on: http://localhost:${PORT}`);
+ console.log(`🌍 Environment: ${process.env.NODE_ENV || 'development'}`);
+ console.log(`📅 Started at: ${new Date().toLocaleString('zh-TW')}`);
+ console.log('='.repeat(50));
+ console.log('\n📚 Available endpoints:');
+ console.log(` GET / - API information`);
+ console.log(` GET /health - Health check`);
+ console.log(` POST /api/llm/test/* - Test LLM connections`);
+ console.log(` POST /api/llm/generate - Generate content with LLM`);
+ console.log('\n✨ Server is ready to accept connections!\n');
+});
+
+// Graceful shutdown
+process.on('SIGTERM', () => {
+ console.log('\n⚠️ SIGTERM received. Shutting down gracefully...');
+ server.close(() => {
+ console.log('✅ Server closed');
+ process.exit(0);
+ });
+});
+
+process.on('SIGINT', () => {
+ console.log('\n⚠️ SIGINT received. Shutting down gracefully...');
+ server.close(() => {
+ console.log('✅ Server closed');
+ process.exit(0);
+ });
+});
+
+// Uncaught exception handler
+process.on('uncaughtException', (error) => {
+ console.error('❌ Uncaught Exception:', error);
+ process.exit(1);
+});
+
+// Unhandled rejection handler
+process.on('unhandledRejection', (reason, promise) => {
+ console.error('❌ Unhandled Rejection at:', promise, 'reason:', reason);
+ process.exit(1);
+});
+
+module.exports = app;
diff --git a/services/llm.service.js b/services/llm.service.js
index fb23e2a..eab22ca 100644
--- a/services/llm.service.js
+++ b/services/llm.service.js
@@ -193,6 +193,68 @@ class LLMService {
}
}
+ /**
+ * 測試 Claude API 連線
+ */
+ async testClaudeConnection() {
+ try {
+ if (!isProviderEnabled('claude')) {
+ return {
+ success: false,
+ message: 'Claude API key not configured',
+ provider: 'claude',
+ };
+ }
+
+ const config = getProviderConfig('claude');
+ const url = `${config.apiUrl}/messages`;
+
+ const response = await axios.post(
+ url,
+ {
+ model: config.model,
+ max_tokens: 50,
+ messages: [
+ {
+ role: 'user',
+ content: 'Hello, this is a connection test.',
+ },
+ ],
+ },
+ {
+ headers: {
+ 'Content-Type': 'application/json',
+ 'x-api-key': config.apiKey,
+ 'anthropic-version': config.version,
+ },
+ timeout: config.timeout,
+ }
+ );
+
+ if (response.status === 200 && response.data.content) {
+ return {
+ success: true,
+ message: 'Claude API connection successful',
+ provider: 'claude',
+ model: config.model,
+ };
+ }
+
+ return {
+ success: false,
+ message: 'Unexpected response from Claude API',
+ provider: 'claude',
+ };
+ } catch (error) {
+ return {
+ success: false,
+ message: error.response?.data?.error?.message || error.message,
+ provider: 'claude',
+ error: error.message,
+ };
+ }
+ }
+
/**
* 測試所有 LLM 連線
*/
@@ -201,6 +263,7 @@ class LLMService {
gemini: await this.testGeminiConnection(),
deepseek: await this.testDeepSeekConnection(),
openai: await this.testOpenAIConnection(),
+ claude: await this.testClaudeConnection(),
};
return results;
@@ -342,6 +405,55 @@ class LLMService {
}
}
+ /**
+ * 使用 Claude 生成內容
+ */
+ async generateWithClaude(prompt, options = {}) {
+ try {
+ if (!isProviderEnabled('claude')) {
+ throw new Error('Claude API not configured');
+ }
+
+ const config = getProviderConfig('claude');
+ const url = `${config.apiUrl}/messages`;
+
+ const response = await axios.post(
+ url,
+ {
+ model: config.model,
+ max_tokens: options.maxTokens || llmConfig.maxTokens,
+        temperature: options.temperature ?? llmConfig.temperature, // ?? so an explicit temperature of 0 is honored
+ messages: [
+ {
+ role: 'user',
+ content: prompt,
+ },
+ ],
+ },
+ {
+ headers: {
+ 'Content-Type': 'application/json',
+ 'x-api-key': config.apiKey,
+ 'anthropic-version': config.version,
+ },
+ timeout: config.timeout,
+ }
+ );
+
+ if (response.data?.content?.[0]?.text) {
+ return {
+ success: true,
+ content: response.data.content[0].text,
+ provider: 'claude',
+ };
+ }
+
+ throw new Error('Invalid response format from Claude');
+ } catch (error) {
+      throw new Error(`Claude API error: ${error.response?.data?.error?.message || error.message}`);
+ }
+ }
+
/**
* 使用預設或指定的 LLM 生成內容
*/
@@ -355,6 +467,8 @@ class LLMService {
return await this.generateWithDeepSeek(prompt, options);
case 'openai':
return await this.generateWithOpenAI(prompt, options);
+ case 'claude':
+ return await this.generateWithClaude(prompt, options);
default:
throw new Error(`Unknown provider: ${selectedProvider}`);
}