Phase 0 & Phase 2 completed:
- Project structure setup
- Environment configuration (.env, .gitignore)
- Enterprise-grade dependencies (bcrypt, helmet, mysql2, etc.)
- Complete database schema with 8 tables + 2 views
- Database initialization scripts
- Comprehensive documentation

Database Tables:
- users (user management with 3-tier permissions)
- analyses (analysis records)
- analysis_perspectives (multi-angle analysis)
- analysis_whys (detailed 5 Why records)
- llm_configs (LLM API configurations)
- system_settings (system parameters)
- audit_logs (security audit trail)
- sessions (session management)

Tech Stack:
- Backend: Node.js + Express
- Frontend: React 18 + Vite + Tailwind CSS
- Database: MySQL 9.4.0
- AI: Ollama API (qwen2.5:3b)

Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
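The database layer listed above is wired through the .env configuration and the mysql2 dependency; the schema and initialization scripts are not shown in this file. Below is a minimal sketch of that wiring, assuming dotenv is used and assuming illustrative environment variable names (DB_HOST, DB_USER, DB_PASSWORD, DB_NAME) that may differ from the actual .env:

// Minimal sketch (assumptions noted above): build a mysql2 connection pool from .env values.
import 'dotenv/config';
import mysql from 'mysql2/promise';

const pool = mysql.createPool({
  host: process.env.DB_HOST,
  user: process.env.DB_USER,
  password: process.env.DB_PASSWORD,
  database: process.env.DB_NAME, // the schema holding the 8 tables + 2 views
  waitForConnections: true,
  connectionLimit: 10,
});

// Example: count rows in the users table listed in the schema summary above.
const [rows] = await pool.query('SELECT COUNT(*) AS userCount FROM users');
console.log(rows[0].userCount);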
import express from 'express';
import cors from 'cors';
import axios from 'axios';

const app = express();
const PORT = 3001;

// Ollama API configuration
const OLLAMA_API_URL = "https://ollama_pjapi.theaken.com";
const MODEL_NAME = "qwen2.5:3b"; // use the qwen2.5:3b model

app.use(cors());
app.use(express.json());

// Health check endpoint
app.get('/health', (req, res) => {
  res.json({ status: 'ok', message: 'Server is running' });
});

// List available models
app.get('/api/models', async (req, res) => {
  try {
    const response = await axios.get(`${OLLAMA_API_URL}/v1/models`);
    res.json(response.data);
  } catch (error) {
    console.error('Error fetching models:', error.message);
    res.status(500).json({ error: 'Failed to fetch models', details: error.message });
  }
});

// 5 Why analysis endpoint
app.post('/api/analyze', async (req, res) => {
  const { prompt } = req.body;

  if (!prompt) {
    return res.status(400).json({ error: 'Prompt is required' });
  }

  try {
    console.log('Sending request to Ollama API...');

    const chatRequest = {
      model: MODEL_NAME,
      messages: [
        {
          role: "system",
          content: "You are an expert consultant specializing in 5 Why root cause analysis. You always respond in valid JSON format without any markdown code blocks."
        },
        {
          role: "user",
          content: prompt
        }
      ],
      temperature: 0.7,
      stream: false
    };

    const response = await axios.post(
      `${OLLAMA_API_URL}/v1/chat/completions`,
      chatRequest,
      {
        headers: {
          'Content-Type': 'application/json'
        },
        timeout: 120000 // 120 seconds timeout
      }
    );

    if (response.data && response.data.choices && response.data.choices[0]) {
      const content = response.data.choices[0].message.content;
      console.log('Received response from Ollama');
      res.json({ content });
    } else {
      throw new Error('Invalid response format from Ollama API');
    }

  } catch (error) {
    console.error('Error calling Ollama API:', error.message);
    if (error.response) {
      console.error('Response data:', error.response.data);
      console.error('Response status:', error.response.status);
    }
    res.status(500).json({
      error: 'Failed to analyze with Ollama API',
      details: error.message,
      responseData: error.response?.data
    });
  }
});

// Translation endpoint
app.post('/api/translate', async (req, res) => {
  const { prompt } = req.body;

  if (!prompt) {
    return res.status(400).json({ error: 'Prompt is required' });
  }

  try {
    console.log('Translating with Ollama API...');

    const chatRequest = {
      model: MODEL_NAME,
      messages: [
        {
          role: "system",
          content: "You are a professional translator. You always respond in valid JSON format without any markdown code blocks."
        },
        {
          role: "user",
          content: prompt
        }
      ],
      temperature: 0.3,
      stream: false
    };

    const response = await axios.post(
      `${OLLAMA_API_URL}/v1/chat/completions`,
      chatRequest,
      {
        headers: {
          'Content-Type': 'application/json'
        },
        timeout: 120000
      }
    );

    if (response.data && response.data.choices && response.data.choices[0]) {
      const content = response.data.choices[0].message.content;
      console.log('Translation completed');
      res.json({ content });
    } else {
      throw new Error('Invalid response format from Ollama API');
    }

  } catch (error) {
    console.error('Error translating with Ollama API:', error.message);
    if (error.response) {
      console.error('Response data:', error.response.data);
      console.error('Response status:', error.response.status);
    }
    res.status(500).json({
      error: 'Failed to translate with Ollama API',
      details: error.message,
      responseData: error.response?.data
    });
  }
});

app.listen(PORT, () => {
  console.log(`Server is running on http://localhost:${PORT}`);
  console.log(`Ollama API URL: ${OLLAMA_API_URL}`);
  console.log(`Using model: ${MODEL_NAME}`);
});
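Once the server is running, the endpoints above can be exercised from any HTTP client; here is a short sketch using the built-in fetch of Node 18+ (the prompt text is illustrative only):

// Hypothetical smoke test for the endpoints defined above.
const BASE_URL = 'http://localhost:3001';

const health = await fetch(`${BASE_URL}/health`);
console.log(await health.json()); // { status: 'ok', message: 'Server is running' }

const analysis = await fetch(`${BASE_URL}/api/analyze`, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    prompt: 'The packaging line stopped twice this week. Run a 5 Why analysis and reply in JSON.'
  })
});
console.log(await analysis.json()); // { content: '...' } on success; an { error, ... } object otherwise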