Files
hr-position-system/llm_config.py
DonaldFang 方士碩 12ceccc3d3 refactor: 新增 ui.js 和 main.js 模組,啟用 ES6 Modules
新增檔案:
- js/ui.js - UI 操作、模組切換、預覽更新、表單資料收集
- js/main.js - 主程式初始化、事件監聽器設置、快捷鍵

更新檔案:
- index.html - 引用 ES6 模組 (type="module")

功能:
 模組切換功能
 標籤頁切換
 表單欄位監聽
 JSON 預覽更新
 快捷鍵支援 (Ctrl+S, Ctrl+N)
 用戶信息載入
 登出功能

注意:
- 大部分 JavaScript 代碼仍在 HTML 中(約 2400 行)
- 已建立核心模組架構,便於後續逐步遷移
- 使用 ES6 Modules,需要通過 HTTP Server 運行

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-12-05 17:18:28 +08:00

441 lines
16 KiB
Python

"""
LLM API Configuration Module
Supports Gemini, DeepSeek, and OpenAI APIs with connection testing
"""
import os
import requests
from dotenv import load_dotenv
from typing import Dict, List, Tuple
import urllib3
# Silence urllib3's InsecureRequestWarning: the self-hosted Ollama endpoint
# is contacted with verify=False (TLS certificate checks disabled) below.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Pull API keys/endpoints from a local .env file into the process environment
# before LLMConfig reads them via os.getenv().
load_dotenv()
class LLMConfig:
    """LLM API configuration and management.

    Reads per-provider credentials and endpoints from environment variables
    and exposes connection tests plus text-generation wrappers for Gemini,
    DeepSeek, OpenAI, Ollama and GPT-OSS.  All public test/generate methods
    return a ``(success, message_or_text)`` tuple instead of raising, so
    callers can surface errors directly in a UI.
    """

    def __init__(self) -> None:
        # Provider registry keyed by internal API id.  A provider is enabled
        # when its API key is present; Ollama and GPT-OSS need no key and are
        # therefore always enabled.
        self.apis: Dict[str, Dict] = {
            'gemini': {
                'name': 'Google Gemini',
                'api_key': os.getenv('GEMINI_API_KEY', ''),
                # NOTE(review): this endpoint value is not used below -- the
                # Gemini helpers build their own v1beta URLs.
                'endpoint': 'https://generativelanguage.googleapis.com/v1/models',
                'enabled': bool(os.getenv('GEMINI_API_KEY'))
            },
            'deepseek': {
                'name': 'DeepSeek',
                'api_key': os.getenv('DEEPSEEK_API_KEY', ''),
                'endpoint': os.getenv('DEEPSEEK_API_URL', 'https://api.deepseek.com/v1'),
                'enabled': bool(os.getenv('DEEPSEEK_API_KEY'))
            },
            'openai': {
                'name': 'OpenAI',
                'api_key': os.getenv('OPENAI_API_KEY', ''),
                'endpoint': os.getenv('OPENAI_API_URL', 'https://api.openai.com/v1'),
                'enabled': bool(os.getenv('OPENAI_API_KEY'))
            },
            'ollama': {
                'name': 'Ollama',
                'api_key': '',  # Ollama needs no API key
                'endpoint': os.getenv('OLLAMA_API_URL', 'https://ollama_pjapi.theaken.com'),
                'model': os.getenv('OLLAMA_MODEL', 'qwen2.5:3b'),
                'enabled': True  # enabled by default
            },
            'gptoss': {
                'name': 'GPT-OSS',
                'api_key': '',  # no API key needed (served via the Ollama interface)
                'endpoint': os.getenv('GPTOSS_API_URL', 'https://ollama_pjapi.theaken.com'),
                'model': os.getenv('GPTOSS_MODEL', 'gpt-oss:120b'),
                'enabled': True  # enabled by default
            }
        }

    def get_enabled_apis(self) -> List[str]:
        """Return the ids of all enabled APIs."""
        return [key for key, config in self.apis.items() if config['enabled']]

    def get_api_config(self, api_name: str) -> Dict:
        """Return the configuration dict for *api_name* (empty dict if unknown)."""
        return self.apis.get(api_name, {})

    def test_gemini_connection(self) -> Tuple[bool, str]:
        """Test Gemini API connection by listing the available models."""
        try:
            api_key = self.apis['gemini']['api_key']
            if not api_key:
                return False, "API Key 未設定"
            # List models on the v1beta surface; the key is passed as a query
            # parameter per the Gemini REST convention.
            url = f"https://generativelanguage.googleapis.com/v1beta/models?key={api_key}"
            response = requests.get(url, timeout=10)
            if response.status_code == 200:
                data = response.json()
                models = data.get('models', [])
                model_count = len(models)
                return True, f"連線成功!找到 {model_count} 個可用模型"
            elif response.status_code == 400:
                return False, "API Key 格式錯誤"
            elif response.status_code == 403:
                return False, "API Key 無效或權限不足"
            else:
                return False, f"連線失敗 (HTTP {response.status_code})"
        except requests.exceptions.Timeout:
            return False, "連線逾時"
        except requests.exceptions.ConnectionError:
            return False, "無法連接到伺服器"
        except Exception as e:
            return False, f"錯誤: {str(e)}"

    def test_deepseek_connection(self) -> Tuple[bool, str]:
        """Test DeepSeek API connection by listing the available models."""
        try:
            api_key = self.apis['deepseek']['api_key']
            endpoint = self.apis['deepseek']['endpoint']
            if not api_key:
                return False, "API Key 未設定"
            url = f"{endpoint}/models"
            headers = {
                'Authorization': f'Bearer {api_key}',
                'Content-Type': 'application/json'
            }
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                data = response.json()
                models = data.get('data', [])
                # DeepSeek may return an empty model list on a valid key, so
                # an empty list is still reported as a success.
                if models:
                    model_count = len(models)
                    return True, f"連線成功!找到 {model_count} 個可用模型"
                else:
                    return True, "連線成功!"
            elif response.status_code == 401:
                return False, "API Key 無效"
            elif response.status_code == 403:
                return False, "權限不足"
            else:
                return False, f"連線失敗 (HTTP {response.status_code})"
        except requests.exceptions.Timeout:
            return False, "連線逾時"
        except requests.exceptions.ConnectionError:
            return False, "無法連接到伺服器"
        except Exception as e:
            return False, f"錯誤: {str(e)}"

    def test_openai_connection(self) -> Tuple[bool, str]:
        """Test OpenAI API connection by listing the available models."""
        try:
            api_key = self.apis['openai']['api_key']
            endpoint = self.apis['openai']['endpoint']
            if not api_key:
                return False, "API Key 未設定"
            url = f"{endpoint}/models"
            headers = {
                'Authorization': f'Bearer {api_key}',
                'Content-Type': 'application/json'
            }
            response = requests.get(url, headers=headers, timeout=10)
            if response.status_code == 200:
                data = response.json()
                models = data.get('data', [])
                model_count = len(models)
                return True, f"連線成功!找到 {model_count} 個可用模型"
            elif response.status_code == 401:
                return False, "API Key 無效"
            elif response.status_code == 403:
                return False, "權限不足"
            else:
                return False, f"連線失敗 (HTTP {response.status_code})"
        except requests.exceptions.Timeout:
            return False, "連線逾時"
        except requests.exceptions.ConnectionError:
            return False, "無法連接到伺服器"
        except Exception as e:
            return False, f"錯誤: {str(e)}"

    def test_ollama_connection(self) -> Tuple[bool, str]:
        """Test Ollama API connection via its OpenAI-compatible /v1/models route."""
        try:
            endpoint = self.apis['ollama']['endpoint']
            url = f"{endpoint}/v1/models"
            # verify=False: the self-hosted endpoint's certificate is not
            # trusted; warnings are silenced at module import time.
            response = requests.get(url, timeout=10, verify=False)
            if response.status_code == 200:
                data = response.json()
                models = data.get('data', [])
                if models:
                    model_count = len(models)
                    # Show at most three model ids as a sample.
                    model_names = [m.get('id', '') for m in models[:3]]
                    return True, f"連線成功!找到 {model_count} 個可用模型 (例如: {', '.join(model_names)})"
                else:
                    return True, "連線成功!"
            else:
                return False, f"連線失敗 (HTTP {response.status_code})"
        except requests.exceptions.Timeout:
            return False, "連線逾時"
        except requests.exceptions.ConnectionError:
            return False, "無法連接到伺服器"
        except Exception as e:
            return False, f"錯誤: {str(e)}"

    def test_all_connections(self) -> Dict[str, Tuple[bool, str]]:
        """Test every enabled API and return a mapping of id -> (ok, message).

        Note: GPT-OSS shares the Ollama endpoint and is not tested separately.
        """
        results = {}
        if self.apis['gemini']['enabled']:
            results['gemini'] = self.test_gemini_connection()
        if self.apis['deepseek']['enabled']:
            results['deepseek'] = self.test_deepseek_connection()
        if self.apis['openai']['enabled']:
            results['openai'] = self.test_openai_connection()
        if self.apis['ollama']['enabled']:
            results['ollama'] = self.test_ollama_connection()
        return results

    def _chat_completion(self, endpoint: str, model: str, prompt: str,
                         headers: Dict, max_tokens: int = None,
                         system: str = None, timeout: int = 30,
                         verify: bool = True) -> Tuple[bool, str]:
        """POST an OpenAI-style ``{endpoint}/chat/completions`` request.

        Shared backend for DeepSeek, OpenAI, Ollama and GPT-OSS generation.
        Returns ``(True, generated_text)`` or ``(False, error_message)``.
        """
        messages = []
        if system is not None:
            messages.append({"role": "system", "content": system})
        messages.append({"role": "user", "content": prompt})
        data = {
            "model": model,
            "messages": messages,
            "temperature": 0.7
        }
        if max_tokens is not None:
            data["max_tokens"] = max_tokens
        response = requests.post(f"{endpoint}/chat/completions", json=data,
                                 headers=headers, timeout=timeout, verify=verify)
        if response.status_code == 200:
            result = response.json()
            return True, result['choices'][0]['message']['content']
        return False, f"生成失敗 (HTTP {response.status_code}): {response.text}"

    def generate_text_gemini(self, prompt: str, max_tokens: int = 2000) -> Tuple[bool, str]:
        """Generate text using the Gemini API.

        Args:
            prompt: The prompt text.
            max_tokens: Upper bound passed as ``maxOutputTokens``.
        """
        try:
            api_key = self.apis['gemini']['api_key']
            if not api_key:
                return False, "API Key 未設定"
            # Model name from the environment; defaults to gemini-1.5-flash.
            model_name = os.getenv('GEMINI_MODEL', 'gemini-1.5-flash')
            url = f"https://generativelanguage.googleapis.com/v1beta/models/{model_name}:generateContent?key={api_key}"
            data = {
                "contents": [
                    {
                        "parts": [
                            {"text": prompt}
                        ]
                    }
                ],
                "generationConfig": {
                    "maxOutputTokens": max_tokens,
                    "temperature": 0.7
                }
            }
            response = requests.post(url, json=data, timeout=30)
            if response.status_code == 200:
                result = response.json()
                text = result['candidates'][0]['content']['parts'][0]['text']
                return True, text
            else:
                return False, f"生成失敗 (HTTP {response.status_code}): {response.text}"
        except Exception as e:
            return False, f"錯誤: {str(e)}"

    def generate_text_deepseek(self, prompt: str, max_tokens: int = 2000) -> Tuple[bool, str]:
        """Generate text using the DeepSeek API (``deepseek-chat`` model)."""
        try:
            api_key = self.apis['deepseek']['api_key']
            if not api_key:
                return False, "API Key 未設定"
            headers = {
                'Authorization': f'Bearer {api_key}',
                'Content-Type': 'application/json'
            }
            return self._chat_completion(self.apis['deepseek']['endpoint'],
                                         "deepseek-chat", prompt, headers,
                                         max_tokens=max_tokens)
        except Exception as e:
            return False, f"錯誤: {str(e)}"

    def generate_text_openai(self, prompt: str, model: str = "gpt-3.5-turbo", max_tokens: int = 2000) -> Tuple[bool, str]:
        """Generate text using the OpenAI API.

        Args:
            prompt: The prompt text.
            model: Chat model name (default ``gpt-3.5-turbo``).
            max_tokens: Maximum tokens to generate.
        """
        try:
            api_key = self.apis['openai']['api_key']
            if not api_key:
                return False, "API Key 未設定"
            headers = {
                'Authorization': f'Bearer {api_key}',
                'Content-Type': 'application/json'
            }
            return self._chat_completion(self.apis['openai']['endpoint'],
                                         model, prompt, headers,
                                         max_tokens=max_tokens)
        except Exception as e:
            return False, f"錯誤: {str(e)}"

    def generate_text_ollama(self, prompt: str, max_tokens: int = 2000, model: str = None) -> Tuple[bool, str]:
        """Generate text using the Ollama API.

        Args:
            prompt: The prompt text.
            max_tokens: Not forwarded to Ollama; kept for interface compatibility.
            model: The model to use; falls back to the configured default.
        """
        try:
            endpoint = self.apis['ollama']['endpoint']
            if model is None:
                model = self.apis['ollama']['model']
            # Ollama exposes an OpenAI-compatible API under /v1.  The longer
            # timeout and verify=False match the self-hosted deployment.
            return self._chat_completion(endpoint + '/v1', model, prompt,
                                         {'Content-Type': 'application/json'},
                                         system="You are a helpful assistant.",
                                         timeout=60, verify=False)
        except Exception as e:
            return False, f"錯誤: {str(e)}"

    def generate_text_gptoss(self, prompt: str, max_tokens: int = 2000, model: str = None) -> Tuple[bool, str]:
        """Generate text using GPT-OSS (120B model served via the Ollama interface).

        Args:
            prompt: The prompt text.
            max_tokens: Not forwarded; kept for interface compatibility.
            model: The model to use; falls back to the configured default.
        """
        try:
            endpoint = self.apis['gptoss']['endpoint']
            if model is None:
                model = self.apis['gptoss']['model']
            return self._chat_completion(endpoint + '/v1', model, prompt,
                                         {'Content-Type': 'application/json'},
                                         system="You are a helpful assistant.",
                                         timeout=60, verify=False)
        except Exception as e:
            return False, f"錯誤: {str(e)}"
def main():
    """Smoke-test script: print the enabled APIs and test each connection."""
    print("=" * 60)
    print("LLM API Configuration Test")
    print("=" * 60)
    print()
    config = LLMConfig()
    # Show enabled APIs.
    enabled = config.get_enabled_apis()
    # NOTE(review): the empty-list branch prints nothing after the colon --
    # a placeholder such as "無" may have been lost; confirm intent.
    print(f"已啟用的 API: {', '.join(enabled) if enabled else ''}")
    print()
    # Test all connections and print one status line per API.
    print("測試 API 連線:")
    print("-" * 60)
    results = config.test_all_connections()
    for api_name, (success, message) in results.items():
        # Bug fix: both branches previously produced the empty string (the
        # original status glyphs were lost), so success and failure were
        # indistinguishable in the output.
        status = "✓" if success else "✗"
        api_display_name = config.apis[api_name]['name']
        print(f"{status} {api_display_name}: {message}")
    print()


if __name__ == '__main__':
    main()