Phase 0: Project initialization - set up the project structure, environment configuration, and the LLM service module

2025-12-12 12:53:24 +08:00
commit 177e8e8fe9
11 changed files with 352 additions and 0 deletions

.env.example (new file)

@@ -0,0 +1,19 @@
# Flask
FLASK_SECRET_KEY=your-secret-key-change-in-production
FLASK_DEBUG=True

# Database (MySQL)
DB_HOST=your-db-host
DB_PORT=3306
DB_NAME=your-db-name
DB_USER=your-db-user
DB_PASSWORD=your-db-password

# Ollama API
OLLAMA_API_URL=https://your-ollama-api-url
OLLAMA_DEFAULT_MODEL=qwen2.5:3b

# Gitea
GITEA_URL=https://your-gitea-url
GITEA_USER=your-username
GITEA_TOKEN=your-token

.gitignore (new file)

@@ -0,0 +1,49 @@
# Environment
.env
venv/
env/
.venv/

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg

# IDE
.idea/
.vscode/
*.swp
*.swo
*~

# OS
.DS_Store
Thumbs.db

# Logs
*.log
logs/

# Database
*.db
*.sqlite3

# Uploads
uploads/

README.md (new file)

@@ -0,0 +1,34 @@
# DIT_C

## Quick Start

```bash
# Install dependencies
pip install -r requirements.txt

# Set up environment variables
cp .env.example .env
# Edit .env and fill in the correct database and API settings

# Start the service
python app.py
```

## Test Endpoints

- Health check: `GET /health`
- LLM connectivity test: `GET /test-llm`

## Project Structure

```
DIT_C/
├── app.py          # Main application entry point
├── config.py       # Configuration
├── models/         # Database models
├── routes/         # Route modules
├── services/       # Business logic (incl. LLM service)
├── utils/          # Utility functions
├── templates/      # HTML templates
└── static/         # Static assets
```
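
A quick way to exercise the two test endpoints above, as a minimal sketch (assuming the dev server is running on the default `localhost:5000`):

```python
import requests

BASE = "http://localhost:5000"  # assumed local dev address

# Health check: expects {"status": "ok", "message": "DIT_C is running"}
print(requests.get(f"{BASE}/health", timeout=5).json())

# LLM connectivity test: lists models reachable through the Ollama API
print(requests.get(f"{BASE}/test-llm", timeout=30).json())
```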

app.py (new file)

@@ -0,0 +1,50 @@
"""
DIT_C 主程式入口
"""
from flask import Flask, jsonify
from config import Config
from models import db
def create_app(config_class=Config):
"""應用程式工廠"""
app = Flask(__name__)
app.config.from_object(config_class)
# 初始化擴展
db.init_app(app)
# 註冊 Blueprint (後續擴展)
# from routes.auth import auth_bp
# from routes.admin import admin_bp
# from routes.api import api_bp
# app.register_blueprint(auth_bp)
# app.register_blueprint(admin_bp)
# app.register_blueprint(api_bp)
# 健康檢查端點
@app.route('/health')
def health_check():
return jsonify({"status": "ok", "message": "DIT_C is running"})
# 測試 LLM 連線
@app.route('/test-llm')
def test_llm():
from services.llm_service import llm_service, LLMServiceError
try:
models = llm_service.get_available_models()
return jsonify({
"status": "ok",
"available_models": models,
"default_model": Config.OLLAMA_DEFAULT_MODEL
})
except LLMServiceError as e:
return jsonify({"status": "error", "message": str(e)}), 500
return app
if __name__ == '__main__':
app = create_app()
app.run(host='0.0.0.0', port=5000, debug=Config.DEBUG)
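
The blueprint registrations are left commented out for later phases. As an illustration only (none of these routes exist in this commit), a hypothetical `routes/api.py` wired to the LLM service might look like:

```python
# routes/api.py -- hypothetical sketch, not part of this commit
from flask import Blueprint, jsonify, request

from services.llm_service import llm_service, LLMServiceError

api_bp = Blueprint('api', __name__, url_prefix='/api')


@api_bp.route('/ask', methods=['POST'])
def ask():
    """Forward a single user prompt to the LLM service."""
    prompt = (request.get_json(silent=True) or {}).get('prompt', '')
    if not prompt:
        return jsonify({"error": "prompt is required"}), 400
    try:
        return jsonify({"answer": llm_service.simple_query(prompt)})
    except LLMServiceError as e:
        return jsonify({"error": str(e)}), 500
```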

config.py (new file)

@@ -0,0 +1,37 @@
import os

from dotenv import load_dotenv

load_dotenv()


class Config:
    """Application settings."""

    # Flask
    SECRET_KEY = os.getenv('FLASK_SECRET_KEY', 'dev-secret-key')
    DEBUG = os.getenv('FLASK_DEBUG', 'False').lower() == 'true'

    # Database (MySQL)
    DB_HOST = os.getenv('DB_HOST', 'localhost')
    DB_PORT = int(os.getenv('DB_PORT', 3306))
    DB_NAME = os.getenv('DB_NAME', 'database')
    DB_USER = os.getenv('DB_USER', 'root')
    DB_PASSWORD = os.getenv('DB_PASSWORD', '')

    # SQLAlchemy connection string
    SQLALCHEMY_DATABASE_URI = (
        f"mysql+pymysql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
    )
    SQLALCHEMY_TRACK_MODIFICATIONS = False

    # Table name prefix
    TABLE_PREFIX = 'DIT_C_'

    # Ollama API
    OLLAMA_API_URL = os.getenv('OLLAMA_API_URL', 'https://ollama_pjapi.theaken.com')
    OLLAMA_DEFAULT_MODEL = os.getenv('OLLAMA_DEFAULT_MODEL', 'qwen2.5:3b')

    # Gitea
    GITEA_URL = os.getenv('GITEA_URL', '')
    GITEA_USER = os.getenv('GITEA_USER', '')
    GITEA_TOKEN = os.getenv('GITEA_TOKEN', '')
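
The `TABLE_PREFIX` setting implies all tables will share a `DIT_C_` prefix. A hypothetical model sketch (no models exist yet in this commit) showing how it would be applied:

```python
from config import Config
from models import db


class User(db.Model):
    # Hypothetical model; illustrates the TABLE_PREFIX convention only
    __tablename__ = Config.TABLE_PREFIX + 'users'  # -> 'DIT_C_users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True, nullable=False)
```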

models/__init__.py (new file)

@@ -0,0 +1,3 @@
from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()

requirements.txt (new file)

@@ -0,0 +1,7 @@
flask>=3.0.0
flask-sqlalchemy>=3.1.0
flask-login>=0.6.3
python-dotenv>=1.0.0
pymysql>=1.1.0
cryptography>=41.0.0
requests>=2.31.0

routes/__init__.py (new file)

@@ -0,0 +1 @@
# Routes module

services/__init__.py (new file)

@@ -0,0 +1 @@
# Services module

services/llm_service.py (new file)

@@ -0,0 +1,150 @@
"""
Ollama LLM API 服務模組
支援一般請求與串流模式
"""
import requests
import json
from typing import Generator, Optional
from config import Config
class LLMService:
"""Ollama API 服務封裝"""
def __init__(self, api_url: str = None, default_model: str = None):
self.api_url = api_url or Config.OLLAMA_API_URL
self.default_model = default_model or Config.OLLAMA_DEFAULT_MODEL
def get_available_models(self) -> list:
"""取得可用模型列表"""
try:
response = requests.get(f"{self.api_url}/v1/models", timeout=10)
response.raise_for_status()
models = response.json()
return [m['id'] for m in models.get('data', [])]
except requests.RequestException as e:
raise LLMServiceError(f"無法取得模型列表: {e}")
def chat(
self,
messages: list,
model: str = None,
temperature: float = 0.7,
system_prompt: str = None
) -> str:
"""
發送聊天請求 (非串流)
Args:
messages: 訊息列表 [{"role": "user", "content": "..."}]
model: 模型名稱
temperature: 溫度參數 (0-1)
system_prompt: 系統提示詞
Returns:
AI 回應內容
"""
model = model or self.default_model
# 加入系統提示詞
if system_prompt:
messages = [{"role": "system", "content": system_prompt}] + messages
payload = {
"model": model,
"messages": messages,
"temperature": temperature,
"stream": False
}
try:
response = requests.post(
f"{self.api_url}/v1/chat/completions",
json=payload,
timeout=60
)
response.raise_for_status()
result = response.json()
return result['choices'][0]['message']['content']
except requests.RequestException as e:
raise LLMServiceError(f"聊天請求失敗: {e}")
def chat_stream(
self,
messages: list,
model: str = None,
temperature: float = 0.7,
system_prompt: str = None
) -> Generator[str, None, None]:
"""
發送聊天請求 (串流模式)
Args:
messages: 訊息列表
model: 模型名稱
temperature: 溫度參數
system_prompt: 系統提示詞
Yields:
串流回應的每個片段
"""
model = model or self.default_model
if system_prompt:
messages = [{"role": "system", "content": system_prompt}] + messages
payload = {
"model": model,
"messages": messages,
"temperature": temperature,
"stream": True
}
try:
response = requests.post(
f"{self.api_url}/v1/chat/completions",
json=payload,
stream=True,
timeout=120
)
response.raise_for_status()
for line in response.iter_lines():
if line:
if line.startswith(b"data: "):
data_str = line[6:].decode('utf-8')
if data_str.strip() != "[DONE]":
try:
data = json.loads(data_str)
if 'choices' in data:
delta = data['choices'][0].get('delta', {})
if 'content' in delta:
yield delta['content']
except json.JSONDecodeError:
continue
except requests.RequestException as e:
raise LLMServiceError(f"串流請求失敗: {e}")
def simple_query(self, prompt: str, system_prompt: str = None) -> str:
"""
簡單查詢 (單一問題)
Args:
prompt: 使用者問題
system_prompt: 系統提示詞
Returns:
AI 回應
"""
messages = [{"role": "user", "content": prompt}]
return self.chat(messages, system_prompt=system_prompt)
class LLMServiceError(Exception):
"""LLM 服務錯誤"""
pass
# 全域實例
llm_service = LLMService()
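
A minimal usage sketch for the service's three entry points, assuming the configured Ollama endpoint is reachable:

```python
from services.llm_service import llm_service, LLMServiceError

try:
    # One-shot query with an optional system prompt
    print(llm_service.simple_query(
        "What is a Flask blueprint?",
        system_prompt="Answer in one sentence.",
    ))

    # Streaming: chunks are yielded as the model generates them
    for chunk in llm_service.chat_stream(
        [{"role": "user", "content": "Count from 1 to 5."}]
    ):
        print(chunk, end="", flush=True)
    print()
except LLMServiceError as e:
    print(f"LLM call failed: {e}")
```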

utils/__init__.py (new file)

@@ -0,0 +1 @@
# Utils module