From 177e8e8fe9dd2dc38f55f171480e69c77f98bd3c Mon Sep 17 00:00:00 2001
From: DonaldFang 方士碩
Date: Fri, 12 Dec 2025 12:53:24 +0800
Subject: [PATCH] Phase 0: Project initialization - set up project structure,
 environment configuration, and the LLM service module
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .env.example            |  19 +++++
 .gitignore              |  49 +++++++++++++
 README.md               |  34 +++++++++
 app.py                  |  50 ++++++++++++++
 config.py               |  37 ++++++++++
 models/__init__.py      |   3 +
 requirements.txt        |   7 ++
 routes/__init__.py      |   1 +
 services/__init__.py    |   1 +
 services/llm_service.py | 150 ++++++++++++++++++++++++++++++++++++++++
 utils/__init__.py       |   1 +
 11 files changed, 352 insertions(+)
 create mode 100644 .env.example
 create mode 100644 .gitignore
 create mode 100644 README.md
 create mode 100644 app.py
 create mode 100644 config.py
 create mode 100644 models/__init__.py
 create mode 100644 requirements.txt
 create mode 100644 routes/__init__.py
 create mode 100644 services/__init__.py
 create mode 100644 services/llm_service.py
 create mode 100644 utils/__init__.py

diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..8a9374a
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,19 @@
+# Flask
+FLASK_SECRET_KEY=your-secret-key-change-in-production
+FLASK_DEBUG=True
+
+# Database (MySQL)
+DB_HOST=your-db-host
+DB_PORT=3306
+DB_NAME=your-db-name
+DB_USER=your-db-user
+DB_PASSWORD=your-db-password
+
+# Ollama API
+OLLAMA_API_URL=https://your-ollama-api-url
+OLLAMA_DEFAULT_MODEL=qwen2.5:3b
+
+# Gitea
+GITEA_URL=https://your-gitea-url
+GITEA_USER=your-username
+GITEA_TOKEN=your-token
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..142fde4
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,49 @@
+# Environment
+.env
+venv/
+env/
+.venv/
+
+# Python
+__pycache__/
+*.py[cod]
+*$py.class
+*.so
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# IDE
+.idea/
+.vscode/
+*.swp
+*.swo
+*~
+
+# OS
+.DS_Store
+Thumbs.db
+
+# Logs
+*.log
+logs/
+
+# Database
+*.db
+*.sqlite3
+
+# Uploads
+uploads/
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..70f9619
--- /dev/null
+++ b/README.md
@@ -0,0 +1,34 @@
+# DIT_C
+
+## Quick Start
+
+```bash
+# Install dependencies
+pip install -r requirements.txt
+
+# Configure environment variables
+cp .env.example .env
+# Edit .env and fill in the database and API settings
+
+# Start the service
+python app.py
+```
+
+## Test Endpoints
+
+- Health check: `GET /health`
+- LLM connectivity test: `GET /test-llm`
+
+## Project Structure
+
+```
+DIT_C/
+├── app.py          # Application entry point
+├── config.py       # Configuration
+├── models/         # Database models
+├── routes/         # Route modules
+├── services/       # Business logic (incl. LLM service)
+├── utils/          # Utility functions
+├── templates/      # HTML templates
+└── static/         # Static assets
+```
diff --git a/app.py b/app.py
new file mode 100644
index 0000000..593bc27
--- /dev/null
+++ b/app.py
@@ -0,0 +1,50 @@
+"""
+DIT_C application entry point
+"""
+
+from flask import Flask, jsonify
+from config import Config
+from models import db
+
+
+def create_app(config_class=Config):
+    """Application factory"""
+    app = Flask(__name__)
+    app.config.from_object(config_class)
+
+    # Initialize extensions
+    db.init_app(app)
+
+    # Register blueprints (to be added in later phases)
+    # from routes.auth import auth_bp
+    # from routes.admin import admin_bp
+    # from routes.api import api_bp
+    # app.register_blueprint(auth_bp)
+    # app.register_blueprint(admin_bp)
+    # app.register_blueprint(api_bp)
+
+    # Health check endpoint
+
+    @app.route('/health')
+    def health_check():
+        return jsonify({"status": "ok", "message": "DIT_C is running"})
+
+    # Test the LLM connection
+    @app.route('/test-llm')
+    def test_llm():
+        from services.llm_service import llm_service, LLMServiceError
+        try:
+            models = llm_service.get_available_models()
+            return jsonify({
+                "status": "ok",
+                "available_models": models,
+                "default_model": Config.OLLAMA_DEFAULT_MODEL
+            })
+        except LLMServiceError as e:
+            return jsonify({"status": "error", "message": str(e)}), 500
+
+    return app
+
+
+if __name__ == '__main__':
+    app = create_app()
+    app.run(host='0.0.0.0', port=5000, debug=Config.DEBUG)
diff --git a/config.py b/config.py
new file mode 100644
index 0000000..8d5ca15
--- /dev/null
+++ b/config.py
@@ -0,0 +1,37 @@
+import os
+from dotenv import load_dotenv
+
+load_dotenv()
+
+
+class Config:
+    """Application settings"""
+
+    # Flask
+    SECRET_KEY = os.getenv('FLASK_SECRET_KEY', 'dev-secret-key')
+    DEBUG = os.getenv('FLASK_DEBUG', 'False').lower() == 'true'
+
+    # Database (MySQL)
+    DB_HOST = os.getenv('DB_HOST', 'localhost')
+    DB_PORT = int(os.getenv('DB_PORT', 3306))
+    DB_NAME = os.getenv('DB_NAME', 'database')
+    DB_USER = os.getenv('DB_USER', 'root')
+    DB_PASSWORD = os.getenv('DB_PASSWORD', '')
+
+    # SQLAlchemy connection string
+    SQLALCHEMY_DATABASE_URI = (
+        f"mysql+pymysql://{DB_USER}:{DB_PASSWORD}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
+    )
+    SQLALCHEMY_TRACK_MODIFICATIONS = False
+
+    # Table name prefix
+    TABLE_PREFIX = 'DIT_C_'
+
+    # Ollama API
+    OLLAMA_API_URL = os.getenv('OLLAMA_API_URL', 'https://ollama_pjapi.theaken.com')
+    OLLAMA_DEFAULT_MODEL = os.getenv('OLLAMA_DEFAULT_MODEL', 'qwen2.5:3b')
+
+    # Gitea
+    GITEA_URL = os.getenv('GITEA_URL', '')
+    GITEA_USER = os.getenv('GITEA_USER', '')
+    GITEA_TOKEN = os.getenv('GITEA_TOKEN', '')
diff --git a/models/__init__.py b/models/__init__.py
new file mode 100644
index 0000000..f0b13d6
--- /dev/null
+++ b/models/__init__.py
@@ -0,0 +1,3 @@
+from flask_sqlalchemy import SQLAlchemy
+
+db = SQLAlchemy()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..633b3ee
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+flask>=3.0.0
+flask-sqlalchemy>=3.1.0
+flask-login>=0.6.3
+python-dotenv>=1.0.0
+pymysql>=1.1.0
+cryptography>=41.0.0
+requests>=2.31.0
diff --git a/routes/__init__.py b/routes/__init__.py
new file mode 100644
index 0000000..1102393
--- /dev/null
+++ b/routes/__init__.py
@@ -0,0 +1 @@
+# Routes module
diff --git a/services/__init__.py b/services/__init__.py
new file mode 100644
index 0000000..0557eb6
--- /dev/null
+++ b/services/__init__.py
@@ -0,0 +1 @@
+# Services module
diff --git a/services/llm_service.py b/services/llm_service.py
new file mode 100644
index 0000000..759837a
--- /dev/null
+++ b/services/llm_service.py
@@ -0,0 +1,150 @@
+"""
+Ollama LLM API service module
+Supports both regular and streaming requests
+"""
+
+import requests
+import json
+from typing import Generator, Optional
+from config import Config
+
+
+class LLMService:
+    """Wrapper around the Ollama OpenAI-compatible API"""
+
+    def __init__(self, api_url: Optional[str] = None, default_model: Optional[str] = None):
+        self.api_url = api_url or Config.OLLAMA_API_URL
+        self.default_model = default_model or Config.OLLAMA_DEFAULT_MODEL
+
+    def get_available_models(self) -> list:
+        """Fetch the list of available models"""
+        try:
+            response = requests.get(f"{self.api_url}/v1/models", timeout=10)
+            response.raise_for_status()
+            models = response.json()
+            return [m['id'] for m in models.get('data', [])]
+        except requests.RequestException as e:
+            raise LLMServiceError(f"Failed to fetch model list: {e}") from e
+
+    def chat(
+        self,
+        messages: list,
+        model: Optional[str] = None,
+        temperature: float = 0.7,
+        system_prompt: Optional[str] = None
+    ) -> str:
+        """
+        Send a chat request (non-streaming)
+
+        Args:
+            messages: message list, e.g. [{"role": "user", "content": "..."}]
+            model: model name
+            temperature: sampling temperature (0-1)
+            system_prompt: system prompt
+
+        Returns:
+            The AI response content
+        """
+        model = model or self.default_model
+
+        # Prepend the system prompt, if any
+        if system_prompt:
+            messages = [{"role": "system", "content": system_prompt}] + messages
+
+        payload = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "stream": False
+        }
+
+        try:
+            response = requests.post(
+                f"{self.api_url}/v1/chat/completions",
+                json=payload,
+                timeout=60
+            )
+            response.raise_for_status()
+            result = response.json()
+            return result['choices'][0]['message']['content']
+        except requests.RequestException as e:
+            raise LLMServiceError(f"Chat request failed: {e}") from e
+
+    def chat_stream(
+        self,
+        messages: list,
+        model: Optional[str] = None,
+        temperature: float = 0.7,
+        system_prompt: Optional[str] = None
+    ) -> Generator[str, None, None]:
+        """
+        Send a chat request (streaming mode)
+
+        Args:
+            messages: message list
+            model: model name
+            temperature: sampling temperature
+            system_prompt: system prompt
+
+        Yields:
+            Each chunk of the streamed response
+        """
+        model = model or self.default_model
+
+        if system_prompt:
+            messages = [{"role": "system", "content": system_prompt}] + messages
+
+        payload = {
+            "model": model,
+            "messages": messages,
+            "temperature": temperature,
+            "stream": True
+        }
+
+        try:
+            response = requests.post(
+                f"{self.api_url}/v1/chat/completions",
+                json=payload,
+                stream=True,
+                timeout=120
+            )
+            response.raise_for_status()
+
+            # The OpenAI-compatible endpoint streams Server-Sent Events:
+            # each payload line looks like "data: {...}" and the stream
+            # ends with "data: [DONE]".
+            for line in response.iter_lines():
+                if not line or not line.startswith(b"data: "):
+                    continue
+                data_str = line[6:].decode('utf-8')
+                if data_str.strip() == "[DONE]":
+                    continue
+                try:
+                    data = json.loads(data_str)
+                    if 'choices' in data:
+                        delta = data['choices'][0].get('delta', {})
+                        if 'content' in delta:
+                            yield delta['content']
+                except json.JSONDecodeError:
+                    continue
+        except requests.RequestException as e:
+            raise LLMServiceError(f"Streaming request failed: {e}") from e
+
+    def simple_query(self, prompt: str, system_prompt: Optional[str] = None) -> str:
+        """
+        Simple query (single question)
+
+        Args:
+            prompt: the user's question
+            system_prompt: system prompt
+
+        Returns:
+            The AI response
+        """
+        messages = [{"role": "user", "content": prompt}]
+        return self.chat(messages, system_prompt=system_prompt)
+
+
+class LLMServiceError(Exception):
+    """LLM service error"""
+    pass
+
+
+# Global instance
+llm_service = LLMService()
diff --git a/utils/__init__.py b/utils/__init__.py
new file mode 100644
index 0000000..feddb93
--- /dev/null
+++ b/utils/__init__.py
@@ -0,0 +1 @@
+# Utils module
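
Reviewer note: a minimal usage sketch of the new `services/llm_service.py` module. It assumes the Ollama endpoint configured in `.env` is reachable; the prompt texts are illustrative only.

```python
# Minimal usage sketch for services/llm_service.py.
# Assumes a reachable Ollama endpoint configured via .env; prompts are illustrative.
from services.llm_service import llm_service, LLMServiceError

try:
    # One-shot question through the convenience wrapper
    answer = llm_service.simple_query(
        "Summarize what a Flask application factory does.",
        system_prompt="Answer in one short paragraph."
    )
    print(answer)

    # Multi-turn conversation through chat(); messages follow the
    # OpenAI-style {"role": ..., "content": ...} shape expected by the API.
    history = [
        {"role": "user", "content": "What does db.init_app(app) do?"},
        {"role": "user", "content": "Keep the answer to two sentences."},
    ]
    print(llm_service.chat(history, temperature=0.3))
except LLMServiceError as e:
    print(f"LLM call failed: {e}")
```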
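
`chat_stream()` yields plain text chunks, so a route can relay them to the browser as Server-Sent Events. The blueprint below is a hypothetical sketch of how a later phase might wire this up; the `chat_bp` name and `/api/chat-stream` path are not part of this patch.

```python
# Hypothetical route sketch (not in this patch): relaying chat_stream()
# chunks to the browser as Server-Sent Events.
from flask import Blueprint, Response, request, stream_with_context

from services.llm_service import llm_service, LLMServiceError

chat_bp = Blueprint('chat', __name__)  # hypothetical blueprint name


@chat_bp.route('/api/chat-stream', methods=['POST'])
def chat_stream():
    payload = request.get_json(silent=True) or {}
    messages = payload.get('messages', [])

    def generate():
        try:
            for chunk in llm_service.chat_stream(messages):
                # SSE frames are "data: <payload>\n\n"
                yield f"data: {chunk}\n\n"
            yield "data: [DONE]\n\n"
        except LLMServiceError as e:
            yield f"data: [ERROR] {e}\n\n"

    return Response(stream_with_context(generate()),
                    mimetype='text/event-stream')
```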
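
`config.py` defines `TABLE_PREFIX = 'DIT_C_'` but no models use it yet. Presumably later models apply it via `__tablename__`; a sketch under that assumption, with a hypothetical `User` model:

```python
# Hypothetical model sketch (not in this patch) showing how TABLE_PREFIX
# would be applied to table names.
from config import Config
from models import db


class User(db.Model):
    __tablename__ = f"{Config.TABLE_PREFIX}user"  # -> "DIT_C_user"

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True, nullable=False)


# Creating the tables requires an application context:
#   from app import create_app
#   app = create_app()
#   with app.app_context():
#       db.create_all()
```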