Internal corporate news aggregation and analysis system
- Automated news scraping (Digitimes, 經濟日報, 工商時報)
- AI-powered summarization (OpenAI/Claude/Ollama)
- Group management and subscription notifications
- Cleaned up Python cache files

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
"""
LLM service module
Supports Google Gemini, OpenAI, and Ollama
"""
import time
from typing import Optional

import httpx

from app.core.config import settings


def get_llm_client():
    """Get the LLM client for the configured provider."""
    provider = settings.llm_provider

    if provider == "gemini":
        import google.generativeai as genai
        genai.configure(api_key=settings.gemini_api_key)
        return genai
    elif provider == "openai":
        from openai import OpenAI
        return OpenAI(api_key=settings.openai_api_key)
    elif provider == "ollama":
        return None  # Ollama is called directly over HTTP via httpx

    raise ValueError(f"不支援的 LLM 提供者: {provider}")
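
# Minimal usage sketch (illustrative only; assumes settings.llm_provider and the
# matching API key or endpoint are configured in app.core.config):
#
#     client = get_llm_client()
#     if client is None:
#         # "ollama": there is no SDK client; requests go through httpx directly,
#         # as generate_summary() below does against settings.ollama_endpoint.
#         ...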


def generate_summary(group, articles: list) -> str:
    """
    Generate an AI summary.

    Args:
        group: Group object (carries ai_background and ai_prompt).
        articles: List of news articles.

    Returns:
        The combined summary text.
    """
    if not articles:
        return "無相關新聞可供摘要。"

    # Combine the article contents into one text block
    articles_text = ""
    for i, article in enumerate(articles, 1):
        articles_text += f"""
---
新聞 {i}:{article.title}
來源:{article.source.name if article.source else '未知'}
內容:{article.content[:1000] if article.content else article.summary or '無內容'}
---
"""

    # Build the prompts
    system_prompt = f"""你是一位專業的產業分析師,負責彙整每日新聞並產出精闢的綜合分析報告。

背景資訊:
{group.ai_background or '無特定背景資訊'}

摘要方向:
{group.ai_prompt or '請綜合分析以下新聞的重點、趨勢與潛在影響。'}
"""

    user_prompt = f"""請根據以下 {len(articles)} 則新聞,產出一份繁體中文的綜合分析報告:

{articles_text}

請注意:
1. 使用繁體中文
2. 整合相關主題,避免逐條列舉
3. 突出重要趨勢與影響
4. 控制在 500 字以內
"""

    provider = settings.llm_provider

    try:
        if provider == "gemini":
            import google.generativeai as genai
            genai.configure(api_key=settings.gemini_api_key)
            model = genai.GenerativeModel(settings.gemini_model or "gemini-1.5-pro")
            response = model.generate_content(
                f"{system_prompt}\n\n{user_prompt}",
                generation_config={
                    "temperature": 0.7,
                    "max_output_tokens": 2048,
                    "top_p": 0.95,
                    "top_k": 40
                }
            )
            return response.text

        elif provider == "openai":
            from openai import OpenAI
            client = OpenAI(api_key=settings.openai_api_key)
            response = client.chat.completions.create(
                model=settings.openai_model or "gpt-4o",
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": user_prompt}
                ],
                max_tokens=2048,
                temperature=0.7
            )
            return response.choices[0].message.content

        elif provider == "ollama":
            # Non-streaming /api/generate returns a JSON body whose generated
            # text is carried in the "response" field.
            response = httpx.post(
                f"{settings.ollama_endpoint}/api/generate",
                json={
                    "model": settings.ollama_model or "llama3",
                    "prompt": f"{system_prompt}\n\n{user_prompt}",
                    "stream": False,
                    "options": {
                        "temperature": 0.7,
                        "num_predict": 2048,
                        "top_p": 0.9,
                        "top_k": 40
                    }
                },
                timeout=120
            )
            return response.json().get("response", "")

        # Unsupported provider: return an error string instead of falling
        # through and implicitly returning None.
        return f"不支援的 LLM 提供者: {provider}"

    except Exception as e:
        return f"摘要產生失敗:{str(e)}"
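
# Usage sketch (illustrative only; assumes a group object exposing ai_background
# and ai_prompt, and article objects exposing title, source, content and summary,
# as referenced above):
#
#     summary = generate_summary(group, articles)
#     # Returns the generated digest, or an error/placeholder string on failure.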


def test_llm_connection(provider: str, model: str) -> dict:
    """
    Test the LLM connection.

    Returns:
        {"success": bool, "response_time_ms": int, "message": str}
    """
    start_time = time.time()

    try:
        if provider == "gemini":
            import google.generativeai as genai
            genai.configure(api_key=settings.gemini_api_key)
            gen_model = genai.GenerativeModel(model)
            response = gen_model.generate_content(
                "Hello",
                generation_config={"max_output_tokens": 10}
            )
            elapsed = int((time.time() - start_time) * 1000)
            return {"success": True, "response_time_ms": elapsed}

        elif provider == "openai":
            from openai import OpenAI
            client = OpenAI(api_key=settings.openai_api_key)
            response = client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": "Hello"}],
                max_tokens=10
            )
            elapsed = int((time.time() - start_time) * 1000)
            return {"success": True, "response_time_ms": elapsed}

        elif provider == "ollama":
            response = httpx.post(
                f"{settings.ollama_endpoint}/api/generate",
                json={"model": model, "prompt": "Hello", "stream": False},
                timeout=30
            )
            elapsed = int((time.time() - start_time) * 1000)
            if response.status_code == 200:
                return {"success": True, "response_time_ms": elapsed}
            return {"success": False, "message": f"HTTP {response.status_code}"}

        return {"success": False, "message": f"不支援的提供者: {provider}"}

    except Exception as e:
        elapsed = int((time.time() - start_time) * 1000)
        return {"success": False, "response_time_ms": elapsed, "message": str(e)}
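

if __name__ == "__main__":
    # Ad-hoc connectivity check: a minimal sketch, assuming the configured
    # provider's API key or endpoint is already set in app.core.config.
    # The model name comes from the command line; "gpt-4o" is only an
    # illustrative fallback.
    import sys

    model_name = sys.argv[1] if len(sys.argv) > 1 else "gpt-4o"
    print(test_llm_connection(settings.llm_provider, model_name))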