Initial commit: Llama API Client with full documentation
- Added complete Python client for Llama AI models
- Support for internal network endpoints (tested and working)
- Support for external network endpoints (configured)
- Interactive chat interface with multiple models
- Automatic endpoint testing and failover
- Response cleaning for special markers
- Full documentation in English and Chinese
- Complete test suite and examples
- MIT License and contribution guidelines
This commit is contained in:
124
demo_chat.py
Normal file
124
demo_chat.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""
Llama API chat program (demo version).

When the API server is back online, this program can be used to chat.
"""

from openai import OpenAI
import time

# API configuration.
# NOTE(review): a hardcoded credential is committed here — this key should be
# rotated and loaded from an environment variable or config file instead.
API_KEY = "paVrIT+XU1NhwCAOb0X4aYi75QKogK5YNMGvQF1dCyo="
BASE_URL = "https://llama.theaken.com/v1"
||||
def simulate_chat():
    """Offline demo chat loop.

    Mimics the real chat UX while the API server is down: reads user
    input, shows a short "thinking" animation, and cycles through a
    fixed list of canned replies. Type 'exit' or 'quit' to leave.
    """
    banner = "=" * 50
    print("\n" + banner)
    print("Llama AI 對話系統 - 示範模式")
    print(banner)
    print("\n[注意] API 伺服器目前離線,以下為模擬對話")
    print("當伺服器恢復後,將自動連接真實 API\n")

    # Canned replies, served round-robin.
    canned_replies = [
        "你好!我是 Llama AI 助手,很高興為你服務。",
        "這是一個示範回應。當 API 伺服器恢復後,你將收到真實的 AI 回應。",
        "我可以回答問題、協助編程、翻譯文字等多種任務。",
        "請問有什麼我可以幫助你的嗎?"
    ]

    print("輸入 'exit' 結束對話\n")

    turn = 0
    while True:
        text = input("你: ").strip()

        # Guard clauses: explicit quit, then blank input.
        if text.lower() in ['exit', 'quit']:
            print("\n再見!")
            break
        if not text:
            continue

        # Fake a short "thinking" delay with a three-dot animation.
        print("\nAI 思考中", end="")
        for _ in range(3):
            time.sleep(0.3)
            print(".", end="", flush=True)
        print()

        print(f"\nAI: {canned_replies[turn % len(canned_replies)]}")
        turn += 1
||||
def real_chat():
    """Interactive chat loop against the live Llama API.

    Keeps the whole conversation in `messages` so the model sees full
    context each turn. On a failed request the pending user message is
    removed from the history, so retrying does not send a duplicated or
    unanswered turn. Type 'exit' or 'quit' to leave.
    """
    client = OpenAI(api_key=API_KEY, base_url=BASE_URL)

    print("\n" + "="*50)
    print("Llama AI 對話系統")
    print("="*50)
    print("\n已連接到 Llama API")
    print("輸入 'exit' 結束對話\n")

    messages = []  # running conversation: alternating user/assistant turns

    while True:
        user_input = input("你: ").strip()

        if user_input.lower() in ['exit', 'quit']:
            print("\n再見!")
            break

        if not user_input:
            continue

        messages.append({"role": "user", "content": user_input})

        try:
            print("\nAI 思考中...")
            response = client.chat.completions.create(
                model="gpt-oss-120b",
                messages=messages,
                temperature=0.7,
                max_tokens=1000
            )

            ai_response = response.choices[0].message.content
            print(f"\nAI: {ai_response}")
            messages.append({"role": "assistant", "content": ai_response})

        except Exception as e:
            # Fix: drop the failed user turn so the history stays
            # consistent — otherwise a retry would resend a dangling
            # user message that never got an assistant reply.
            messages.pop()
            print(f"\n[錯誤] {str(e)[:100]}")
            print("無法取得回應,請稍後再試")
||||
def main():
    """Probe the API once, then route to the real or the demo chat loop.

    A one-token request decides availability: on success, start the real
    chat; on a 502 (server offline) fall straight into demo mode; on any
    other connection error, offer demo mode interactively.
    """
    print("檢查 API 連接狀態...")

    try:
        client = OpenAI(api_key=API_KEY, base_url=BASE_URL)

        # Minimal one-token probe — we only care whether it raises.
        client.chat.completions.create(
            model="gpt-oss-120b",
            messages=[{"role": "user", "content": "test"}],
            max_tokens=10,
            timeout=5
        )
    except Exception as e:
        error_msg = str(e)
        if "502" in error_msg or "Bad gateway" in error_msg:
            print("[提示] API 伺服器目前離線 (502 錯誤)")
            print("進入示範模式...")
            simulate_chat()
        else:
            print(f"[錯誤] 無法連接: {error_msg[:100]}")
            print("\n是否要進入示範模式? (y/n): ", end="")
            if input().lower() == 'y':
                simulate_chat()
    else:
        # Fix: run the chat OUTSIDE the probe's try — previously an
        # exception escaping real_chat() (e.g. EOFError on stdin) was
        # misreported here as a connection failure.
        print("[成功] API 已連接")
        real_chat()
||||
# Entry point: start the chat only when executed as a script, not on import.
if __name__ == "__main__":
    main()
|
Reference in New Issue
Block a user