Add Python scripts for Llama API chat clients, endpoint testing, and quick tests. Include documentation (README, CONTRIBUTING, 操作指南), license, and .gitignore. Supports multiple endpoints and models for OpenAI-compatible Llama API usage.
46 lines · 1.3 KiB · Python
import json
import os

import requests
|
# API credential — prefer the LLAMA_API_KEY environment variable; the
# hard-coded fallback preserves the original script's behavior.
# SECURITY NOTE(review): this key is committed in source and should be
# rotated, then the fallback removed.
API_KEY = os.environ.get("LLAMA_API_KEY", "paVrIT+XU1NhwCAOb0X4aYi75QKogK5YNMGvQF1dCyo=")

# OpenAI-compatible chat-completions endpoint.
BASE_URL = "https://llama.theaken.com/v1/chat/completions"
|
|
|
|
def test_api(model="gpt-oss-120b", prompt="Hello, can you respond?", *, timeout=30):
    """Send one chat-completion request to the Llama API and report the result.

    Prints progress and the model's reply (or an error message) to stdout.

    Args:
        model: Model identifier to request; default matches the original script.
        prompt: User message to send.
        timeout: Request timeout in seconds.

    Returns:
        True if the server answered HTTP 200 with a parseable choice,
        False on any failure (non-200 status, timeout, connection error, ...).
    """
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json",
    }

    data = {
        "model": model,
        "messages": [
            {"role": "user", "content": prompt}
        ],
        "temperature": 0.7,
        "max_tokens": 100,
    }

    print("正在測試 API 連接...")
    print(f"URL: {BASE_URL}")
    # Previously hard-coded; now reflects whatever model was requested.
    print(f"Model: {model}")
    print("-" * 50)

    try:
        response = requests.post(BASE_URL, headers=headers, json=data, timeout=timeout)

        if response.status_code == 200:
            result = response.json()
            print("[成功] API 回應:")
            print(result['choices'][0]['message']['content'])
            return True

        print(f"[錯誤] HTTP {response.status_code}")
        # Truncate the body so a huge error page doesn't flood the console.
        print(f"回應內容: {response.text[:500]}")
    except requests.exceptions.Timeout:
        print("[錯誤] 請求超時")
    except requests.exceptions.ConnectionError:
        print("[錯誤] 無法連接到伺服器")
    except Exception as e:
        # Top-level boundary of a CLI script: report and fall through, don't crash.
        print(f"[錯誤] {str(e)}")
    return False
|
|
|
|
# Run the connectivity check only when executed directly, not on import.
if __name__ == "__main__":
    test_api()