Initial commit: Llama API Client with full documentation
- Added complete Python client for Llama AI models
- Support for internal network endpoints (tested and working)
- Support for external network endpoints (configured)
- Interactive chat interface with multiple models
- Automatic endpoint testing and failover (a sketch of this follows below)
- Response cleaning for special markers
- Full documentation in English and Chinese
- Complete test suite and examples
- MIT License and contribution guidelines
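The endpoint testing and failover mentioned above is not part of the test script in this diff. A minimal sketch of how it might work, assuming a hypothetical ENDPOINTS list (only the external URL appears in this commit; the internal URL is a placeholder):

import requests

# Candidate endpoints, probed in order. The internal URL below is a
# hypothetical placeholder; only the external URL is taken from this commit.
ENDPOINTS = [
    "http://internal-host:8000/v1/chat/completions",  # internal (placeholder)
    "https://llama.theaken.com/v1/chat/completions",  # external (from this commit)
]

def first_working_endpoint(headers, probe_payload, timeout=10):
    """Return the first endpoint that answers the probe request, or None."""
    for url in ENDPOINTS:
        try:
            r = requests.post(url, headers=headers, json=probe_payload, timeout=timeout)
            if r.status_code == 200:
                return url  # this endpoint responds; use it for later calls
        except requests.exceptions.RequestException:
            continue  # unreachable or timed out; try the next endpoint
    return None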
simple_llama_test.py (new file, 46 lines)
@@ -0,0 +1,46 @@
import requests
import json

API_KEY = "paVrIT+XU1NhwCAOb0X4aYi75QKogK5YNMGvQF1dCyo="
BASE_URL = "https://llama.theaken.com/v1/chat/completions"


def test_api():
    """Send one chat-completion request and print the result."""
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }

    data = {
        "model": "gpt-oss-120b",
        "messages": [
            {"role": "user", "content": "Hello, can you respond?"}
        ],
        "temperature": 0.7,
        "max_tokens": 100
    }

    print("Testing API connection...")
    print(f"URL: {BASE_URL}")
    print("Model: gpt-oss-120b")
    print("-" * 50)

    try:
        response = requests.post(BASE_URL, headers=headers, json=data, timeout=30)

        if response.status_code == 200:
            result = response.json()
            print("[SUCCESS] API response:")
            print(result['choices'][0]['message']['content'])
        else:
            print(f"[ERROR] HTTP {response.status_code}")
            print(f"Response body: {response.text[:500]}")

    except requests.exceptions.Timeout:
        print("[ERROR] Request timed out")
    except requests.exceptions.ConnectionError:
        print("[ERROR] Unable to connect to the server")
    except Exception as e:
        print(f"[ERROR] {str(e)}")


if __name__ == "__main__":
    test_api()
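The response cleaning for special markers listed in the commit message is likewise not shown in this diff. A minimal sketch, assuming hypothetical "<|...|>"-style marker tokens (the actual markers the client strips are not visible here):

import re

# Hypothetical pattern -- the real client defines its own marker list; this
# "<|...|>" form is used purely as an illustration.
MARKER_PATTERN = re.compile(r"<\|[^>]*?\|>")

def clean_response(text):
    """Remove special model markers from a raw completion string."""
    return MARKER_PATTERN.sub("", text).strip()

For example, clean_response("<|start|>Hello!<|end|>") would return "Hello!".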