Model ID: meta-llama/llama-guard-3-8b
Input: $0.02 per 1M tokens
Output: $0.06 per 1M tokens
import requests
# Fetch pricing data from llmprices.ai
response = requests.get(
"https://llmprices.ai/api/pricing?model=meta-llama/llama-guard-3-8b"
)
data = response.json()
print(f"Model: {data['name']}")
print(f"Input: {float(data["pricing"]["prompt"]) * 1000000:.2f}/1M tokens")
print(f"Output: {float(data["pricing"]["completion"]) * 1000000:.2f}/1M tokens")Endpoint:
GET https://llmprices.ai/api/pricing?model=meta-llama/llama-guard-3-8b

Example Response:
{
"id": "meta-llama/llama-guard-3-8b",
"name": "Llama Guard 3 8B",
"pricing": {
"prompt": "0.00000002",
"completion": "0.00000006"
}
}