Integration
OpenAI SDK Integration
Complete guide for integrating AgentRouter with OpenAI SDK in Python and TypeScript
OpenAI SDK Integration
This guide provides comprehensive instructions for integrating AgentRouter using the OpenAI SDK. AgentRouter supports multiple providers including OpenAI, DeepSeek, and Moonshot through the OpenAI-compatible API.
Installation
Python
pip install openai

TypeScript
npm install openai
# or
yarn add openai
# or
pnpm add openai

Quick Start
Python Setup
from openai import OpenAI
client = OpenAI(
api_key="sk-ar-your-api-key", # Your AgentRouter API key
base_url="https://your-agentrouter.com/v1" # Your AgentRouter instance
)

TypeScript Setup
import OpenAI from 'openai';
const client = new OpenAI({
apiKey: 'sk-ar-your-api-key', // Your AgentRouter API key
baseURL: 'https://your-agentrouter.com/v1' // Your AgentRouter instance
});

Model Routing
AgentRouter automatically routes requests to the appropriate provider based on the model name:
| Model Keyword | Provider | Examples |
|---|---|---|
| gpt- | OpenAI | gpt-4o, gpt-4-turbo, gpt-3.5-turbo |
| deepseek | DeepSeek | deepseek-chat, deepseek-v3 |
| moonshot, moon | Moonshot | moonshot-v1-8k, moonshot-v1-32k |
| Others | OpenAI | Default routing |
Basic Chat Completion
Python Example
response = client.chat.completions.create(
model="gpt-4o",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": "What is machine learning?"}
]
)
print(response.choices[0].message.content)

TypeScript Example
const response = await client.chat.completions.create({
model: 'gpt-4o',
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'What is machine learning?' }
]
});
console.log(response.choices[0].message.content);

Streaming Responses
Python Streaming
stream = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Tell me a long story"}],
stream=True
)
for chunk in stream:
if chunk.choices[0].delta.content:
print(chunk.choices[0].delta.content, end="", flush=True)

TypeScript Streaming
const stream = await client.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Tell me a long story' }],
stream: true
});
for await (const chunk of stream) {
const content = chunk.choices[0]?.delta?.content;
if (content) {
process.stdout.write(content);
}
}

Multi-Turn Conversations
Python Conversation
messages = [
{"role": "system", "content": "You are a helpful assistant."}
]
# First turn
messages.append({"role": "user", "content": "My name is Alice"})
response = client.chat.completions.create(
model="gpt-4o",
messages=messages
)
assistant_message = response.choices[0].message.content
messages.append({"role": "assistant", "content": assistant_message})
# Second turn
messages.append({"role": "user", "content": "What's my name?"})
response = client.chat.completions.create(
model="gpt-4o",
messages=messages
)
print(response.choices[0].message.content) # "Your name is Alice"

TypeScript Conversation
const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
{ role: 'system', content: 'You are a helpful assistant.' }
];
// First turn
messages.push({ role: 'user', content: 'My name is Alice' });
let response = await client.chat.completions.create({
model: 'gpt-4o',
messages: messages
});
messages.push({
role: 'assistant',
content: response.choices[0].message.content
});
// Second turn
messages.push({ role: 'user', content: "What's my name?" });
response = await client.chat.completions.create({
model: 'gpt-4o',
messages: messages
});
console.log(response.choices[0].message.content); // "Your name is Alice"

Using Different Providers
OpenAI Models
# Python
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Hello!"}]
)

// TypeScript
const response = await client.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Hello!' }]
});

DeepSeek Models
# Python
response = client.chat.completions.create(
model="deepseek-chat",
messages=[{"role": "user", "content": "Hello!"}]
)

// TypeScript
const response = await client.chat.completions.create({
model: 'deepseek-chat',
messages: [{ role: 'user', content: 'Hello!' }]
});

Moonshot Models
# Python
response = client.chat.completions.create(
model="moonshot-v1-8k",
messages=[{"role": "user", "content": "Hello!"}]
)

// TypeScript
const response = await client.chat.completions.create({
model: 'moonshot-v1-8k',
messages: [{ role: 'user', content: 'Hello!' }]
});

Advanced Parameters
Temperature and Token Control
# Python
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Write a creative poem"}],
temperature=0.8, # 0-2, higher = more creative
max_tokens=500, # Maximum output tokens
top_p=0.9, # Nucleus sampling
frequency_penalty=0.5, # Reduce repetition
presence_penalty=0.5 # Encourage topic diversity
)

// TypeScript
const response = await client.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Write a creative poem' }],
temperature: 0.8, // 0-2, higher = more creative
max_tokens: 500, // Maximum output tokens
top_p: 0.9, // Nucleus sampling
frequency_penalty: 0.5, // Reduce repetition
presence_penalty: 0.5 // Encourage topic diversity
});

Function Calling
Python Function Calling
tools = [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a city",
"parameters": {
"type": "object",
"properties": {
"city": {
"type": "string",
"description": "The city name, e.g., San Francisco"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"],
"description": "Temperature unit"
}
},
"required": ["city"]
}
}
}
]
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "What's the weather in San Francisco?"}],
tools=tools,
tool_choice="auto"
)
# Check for function calls
if response.choices[0].message.tool_calls:
tool_call = response.choices[0].message.tool_calls[0]
print(f"Function: {tool_call.function.name}")
print(f"Arguments: {tool_call.function.arguments}")
# Execute the function and return results
import json
args = json.loads(tool_call.function.arguments)
weather_result = {"temperature": 72, "condition": "sunny"}
# Send function result back
messages = [
{"role": "user", "content": "What's the weather in San Francisco?"},
response.choices[0].message,
{
"role": "tool",
"tool_call_id": tool_call.id,
"content": json.dumps(weather_result)
}
]
final_response = client.chat.completions.create(
model="gpt-4o",
messages=messages
)
print(final_response.choices[0].message.content)

TypeScript Function Calling
const tools: OpenAI.Chat.ChatCompletionTool[] = [
{
type: 'function',
function: {
name: 'get_weather',
description: 'Get the current weather for a city',
parameters: {
type: 'object',
properties: {
city: {
type: 'string',
description: 'The city name, e.g., San Francisco'
},
unit: {
type: 'string',
enum: ['celsius', 'fahrenheit'],
description: 'Temperature unit'
}
},
required: ['city']
}
}
}
];
const response = await client.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: "What's the weather in San Francisco?" }],
tools: tools,
tool_choice: 'auto'
});
// Check for function calls
const toolCall = response.choices[0].message.tool_calls?.[0];
if (toolCall) {
console.log(`Function: ${toolCall.function.name}`);
console.log(`Arguments: ${toolCall.function.arguments}`);
// Execute the function
const args = JSON.parse(toolCall.function.arguments);
const weatherResult = { temperature: 72, condition: 'sunny' };
// Send function result back
const finalResponse = await client.chat.completions.create({
model: 'gpt-4o',
messages: [
{ role: 'user', content: "What's the weather in San Francisco?" },
response.choices[0].message,
{
role: 'tool',
tool_call_id: toolCall.id,
content: JSON.stringify(weatherResult)
}
]
});
console.log(finalResponse.choices[0].message.content);
}

JSON Mode
Python JSON Mode
import json
response = client.chat.completions.create(
model="gpt-4o",
messages=[
{"role": "system", "content": "You are a helpful assistant that outputs JSON."},
{"role": "user", "content": "Generate a user profile with name, age, and hobbies"}
],
response_format={"type": "json_object"}
)
data = json.loads(response.choices[0].message.content)
print(json.dumps(data, indent=2))

TypeScript JSON Mode
const response = await client.chat.completions.create({
model: 'gpt-4o',
messages: [
{ role: 'system', content: 'You are a helpful assistant that outputs JSON.' },
{ role: 'user', content: 'Generate a user profile with name, age, and hobbies' }
],
response_format: { type: 'json_object' }
});
const data = JSON.parse(response.choices[0].message.content || '{}');
console.log(JSON.stringify(data, null, 2));

Error Handling
Python Error Handling
from openai import (
OpenAIError,
APIError,
APIConnectionError,
RateLimitError,
AuthenticationError
)
try:
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Hello!"}]
)
except AuthenticationError:
print("Authentication failed - check your API key")
except RateLimitError:
print("Rate limit exceeded - please retry later")
except APIConnectionError:
print("Network connection error")
except APIError as e:
print(f"API error: {e.status_code} - {e.message}")
except OpenAIError as e:
print(f"OpenAI SDK error: {e}")

TypeScript Error Handling
import { OpenAI } from 'openai';
try {
const response = await client.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Hello!' }]
});
} catch (error) {
if (error instanceof OpenAI.APIError) {
console.error(`API Error: ${error.status} - ${error.message}`);
if (error.status === 401) {
console.error('Authentication failed - check your API key');
} else if (error.status === 402) {
console.error('Insufficient wallet balance');
} else if (error.status === 429) {
console.error('Rate limit exceeded');
}
} else {
console.error('Unexpected error:', error);
}
}

Best Practices
Environment Variables
Python:
import os
from openai import OpenAI
client = OpenAI(
api_key=os.getenv("AGENTROUTER_API_KEY"),
base_url=os.getenv("AGENTROUTER_BASE_URL", "https://your-agentrouter.com/v1")
)

TypeScript:
import OpenAI from 'openai';
const client = new OpenAI({
apiKey: process.env.AGENTROUTER_API_KEY,
baseURL: process.env.AGENTROUTER_BASE_URL || 'https://your-agentrouter.com/v1'
});

Retry Logic
Python:
import time
from openai import APIError
def chat_with_retry(client, max_retries=3, **kwargs):
    """Call chat.completions.create, retrying with exponential backoff.

    Retries up to ``max_retries`` times on ``APIError``, sleeping
    ``2 ** attempt`` seconds between attempts (1s, 2s, 4s, ...).
    The final failure is re-raised to the caller unchanged.
    """
    last_attempt = max_retries - 1
    for attempt in range(max_retries):
        try:
            return client.chat.completions.create(**kwargs)
        except APIError as e:
            if attempt == last_attempt:
                raise
            delay = 2 ** attempt  # exponential backoff: 1, 2, 4, ...
            print(f"Error: {e}, retrying in {delay}s...")
            time.sleep(delay)
# Usage
response = chat_with_retry(
client,
model="gpt-4o",
messages=[{"role": "user", "content": "Hello!"}]
)

TypeScript:
/**
 * Chat completion with exponential backoff retry.
 *
 * Retries failed requests up to `maxRetries` times, doubling the wait
 * between attempts (1s, 2s, 4s, ...) and rethrowing the last error once
 * all attempts are exhausted.
 *
 * NOTE: the params type is the *non-streaming* variant. The broader
 * `ChatCompletionCreateParams` union includes `stream: true`, under which
 * `create()` returns a `Stream<ChatCompletionChunk>` and would not match
 * the declared `Promise<ChatCompletion>` return type.
 */
async function chatWithRetry(
  client: OpenAI,
  params: OpenAI.Chat.ChatCompletionCreateParamsNonStreaming,
  maxRetries = 3
): Promise<OpenAI.Chat.ChatCompletion> {
  for (let attempt = 0; attempt < maxRetries; attempt++) {
    try {
      return await client.chat.completions.create(params);
    } catch (error) {
      if (attempt === maxRetries - 1) throw error;
      const waitTime = Math.pow(2, attempt) * 1000; // Exponential backoff: 1s, 2s, 4s, ...
      console.log(`Error: ${error}, retrying in ${waitTime}ms...`);
      await new Promise(resolve => setTimeout(resolve, waitTime));
    }
  }
  // Unreachable: the loop either returns or rethrows on the final attempt.
  throw new Error('Max retries exceeded');
}
// Usage
const response = await chatWithRetry(client, {
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Hello!' }]
});

Token Usage Tracking
Python:
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
response = client.chat.completions.create(
model="gpt-4o",
messages=[{"role": "user", "content": "Hello!"}]
)
if response.usage:
logger.info(f"Tokens - Input: {response.usage.prompt_tokens}, "
f"Output: {response.usage.completion_tokens}, "
f"Total: {response.usage.total_tokens}")

TypeScript:
const response = await client.chat.completions.create({
model: 'gpt-4o',
messages: [{ role: 'user', content: 'Hello!' }]
});
if (response.usage) {
console.log(`Tokens - Input: ${response.usage.prompt_tokens}, ` +
`Output: ${response.usage.completion_tokens}, ` +
`Total: ${response.usage.total_tokens}`);
}

Common Error Codes
| Status Code | Description | Solution |
|---|---|---|
| 401 | Unauthorized | Check API key format (must start with sk-ar-) |
| 402 | Payment Required | Insufficient wallet balance - top up your wallet |
| 429 | Rate Limit Exceeded | Wait for rate limit reset or adjust request frequency |
| 500 | Internal Server Error | Upstream provider issue - retry later |
| 503 | Service Unavailable | Service temporarily unavailable - retry with backoff |
Complete Example Applications
Python CLI Chatbot
#!/usr/bin/env python3
import os
from openai import OpenAI
def main():
    """Run an interactive terminal chat session against AgentRouter.

    Reads user input line by line, keeps the full conversation history
    (replayed on every request so the model retains context), and prints
    each assistant reply. Type 'quit' to exit.
    """
    client = OpenAI(
        api_key=os.getenv("AGENTROUTER_API_KEY"),
        base_url=os.getenv("AGENTROUTER_BASE_URL", "https://your-agentrouter.com/v1")
    )
    # The system prompt anchors the assistant's behavior for the whole session.
    history = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]
    print("Chat started! Type 'quit' to exit.\n")
    while True:
        text = input("You: ").strip()
        if text.lower() == 'quit':
            break
        if not text:
            continue  # ignore blank lines
        history.append({"role": "user", "content": text})
        try:
            completion = client.chat.completions.create(
                model="gpt-4o",
                messages=history
            )
            reply = completion.choices[0].message.content
            history.append({"role": "assistant", "content": reply})
            print(f"Assistant: {reply}\n")
        except Exception as e:
            # Keep the session alive on transient API errors.
            print(f"Error: {e}\n")
if __name__ == "__main__":
main()

TypeScript Chat Application
import OpenAI from 'openai';
import * as readline from 'readline';
const client = new OpenAI({
apiKey: process.env.AGENTROUTER_API_KEY!,
baseURL: process.env.AGENTROUTER_BASE_URL || 'https://your-agentrouter.com/v1'
});
/**
 * Interactive terminal chat loop.
 *
 * Reads user input line by line, keeps the full conversation history
 * (resent on every request so the model retains context), and prints
 * each assistant reply. Type 'quit' to exit.
 */
async function main() {
  // The system prompt anchors the assistant's behavior for the session.
  const history: OpenAI.Chat.ChatCompletionMessageParam[] = [
    { role: 'system', content: 'You are a helpful assistant.' }
  ];
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout
  });
  console.log("Chat started! Type 'quit' to exit.\n");
  // Promise wrapper around the callback-style readline API.
  const prompt = (query: string): Promise<string> =>
    new Promise(resolve => rl.question(query, resolve));
  while (true) {
    const line = await prompt('You: ');
    if (line.toLowerCase() === 'quit') {
      break;
    }
    if (!line.trim()) {
      continue; // ignore blank lines
    }
    history.push({ role: 'user', content: line });
    try {
      const completion = await client.chat.completions.create({
        model: 'gpt-4o',
        messages: history
      });
      const reply = completion.choices[0].message.content;
      if (reply) {
        history.push({ role: 'assistant', content: reply });
        console.log(`Assistant: ${reply}\n`);
      }
    } catch (error) {
      // Keep the session alive on transient API errors.
      console.error(`Error: ${error}\n`);
    }
  }
  rl.close();
}
main();