Connect AnyAPI with popular AI tools, frameworks, and development platforms
# Make your AnyAPI key available to every example below via the environment.
export ANYAPI_KEY="your-api-key-here"
# Install the LiteLLM client library used in the next snippet.
pip install litellm
# Route a chat completion through AnyAPI via LiteLLM by prefixing the model
# name with "anyapi/". Fix: `os` was used but never imported.
import os

from litellm import completion

response = completion(
    model="anyapi/gpt-4o",
    messages=[{"role": "user", "content": "Hello!"}],
    api_key=os.environ["ANYAPI_KEY"],
)
# Test API connection: send a minimal chat completion and report the outcome.
# Fix: `os` and `requests` were used but never imported.
import os

import requests

response = requests.post(
    "https://api.anyapi.ai/v1/chat/completions",
    headers={
        "Authorization": f"Bearer {os.environ['ANYAPI_KEY']}",
        "Content-Type": "application/json",
    },
    json={
        "model": "gpt-4o",
        "messages": [{"role": "user", "content": "Test connection"}],
    },
)
print("✅ Connection successful!" if response.status_code == 200 else "❌ Connection failed")
# Standard headers for authenticated JSON requests to AnyAPI.
# NOTE(review): assumes `api_key` is defined earlier in the surrounding example.
headers = {
    "Authorization": f"Bearer {api_key}",
    "Content-Type": "application/json",
}
# .env file
ANYAPI_KEY=your-api-key
ANYAPI_BASE_URL=https://api.anyapi.ai/v1
# Two equivalent ways to construct the client: explicit key, or implicit
# lookup of the ANYAPI_KEY environment variable.
from anyapi import AnyAPI

client = AnyAPI(api_key="your-key")

# or use environment variable
client = AnyAPI()  # reads ANYAPI_KEY
# config/development.yml
anyapi:
  base_url: https://api.anyapi.ai/v1
  timeout: 30
  retries: 3

# config/production.yml
anyapi:
  base_url: https://api.anyapi.ai/v1
  timeout: 60
  retries: 5
  rate_limit: 1000
# Handle AnyAPI's error hierarchy explicitly: back off on rate limits, log
# API failures with their status, and keep a catch-all for the unexpected.
# Fix: `time` and `logging` were used but never imported.
import logging
import time

import anyapi
from anyapi.exceptions import APIError, RateLimitError

try:
    response = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": "Hello"}],
    )
except RateLimitError:
    # Handle rate limiting
    time.sleep(60)
    retry_request()  # assumes a retry helper defined elsewhere in the app
except APIError as e:
    # Handle API errors
    logging.error(f"API error: {e.status_code} - {e.message}")
except Exception as e:
    # Handle unexpected errors
    logging.error(f"Unexpected error: {e}")
import logging

from anyapi.middleware import LoggingMiddleware

# Configure logging
logging.basicConfig(level=logging.INFO)

# Add middleware for request/response logging
client = AnyAPI(
    api_key="your-key",
    middleware=[LoggingMiddleware()],
)
# Before (OpenAI)
from openai import OpenAI

client = OpenAI(api_key="openai-key")

# After (AnyAPI) — same SDK, only the key and base URL change.
from openai import OpenAI

client = OpenAI(
    api_key="anyapi-key",
    base_url="https://api.anyapi.ai/v1",
)
# Before (Anthropic)
from anthropic import Anthropic

client = Anthropic(api_key="anthropic-key")

# After (AnyAPI) — Claude models are reachable through the OpenAI SDK.
from openai import OpenAI

client = OpenAI(
    api_key="anyapi-key",
    base_url="https://api.anyapi.ai/v1",
)

# Use Claude models
response = client.chat.completions.create(
    model="claude-3-5-sonnet",
    messages=[{"role": "user", "content": "Hello"}],
)
# Before (Google AI)
import google.generativeai as genai

genai.configure(api_key="google-key")

# After (AnyAPI) — Gemini models are reachable through the OpenAI SDK.
from openai import OpenAI

client = OpenAI(
    api_key="anyapi-key",
    base_url="https://api.anyapi.ai/v1",
)

# Use Gemini models
response = client.chat.completions.create(
    model="gemini-pro",
    messages=[{"role": "user", "content": "Hello"}],
)
import random
import time


def api_call_with_retry(func, max_retries=3):
    """Call *func*, retrying with exponential backoff plus jitter.

    Args:
        func: Zero-argument callable to invoke.
        max_retries: Total number of attempts before giving up.

    Returns:
        Whatever *func* returns on the first successful attempt.

    Raises:
        The exception from the final attempt once retries are exhausted.
    """
    for attempt in range(max_retries):
        try:
            return func()
        except Exception:
            if attempt == max_retries - 1:
                # Fix: bare `raise` re-raises with the original traceback
                # intact (the old `raise e` reset the raise point).
                raise
            # Exponential backoff (1s, 2s, 4s, ...) plus up to 1s of jitter
            # to avoid synchronized retries from many clients.
            wait_time = (2 ** attempt) + random.uniform(0, 1)
            time.sleep(wait_time)
def stream_response(messages):
    """Yield content deltas from a streaming chat completion as they arrive."""
    stream = client.chat.completions.create(
        model="gpt-4o",
        messages=messages,
        stream=True,
    )
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        # Skip empty/None deltas (e.g. role-only or final chunks).
        if delta:
            yield delta
async def batch_process(requests, batch_size=10, worker=None):
    """Process *requests* concurrently in fixed-size batches.

    Args:
        requests: Sequence of request objects to process.
        batch_size: Number of requests handled concurrently per batch.
        worker: Async callable applied to each request. Defaults to the
            module-level ``process_single_request`` helper (backward
            compatible with the original behavior).

    Returns:
        List of results in the same order as *requests*.
    """
    # Fix: `asyncio` was used but never imported; local import keeps the
    # snippet self-contained.
    import asyncio

    if worker is None:
        worker = process_single_request  # assumes helper defined elsewhere
    results = []
    for start in range(0, len(requests), batch_size):
        batch = requests[start:start + batch_size]
        # gather preserves input order within each batch.
        results.extend(await asyncio.gather(*(worker(req) for req in batch)))
    return results
Error: Invalid API key
Error: Rate limit exceeded
Error: Model 'invalid-model' not found
import logging

logging.basicConfig(level=logging.DEBUG)

# This will log all HTTP requests/responses
client = AnyAPI(api_key="your-key", debug=True)
def health_check():
    """Return True when a minimal one-token completion succeeds, else False."""
    try:
        client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": "ping"}],
            max_tokens=1,
        )
    except Exception:
        # Any failure (auth, network, model availability) counts as unhealthy.
        return False
    return True