APIs break. Networks fail. Models go offline. It’s not if you’ll hit errors—it’s when. AnyAPI uses standard HTTP status codes and gives you structured error responses so you can handle failures gracefully instead of letting your app crash and burn.
/**
 * Parse an AnyAPI HTTP response and throw a typed error on failure.
 *
 * @param {Response} response - fetch-style Response carrying a JSON body.
 * @returns {Promise<object>} The parsed JSON body when response.ok.
 * @throws {ValidationError|AuthenticationError|InsufficientCreditsError|
 *          ModerationError|RateLimitError|ModelUnavailableError|ApiError}
 */
async function handleApiResponse(response) {
  const data = await response.json();
  if (!response.ok) {
    // Guard: an error body may arrive without a structured `error` object;
    // fall back to the HTTP status so the switch below still dispatches.
    const error = data.error ?? { code: response.status, message: response.statusText };
    switch (error.code) {
      case 400:
        throw new ValidationError(error.message, error.metadata);
      case 401:
        throw new AuthenticationError("Check your API key, buddy");
      case 402:
        throw new InsufficientCreditsError("Time to add more credits!", error.metadata);
      case 403:
        throw new ModerationError("Content got flagged", error.metadata);
      case 429: {
        // Braces give the lexical declaration its own scope so it cannot
        // collide with other cases (ESLint no-case-declarations).
        const retryAfter = error.metadata?.retry_after || 60;
        throw new RateLimitError(`Slow down! Try again in ${retryAfter}s`, retryAfter);
      }
      case 502:
        throw new ModelUnavailableError("Model is taking a nap", error.metadata);
      default:
        throw new ApiError(`Something went wrong: ${error.message}`, error.code);
    }
  }
  return data;
}
async def generate_with_fallback(prompt, models=None):
    """Try models in order until one responds successfully.

    Args:
        prompt: User prompt to send as a single chat message.
        models: Ordered list of model identifiers to attempt. Defaults to
            gpt-4o, then claude-3-sonnet, then gemini-pro.

    Returns:
        The first successful chat-completion response.

    Raises:
        Exception: When every model fails with a 502/503 availability
            error; any other error from the API is re-raised immediately.
    """
    if models is None:
        models = [
            "openai/gpt-4o",              # First choice
            "anthropic/claude-3-sonnet",  # Solid backup
            "google/gemini-pro",          # Emergency option
        ]
    for i, model in enumerate(models):
        try:
            response = await client.chat.completions.create(
                model=model,
                messages=[{"role": "user", "content": prompt}],
            )
            if i > 0:
                print(f"Fell back to {model} after {i} failures")
            return response
        except Exception as e:
            # Not every exception carries .status_code; getattr keeps an
            # AttributeError from masking the real failure here.
            if getattr(e, "status_code", None) in (502, 503):
                # Model unavailable, try next one
                print(f"{model} is down, trying next fallback...")
                continue
            # Other error, don't bother with fallbacks. A bare `raise`
            # preserves the original traceback (unlike `raise e`).
            raise
    raise Exception("All fallback models failed. The AI apocalypse has begun.")
def test_error_handling():
    """Smoke-test the client's error-handling paths against the live API.

    NOTE(review): this is a manual exercise script, not an isolated unit
    test -- it makes real network calls.
    """
    # Construct the client OUTSIDE the try so a constructor failure isn't
    # mistaken for the AuthenticationError we are probing for.
    client = AnyAPIClient(api_key="totally-fake-key")

    # Test bad auth
    try:
        response = client.chat.completions.create(...)
        # `assert False` would be stripped under `python -O`; raise the
        # AssertionError explicitly so the check always runs.
        raise AssertionError("This should have failed!")
    except AuthenticationError:
        print("✅ Auth error handling works")

    # Test rate limiting (carefully!)
    # NOTE(review): with the fake key above, these calls presumably fail
    # auth before ever reaching the rate limiter -- a valid key is needed
    # for this branch to trigger. TODO confirm.
    try:
        for i in range(1000):  # Don't actually do this
            response = client.chat.completions.create(...)
    except RateLimitError as e:
        print(f"✅ Rate limit handling works (retry after {e.retry_after}s)")
def generate_text(prompt):
    """Generate text for *prompt*, degrading gracefully on API failure.

    Returns the model output from ``anyapi_generate``; if that call raises
    ``ApiError``, returns a canned apology string instead.
    """
    try:
        result = anyapi_generate(prompt)
    except ApiError:
        # Fall back to cached responses, simpler logic, or user notification
        result = "Sorry, AI is taking a break. Try again in a few minutes!"
    return result
def user_friendly_error(error):
    """Map a technical API error to a message humans can understand.

    Looks up ``error.code`` against known HTTP status codes; unknown codes
    fall through to a generic apology.
    """
    fallback = "Something unexpected happened. Our team has been notified."
    by_code = {
        401: "Looks like there's an issue with your account. Please contact support.",
        402: "You've reached your usage limit. Time to upgrade! 🚀",
        403: "That request contains content we can't process. Try rephrasing it.",
        429: "Whoa there! You're making requests too quickly. Take a short break.",
        500: "Our servers are having a moment. Please try again in a bit.",
        502: "The AI model is temporarily unavailable. We're on it!",
    }
    return by_code.get(error.code, fallback)
Specific error patterns - Identify systemic issues
Model availability - Know when your preferred models are down
Rate limit proximity - Upgrade before you hit the wall
Remember: Good error handling is invisible to users but saves your sanity. Handle errors gracefully, log everything you need for debugging, and always have a fallback plan.