import asyncio
import os
import sys

import pytest
from dotenv import load_dotenv

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

import litellm

load_dotenv()  # load OPENAI_API_KEY and other credentials from a local .env file, if present

@pytest.mark.asyncio
async def test_acompletion_fallbacks_basic():
    # The primary model is invalid; the call should transparently retry on the fallback.
    response = await litellm.acompletion(
        model="openai/unknown-model",
        messages=[{"role": "user", "content": "Hello, world!"}],
        fallbacks=["openai/gpt-4o-mini"],
    )
    print(response)
    assert response is not None

@pytest.mark.asyncio
async def test_acompletion_fallbacks_bad_models():
    """
    Test that the acompletion call fails within 5 seconds if no fallbacks work
    """
    try:
        # Wrap the acompletion call with asyncio.wait_for to enforce a timeout
        response = await asyncio.wait_for(
            litellm.acompletion(
                model="openai/unknown-model",
                messages=[{"role": "user", "content": "Hello, world!"}],
                fallbacks=["openai/bad-model", "openai/unknown-model"],
            ),
            timeout=5.0,  # Timeout after 5 seconds
        )
        assert response is not None
    except asyncio.TimeoutError:
        pytest.fail("Test timed out - possible infinite loop in fallbacks")
    except Exception as e:
        # Any other exception is expected: every model in the chain is invalid.
        print(e)

@pytest.mark.asyncio
async def test_acompletion_fallbacks_with_dict_config():
    """
    Test fallbacks with dictionary configuration that includes model-specific settings
    """
    # The primary call uses a bad key; the dict fallback swaps in a valid one.
    response = await litellm.acompletion(
        model="openai/gpt-4o-mini",
        messages=[{"role": "user", "content": "Hello, world!"}],
        api_key="very-bad-api-key",
        fallbacks=[{"api_key": os.getenv("OPENAI_API_KEY")}],
    )
    assert response is not None

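
# litellm's fallbacks also accept dict entries that swap the model itself along
# with per-model settings. This extra test is a sketch of that usage; the exact
# dict keys honored ("model" alongside "api_key") are an assumption, not
# confirmed against litellm's documentation.
@pytest.mark.asyncio
async def test_acompletion_fallbacks_with_dict_model_override():
    response = await litellm.acompletion(
        model="openai/unknown-model",
        messages=[{"role": "user", "content": "Hello, world!"}],
        fallbacks=[
            {"model": "openai/gpt-4o-mini", "api_key": os.getenv("OPENAI_API_KEY")}
        ],
    )
    assert response is not None
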
@pytest.mark.asyncio
async def test_acompletion_fallbacks_empty_list():
    """
    Test behavior when the fallbacks list is empty - the original error should surface
    """
    with pytest.raises(litellm.NotFoundError):
        await litellm.acompletion(
            model="openai/unknown-model",
            messages=[{"role": "user", "content": "Hello, world!"}],
            fallbacks=[],
        )

@pytest.mark.asyncio
async def test_acompletion_fallbacks_none_response():
    """
    Test handling when a fallback model returns None -
    the chain should continue to the next fallback rather than returning None
    """
    response = await litellm.acompletion(
        model="openai/unknown-model",
        messages=[{"role": "user", "content": "Hello, world!"}],
        fallbacks=["gpt-3.5-turbo"],  # replace with a model you know works
    )
    assert response is not None

def test_completion_fallbacks_sync():
    # Sync variant: litellm.completion is not a coroutine, so this is a plain test
    # (the original declared it async without @pytest.mark.asyncio, so pytest skipped it).
    response = litellm.completion(
        model="openai/unknown-model",
        messages=[{"role": "user", "content": "Hello, world!"}],
        fallbacks=["openai/gpt-4o-mini"],
    )
    print(response)
    assert response is not None
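
# Minimal sketch for running one test ad hoc outside pytest (e.g. `python <this file>`);
# pytest remains the intended entry point, so this block is optional convenience.
if __name__ == "__main__":
    asyncio.run(test_acompletion_fallbacks_basic())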