from abc import ABC, abstractmethod
from litellm.caching import LiteLLMCacheType
import os
import sys
import time
import traceback
import uuid
from dotenv import load_dotenv
load_dotenv()
sys.path.insert(
0, os.path.abspath("../..")
) # Adds the parent directory to the system path
import asyncio
import hashlib
import random
import pytest
import litellm
from litellm.caching import Cache
from litellm import completion, embedding
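

# Base test class: concrete subclasses select the cache backend to exercise by
# implementing get_cache_type(); the tests below then verify cache hit/miss
# behavior for completion and embedding calls against that backend.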
class LLMCachingUnitTests(ABC):
    @abstractmethod
    def get_cache_type(self) -> LiteLLMCacheType:
        pass
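
    # Identical completion calls should be served from the cache (same id,
    # created timestamp, and content); a call with different params
    # (temperature) should miss the cache and return its own mock response.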
    @pytest.mark.parametrize("sync_mode", [True, False])
    @pytest.mark.asyncio
    async def test_cache_completion(self, sync_mode):
        litellm._turn_on_debug()

        random_number = random.randint(
            1, 100000
        )  # add a random number to ensure it's always adding / reading from cache
        messages = [
            {
                "role": "user",
                "content": f"write a one sentence poem about: {random_number}",
            }
        ]

        cache_type = self.get_cache_type()
        litellm.cache = Cache(
            type=cache_type,
        )

        if sync_mode:
            response1 = completion(
                "gpt-3.5-turbo",
                messages=messages,
                caching=True,
                max_tokens=20,
                mock_response="This number is so great!",
            )
        else:
            response1 = await litellm.acompletion(
                "gpt-3.5-turbo",
                messages=messages,
                caching=True,
                max_tokens=20,
                mock_response="This number is so great!",
            )
        # response2 is mocked to a different response from response1,
        # but the completion from the cache should be used instead of the mock
        # response since the input is the same as response1
        await asyncio.sleep(0.5)
        if sync_mode:
            response2 = completion(
                "gpt-3.5-turbo",
                messages=messages,
                caching=True,
                max_tokens=20,
                mock_response="This number is great!",
            )
        else:
            response2 = await litellm.acompletion(
                "gpt-3.5-turbo",
                messages=messages,
                caching=True,
                max_tokens=20,
                mock_response="This number is great!",
            )

        if (
            response1["choices"][0]["message"]["content"]
            != response2["choices"][0]["message"]["content"]
        ):  # 1 and 2 should be the same
            # 1 & 2 have the exact same input params. This MUST be a cache hit.
            print(f"response1: {response1}")
            print(f"response2: {response2}")
            pytest.fail(
                f"Error occurred: response1 - {response1['choices'][0]['message']['content']} != response2 - {response2['choices'][0]['message']['content']}"
            )

        # Since the parameters are not the same as response1, response3 should actually
        # be the mock response
        if sync_mode:
            response3 = completion(
                "gpt-3.5-turbo",
                messages=messages,
                caching=True,
                temperature=0.5,
                mock_response="This number is awful!",
            )
        else:
            response3 = await litellm.acompletion(
                "gpt-3.5-turbo",
                messages=messages,
                caching=True,
                temperature=0.5,
                mock_response="This number is awful!",
            )

        print("\nresponse 1", response1)
        print("\nresponse 2", response2)
        print("\nresponse 3", response3)
        # print("\nresponse 4", response4)
        litellm.cache = None
        litellm.success_callback = []
        litellm._async_success_callback = []

        # 1 & 2 should be exactly the same
        # 1 & 3 should be different, since input params are diff
        if (
            response1["choices"][0]["message"]["content"]
            == response3["choices"][0]["message"]["content"]
        ):
            # if input params like max_tokens, temperature are diff it should NOT be a cache hit
            print(f"response1: {response1}")
            print(f"response3: {response3}")
            pytest.fail(
                "Error occurred: response 1 == response 3. Same model with"
                " different params should not be a cache hit."
            )

        assert response1.id == response2.id
        assert response1.created == response2.created
        assert (
            response1.choices[0].message.content == response2.choices[0].message.content
        )
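
    # Identical embedding calls should hit the disk cache; adding a `user`
    # param changes the cache key, so the third call should be a cache miss.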
    @pytest.mark.parametrize("sync_mode", [True, False])
    @pytest.mark.asyncio
    async def test_disk_cache_embedding(self, sync_mode):
        litellm._turn_on_debug()

        random_number = random.randint(
            1, 100000
        )  # add a random number to ensure it's always adding / reading from cache
        input = [f"hello {random_number}"]
        litellm.cache = Cache(
            type="disk",
        )

        if sync_mode:
            response1 = embedding(
                "openai/text-embedding-ada-002",
                input=input,
                caching=True,
            )
        else:
            response1 = await litellm.aembedding(
                "openai/text-embedding-ada-002",
                input=input,
                caching=True,
            )
        # response2 should be served from the cache, since the input is the
        # same as response1
        await asyncio.sleep(0.5)
        if sync_mode:
            response2 = embedding(
                "openai/text-embedding-ada-002",
                input=input,
                caching=True,
            )
        else:
            response2 = await litellm.aembedding(
                "openai/text-embedding-ada-002",
                input=input,
                caching=True,
            )

        if response2._hidden_params["cache_hit"] is not True:
            pytest.fail("Cache hit should be True")
        # Since the parameters are not the same as response1 (different `user`),
        # response3 should not be a cache hit
        if sync_mode:
            response3 = embedding(
                "openai/text-embedding-ada-002",
                input=input,
                user="charlie",
                caching=True,
            )
        else:
            response3 = await litellm.aembedding(
                "openai/text-embedding-ada-002",
                input=input,
                caching=True,
                user="charlie",
            )

        print("\nresponse 1", response1)
        print("\nresponse 2", response2)
        print("\nresponse 3", response3)
        # print("\nresponse 4", response4)
        litellm.cache = None
        litellm.success_callback = []
        litellm._async_success_callback = []

        # 1 & 2 should be exactly the same
        # 1 & 3 should be different, since input params are diff
        if response3._hidden_params.get("cache_hit") is True:
            pytest.fail("Cache hit should not be True")