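"""
Unit tests for litellm's Responses API utilities.

Covers optional-parameter handling in ResponsesAPIRequestUtils, encoding and
decoding of previous_response_id values, and ResponseAPILoggingUtils'
conversion of Responses API usage blocks into Chat-format Usage objects.
"""
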
import base64
import json
import os
import sys

import pytest
from fastapi.testclient import TestClient

sys.path.insert(
0, os.path.abspath("../../..")
)  # Adds the parent directory to the system path

import litellm
from litellm.llms.base_llm.responses.transformation import BaseResponsesAPIConfig
from litellm.llms.openai.responses.transformation import OpenAIResponsesAPIConfig
from litellm.responses.utils import ResponseAPILoggingUtils, ResponsesAPIRequestUtils
from litellm.types.llms.openai import ResponsesAPIOptionalRequestParams
from litellm.types.utils import Usage


class TestResponsesAPIRequestUtils:
def test_get_optional_params_responses_api(self):
"""Test that optional parameters are correctly processed for responses API"""
# Setup
model = "gpt-4o"
config = OpenAIResponsesAPIConfig()
optional_params = ResponsesAPIOptionalRequestParams(
{"temperature": 0.7, "max_output_tokens": 100}
)

        # Execute
result = ResponsesAPIRequestUtils.get_optional_params_responses_api(
model=model,
responses_api_provider_config=config,
response_api_optional_params=optional_params,
)

        # Assert
assert result == optional_params
assert "temperature" in result
assert result["temperature"] == 0.7
assert "max_output_tokens" in result
assert result["max_output_tokens"] == 100

    def test_get_optional_params_responses_api_unsupported_param(self):
"""Test that unsupported parameters raise an error"""
# Setup
model = "gpt-4o"
config = OpenAIResponsesAPIConfig()
optional_params = ResponsesAPIOptionalRequestParams(
{"temperature": 0.7, "unsupported_param": "value"}
)

        # Execute and Assert
with pytest.raises(litellm.UnsupportedParamsError) as excinfo:
ResponsesAPIRequestUtils.get_optional_params_responses_api(
model=model,
responses_api_provider_config=config,
response_api_optional_params=optional_params,
)
assert "unsupported_param" in str(excinfo.value)
assert model in str(excinfo.value)

    def test_get_requested_response_api_optional_param(self):
"""Test filtering parameters to only include those in ResponsesAPIOptionalRequestParams"""
# Setup
params = {
"temperature": 0.7,
"max_output_tokens": 100,
"invalid_param": "value",
"model": "gpt-4o", # This is not in ResponsesAPIOptionalRequestParams
}

        # Execute
result = ResponsesAPIRequestUtils.get_requested_response_api_optional_param(
params
)

        # Assert
assert "temperature" in result
assert "max_output_tokens" in result
assert "invalid_param" not in result
assert "model" not in result
assert result["temperature"] == 0.7
assert result["max_output_tokens"] == 100

    def test_decode_previous_response_id_to_original_previous_response_id(self):
"""Test decoding a LiteLLM encoded previous_response_id to the original previous_response_id"""
# Setup
test_provider = "openai"
test_model_id = "gpt-4o"
original_response_id = "resp_abc123"

        # Use the helper method to build an encoded response ID
encoded_id = ResponsesAPIRequestUtils._build_responses_api_response_id(
custom_llm_provider=test_provider,
model_id=test_model_id,
response_id=original_response_id,
)
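
        # The helper packs custom_llm_provider and model_id together with the
        # original response id into a single opaque encoded id; decoding it
        # should round-trip back to the original "resp_abc123".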
# Execute
result = ResponsesAPIRequestUtils.decode_previous_response_id_to_original_previous_response_id(
encoded_id
)

        # Assert
assert result == original_response_id

        # Test with a non-encoded ID
plain_id = "resp_xyz789"
result_plain = ResponsesAPIRequestUtils.decode_previous_response_id_to_original_previous_response_id(
plain_id
)
assert result_plain == plain_id


class TestResponseAPILoggingUtils:
def test_is_response_api_usage_true(self):
"""Test identification of Response API usage format"""
# Setup
usage = {"input_tokens": 10, "output_tokens": 20}

        # Execute
result = ResponseAPILoggingUtils._is_response_api_usage(usage)

        # Assert
assert result is True

    def test_is_response_api_usage_false(self):
"""Test identification of non-Response API usage format"""
# Setup
usage = {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}

        # Execute
result = ResponseAPILoggingUtils._is_response_api_usage(usage)

        # Assert
assert result is False

    def test_transform_response_api_usage_to_chat_usage(self):
"""Test transformation from Response API usage to Chat usage format"""
# Setup
usage = {
"input_tokens": 10,
"output_tokens": 20,
"total_tokens": 30,
"output_tokens_details": {"reasoning_tokens": 5},
}
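
        # The transform is expected to map input_tokens -> prompt_tokens and
        # output_tokens -> completion_tokens on the returned Usage object.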
# Execute
result = ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
usage
)

        # Assert
assert isinstance(result, Usage)
assert result.prompt_tokens == 10
assert result.completion_tokens == 20
assert result.total_tokens == 30

    def test_transform_response_api_usage_with_none_values(self):
"""Test transformation handles None values properly"""
# Setup
usage = {
"input_tokens": 0, # Changed from None to 0
"output_tokens": 20,
"total_tokens": 20,
"output_tokens_details": {"reasoning_tokens": 5},
}

        # Execute
result = ResponseAPILoggingUtils._transform_response_api_usage_to_chat_usage(
usage
)

        # Assert
assert result.prompt_tokens == 0
assert result.completion_tokens == 20
assert result.total_tokens == 20