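"""
Tests for LiteLLM's Perplexity Sonar reasoning models (sonar-reasoning and
sonar-reasoning-pro): mapping of the reasoning_effort parameter, a mocked
completion call, reasoning-support detection, and provider configuration.
"""
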
import os
import sys
from unittest.mock import patch, MagicMock

import pytest

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path

import litellm
from litellm import completion
from litellm.utils import get_optional_params
class TestPerplexityReasoning:
    """
    Test suite for Perplexity Sonar reasoning models with the reasoning_effort parameter.
    """

    @pytest.mark.parametrize(
        "model,reasoning_effort",
        [
            ("perplexity/sonar-reasoning", "low"),
            ("perplexity/sonar-reasoning", "medium"),
            ("perplexity/sonar-reasoning", "high"),
            ("perplexity/sonar-reasoning-pro", "low"),
            ("perplexity/sonar-reasoning-pro", "medium"),
            ("perplexity/sonar-reasoning-pro", "high"),
        ]
    )
    def test_perplexity_reasoning_effort_parameter_mapping(self, model, reasoning_effort):
        """
        Test that the reasoning_effort parameter is correctly mapped for Perplexity Sonar reasoning models.
        """
        # Set up local model cost map
        os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
        litellm.model_cost = litellm.get_model_cost_map(url="")

        # Get provider and optional params
        _, provider, _, _ = litellm.get_llm_provider(model=model)
        optional_params = get_optional_params(
            model=model,
            custom_llm_provider=provider,
            reasoning_effort=reasoning_effort,
        )

        # Verify that reasoning_effort is preserved in optional_params for Perplexity
        assert "reasoning_effort" in optional_params
        assert optional_params["reasoning_effort"] == reasoning_effort

    @pytest.mark.parametrize(
        "model",
        [
            "perplexity/sonar-reasoning",
            "perplexity/sonar-reasoning-pro",
        ]
    )
    def test_perplexity_reasoning_effort_mock_completion(self, model):
        """
        Test that reasoning_effort is correctly passed through in an actual (mocked) completion call.
        """
        from openai import OpenAI
        from openai.types.chat.chat_completion import ChatCompletion

        litellm.set_verbose = True

        # Mock a successful response that includes reasoning content
        response_object = {
            "id": "cmpl-test",
            "object": "chat.completion",
            "created": 1677652288,
            "model": model.split("/")[1],
            "choices": [
                {
                    "index": 0,
                    "message": {
                        "role": "assistant",
                        "content": "This is a test response from the reasoning model.",
                        "reasoning_content": "Let me think about this step by step...",
                    },
                    "finish_reason": "stop",
                }
            ],
            "usage": {
                "prompt_tokens": 9,
                "completion_tokens": 20,
                "total_tokens": 29,
                "completion_tokens_details": {
                    "reasoning_tokens": 15
                },
            },
        }
        pydantic_obj = ChatCompletion(**response_object)

        def _return_pydantic_obj(*args, **kwargs):
            # Mimic the raw-response wrapper that the OpenAI client normally returns
            new_response = MagicMock()
            new_response.headers = {"content-type": "application/json"}
            new_response.parse.return_value = pydantic_obj
            return new_response

        openai_client = OpenAI(api_key="fake-api-key")

        with patch.object(
            openai_client.chat.completions.with_raw_response,
            "create",
            side_effect=_return_pydantic_obj,
        ) as mock_client:
            response = completion(
                model=model,
                messages=[{"role": "user", "content": "Hello, please think about this carefully."}],
                reasoning_effort="high",
                client=openai_client,
            )

            # Verify the call was made
            assert mock_client.called

            # Get the request data from the mock call
            call_args = mock_client.call_args
            request_data = call_args.kwargs

            # Verify reasoning_effort was included in the request
            assert "reasoning_effort" in request_data
            assert request_data["reasoning_effort"] == "high"

            # Verify the response structure
            assert response.choices[0].message.content is not None
            assert response.choices[0].message.content == "This is a test response from the reasoning model."

    def test_perplexity_reasoning_models_support_reasoning(self):
        """
        Test that Perplexity Sonar reasoning models are correctly identified as supporting reasoning
        """
        from litellm.utils import supports_reasoning

        # Set up local model cost map
        os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
        litellm.model_cost = litellm.get_model_cost_map(url="")

        reasoning_models = [
            "perplexity/sonar-reasoning",
            "perplexity/sonar-reasoning-pro",
        ]
        for model in reasoning_models:
            assert supports_reasoning(model, None), f"{model} should support reasoning"

    def test_perplexity_non_reasoning_models_dont_support_reasoning(self):
        """
        Test that non-reasoning Perplexity models don't support reasoning
        """
        from litellm.utils import supports_reasoning

        # Set up local model cost map
        os.environ["LITELLM_LOCAL_MODEL_COST_MAP"] = "True"
        litellm.model_cost = litellm.get_model_cost_map(url="")

        non_reasoning_models = [
            "perplexity/sonar",
            "perplexity/sonar-pro",
            "perplexity/llama-3.1-sonar-large-128k-chat",
            "perplexity/mistral-7b-instruct",
        ]
        for model in non_reasoning_models:
            # These models should not support reasoning: supports_reasoning should either
            # return False or raise for an unrecognized model. The assertion sits outside
            # the try block so an AssertionError is not swallowed by the except clause.
            try:
                result = supports_reasoning(model, None)
            except Exception:
                # Raising an exception for these models is also acceptable behavior
                continue
            assert result is False, f"{model} should not support reasoning"

    @pytest.mark.parametrize(
        "model,expected_api_base",
        [
            ("perplexity/sonar-reasoning", "https://api.perplexity.ai"),
            ("perplexity/sonar-reasoning-pro", "https://api.perplexity.ai"),
        ]
    )
    def test_perplexity_reasoning_api_base_configuration(self, model, expected_api_base):
        """
        Test that Perplexity reasoning models use the correct API base
        """
        from litellm.llms.perplexity.chat.transformation import PerplexityChatConfig

        config = PerplexityChatConfig()
        api_base, _ = config._get_openai_compatible_provider_info(
            api_base=None, api_key="test-key"
        )

        assert api_base == expected_api_base

    def test_perplexity_reasoning_effort_in_supported_params(self):
        """
        Test that reasoning_effort is in the list of supported parameters for Perplexity
        """
        from litellm.llms.perplexity.chat.transformation import PerplexityChatConfig

        config = PerplexityChatConfig()
        supported_params = config.get_supported_openai_params(model="perplexity/sonar-reasoning")

        assert "reasoning_effort" in supported_params