File size: 9,648 Bytes
447ebeb
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
# What is this?
## Tests if proxy/auth/auth_utils.py works as expected

import sys, os, asyncio, time, random, uuid
import traceback
from dotenv import load_dotenv

load_dotenv()
import os

sys.path.insert(
    0, os.path.abspath("../..")
)  # Adds the parent directory to the system path
import pytest
import litellm
from litellm.proxy.auth.auth_utils import (
    _allow_model_level_clientside_configurable_parameters,
)
from litellm.router import Router


@pytest.mark.parametrize(
    "allowed_param, input_value, should_return_true",
    [
        # Plain string entry: the param is configurable with any value.
        ("api_base", {"api_base": "http://dummy.com"}, True),
        (
            {"api_base": "https://api.openai.com/v1"},
            {"api_base": "https://api.openai.com/v1"},
            True,
        ),  # exact match -> allowed
        (
            {"api_base": "https://api.openai.com/v1"},
            {"api_base": "https://api.anthropic.com/v1"},
            False,
        ),  # mismatch -> rejected
        (
            # Raw strings so `\.` is a regex literal-dot escape, not an
            # invalid Python string escape (SyntaxWarning on 3.12+).
            {"api_base": r"^https://litellm.*direct\.fireworks\.ai/v1$"},
            {"api_base": "https://litellm-dev.direct.fireworks.ai/v1"},
            True,
        ),  # regex match -> allowed
        (
            {"api_base": r"^https://litellm.*novice\.fireworks\.ai/v1$"},
            {"api_base": "https://litellm-dev.direct.fireworks.ai/v1"},
            False,
        ),  # regex mismatch -> rejected
    ],
)
def test_configurable_clientside_parameters(
    allowed_param, input_value, should_return_true
):
    """Check that _allow_model_level_clientside_configurable_parameters honors
    both exact-string and regex entries in a deployment's
    ``configurable_clientside_auth_params`` list."""
    router = Router(
        model_list=[
            {
                "model_name": "dummy-model",
                "litellm_params": {
                    "model": "gpt-3.5-turbo",
                    "api_key": "dummy-key",
                    "configurable_clientside_auth_params": [allowed_param],
                },
            }
        ]
    )
    resp = _allow_model_level_clientside_configurable_parameters(
        model="dummy-model",
        param="api_base",
        request_body_value=input_value["api_base"],
        llm_router=router,
    )
    print(resp)
    assert resp == should_return_true


def test_get_end_user_id_from_request_body_always_returns_str():
    """The extracted end-user id is always coerced to str, even when the
    request body supplies it as an int."""
    from unittest.mock import MagicMock

    from fastapi import Request

    from litellm.proxy.auth.auth_utils import get_end_user_id_from_request_body

    # Request stub with no headers set.
    fake_request = MagicMock(spec=Request)
    fake_request.headers = {}

    result = get_end_user_id_from_request_body(
        {"user": 123}, dict(fake_request.headers)
    )
    assert isinstance(result, str)
    assert result == "123"


@pytest.mark.parametrize(
    "headers, general_settings_config, request_body, expected_user_id",
    [
        # 1: header configured and present -> header wins over body
        (
            {"X-User-ID": "header-user-123"},
            {"user_header_name": "X-User-ID"},
            {"user": "body-user-456"},
            "header-user-123"
        ),
        # 2: header configured but missing -> fall back to body
        (
            {},
            {"user_header_name": "X-User-ID"},
            {"user": "body-user-456"},
            "body-user-456"
        ),
        # 3: no header configured -> header ignored, body used
        (
            {"X-User-ID": "header-user-123"},
            {},
            {"user": "body-user-456"},
            "body-user-456"
        ),
        # 4: header configured and present, body has no user -> header used
        (
            {"X-Custom-User": "header-only-user"},
            {"user_header_name": "X-Custom-User"},
            {"model": "gpt-4"},
            "header-only-user"
        ),
        # 5: configured header present but empty string -> fall back to body
        (
            {"X-User-ID": ""},
            {"user_header_name": "X-User-ID"},
            {"user": "body-user-456"},
            "body-user-456"
        ),
        # 6: lowercase header name matches as configured
        (
            {"x-user-id": "lowercase-header-user"},
            {"user_header_name": "x-user-id"},
            {"user": "body-user-456"},
            "lowercase-header-user"
        ),
        # 7: user_header_name explicitly None -> fall back to body
        (
            {"X-User-ID": "header-user-123"},
            {"user_header_name": None},
            {"user": "body-user-456"},
            "body-user-456"
        ),
        # 8: user_header_name not a string -> fall back to body
        (
            {"X-User-ID": "header-user-123"},
            {"user_header_name": 123},
            {"user": "body-user-456"},
            "body-user-456"
        ),
        # 9: body fallback via litellm_metadata.user
        (
            {},
            {"user_header_name": "X-User-ID"},
            {"litellm_metadata": {"user": "litellm-user-789"}},
            "litellm-user-789"
        ),
        # 10: body fallback via metadata.user_id
        (
            {},
            {"user_header_name": "X-User-ID"},
            {"metadata": {"user_id": "metadata-user-999"}},
            "metadata-user-999"
        ),
        # 11: header beats every body-level source at once
        (
            {"X-User-ID": "header-priority"},
            {"user_header_name": "X-User-ID"},
            {
                "user": "body-user",
                "litellm_metadata": {"user": "litellm-user"},
                "metadata": {"user_id": "metadata-user"}
            },
            "header-priority"
        ),
    ]
)
def test_get_end_user_id_from_request_body_with_user_header_name(
    headers, general_settings_config, request_body, expected_user_id
):
    """get_end_user_id_from_request_body must respect the proxy-level
    ``user_header_name`` setting, preferring the configured header over any
    user id carried in the request body."""
    from unittest.mock import MagicMock, patch

    from fastapi import Request

    from litellm.proxy.auth.auth_utils import get_end_user_id_from_request_body

    # Request stub carrying the parametrized headers.
    fake_request = MagicMock(spec=Request)
    fake_request.headers = headers

    # Swap in the parametrized proxy settings for the duration of the call.
    with patch("litellm.proxy.proxy_server.general_settings", general_settings_config):
        result = get_end_user_id_from_request_body(
            request_body, dict(fake_request.headers)
        )
        assert result == expected_user_id


def test_get_end_user_id_from_request_body_no_user_found():
    """When neither the configured header nor any body field carries a user
    id, the helper returns None."""
    from unittest.mock import MagicMock, patch

    from fastapi import Request

    from litellm.proxy.auth.auth_utils import get_end_user_id_from_request_body

    # Only an unrelated header is present; the configured one is absent.
    fake_request = MagicMock(spec=Request)
    fake_request.headers = {"X-Other-Header": "some-value"}

    settings = {"user_header_name": "X-User-ID"}

    # Body contains no user identifiers at all.
    body = {"model": "gpt-4", "messages": [{"role": "user", "content": "hello"}]}

    with patch("litellm.proxy.proxy_server.general_settings", settings):
        result = get_end_user_id_from_request_body(body, dict(fake_request.headers))
        assert result is None


def test_get_end_user_id_from_request_body_backwards_compatibility():
    """Calling with only ``request_body`` (no headers argument) still works,
    covering every legacy lookup location in the body."""
    from litellm.proxy.auth.auth_utils import get_end_user_id_from_request_body

    # (body, expected) pairs: top-level user, litellm_metadata.user,
    # metadata.user_id, and the no-user -> None case.
    cases = [
        ({"user": "test-user-123"}, "test-user-123"),
        ({"litellm_metadata": {"user": "litellm-user-456"}}, "litellm-user-456"),
        ({"metadata": {"user_id": "metadata-user-789"}}, "metadata-user-789"),
        ({"model": "gpt-4"}, None),
    ]
    for body, expected in cases:
        assert get_end_user_id_from_request_body(body) == expected

@pytest.mark.parametrize(
    "request_data, expected_model",
    [
        ({"target_model_names": "gpt-3.5-turbo, gpt-4o-mini-general-deployment"}, ["gpt-3.5-turbo", "gpt-4o-mini-general-deployment"]),
        ({"target_model_names": "gpt-3.5-turbo"}, ["gpt-3.5-turbo"]),
        ({"model": "gpt-3.5-turbo, gpt-4o-mini-general-deployment"}, ["gpt-3.5-turbo", "gpt-4o-mini-general-deployment"]),
        ({"model": "gpt-3.5-turbo"}, "gpt-3.5-turbo"),
        ({"model": "gpt-3.5-turbo, gpt-4o-mini-general-deployment"}, ["gpt-3.5-turbo", "gpt-4o-mini-general-deployment"]),
    ],
)
def test_get_model_from_request(request_data, expected_model):
    """get_model_from_request resolves the model from either
    ``target_model_names`` or ``model`` in the request body.

    Bug fix: the original body ignored the parametrized ``request_data`` and
    ``expected_model`` arguments, overwriting them with one hardcoded case
    (and an unused ``route`` variable), so 4 of the 5 parametrize rows were
    never actually exercised.
    """
    from litellm.proxy.auth.auth_utils import get_model_from_request

    model = get_model_from_request(request_data, "/v1/files")
    assert model == expected_model