from unittest.mock import MagicMock, patch

import pytest
import requests
from litellm.exceptions import RateLimitError

from openhands.core.config import LLMConfig
from openhands.events.action.message import MessageAction
from openhands.llm.llm import LLM
from openhands.resolver.github_issue import GithubIssue
from openhands.resolver.issue_definitions import IssueHandler, PRHandler


@pytest.fixture(autouse=True)
def mock_logger(monkeypatch):
    # suppress logging of completion data to file
    mock_logger = MagicMock()
    monkeypatch.setattr('openhands.llm.debug_mixin.llm_prompt_logger', mock_logger)
    monkeypatch.setattr('openhands.llm.debug_mixin.llm_response_logger', mock_logger)
    return mock_logger


@pytest.fixture
def default_config():
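    # Retry values are intentionally small so the retry tests below finish
    # quickly; those tests also assume num_retries bounds the total number
    # of completion attempts.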
    return LLMConfig(
        model='gpt-4o',
        api_key='test_key',
        num_retries=2,
        retry_min_wait=1,
        retry_max_wait=2,
    )


def test_handle_nonexistent_issue_reference():
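    """A 404 on a referenced issue should yield no context instead of raising."""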
    llm_config = LLMConfig(model='test', api_key='test')
    handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config)

    # Mock the requests.get to simulate a 404 error
    mock_response = MagicMock()
    mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(
        '404 Client Error: Not Found'
    )

    with patch('requests.get', return_value=mock_response):
        # Call the method with a non-existent issue reference
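        # (the double leading underscore triggers Python name mangling, so the
        # private method is reached as _PRHandler__<name> from outside the class)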
        result = handler._PRHandler__get_context_from_external_issues_references(
            closing_issues=[],
            closing_issue_numbers=[],
            issue_body='This references #999999',  # Non-existent issue
            review_comments=[],
            review_threads=[],
            thread_comments=None,
        )

        # The method should return an empty list since the referenced issue couldn't be fetched
        assert result == []


def test_handle_rate_limit_error():
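    """A 403 rate-limit response should yield no context instead of raising."""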
    llm_config = LLMConfig(model='test', api_key='test')
    handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config)

    # Mock the requests.get to simulate a rate limit error
    mock_response = MagicMock()
    mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(
        '403 Client Error: Rate Limit Exceeded'
    )

    with patch('requests.get', return_value=mock_response):
        # Call the method with an issue reference
        result = handler._PRHandler__get_context_from_external_issues_references(
            closing_issues=[],
            closing_issue_numbers=[],
            issue_body='This references #123',
            review_comments=[],
            review_threads=[],
            thread_comments=None,
        )

        # The method should return an empty list since the request was rate limited
        assert result == []


def test_handle_network_error():
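    """A network error while fetching a referenced issue should yield no context."""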
    llm_config = LLMConfig(model='test', api_key='test')
    handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config)

    # Mock the requests.get to simulate a network error
    with patch(
        'requests.get', side_effect=requests.exceptions.ConnectionError('Network Error')
    ):
        # Call the method with an issue reference
        result = handler._PRHandler__get_context_from_external_issues_references(
            closing_issues=[],
            closing_issue_numbers=[],
            issue_body='This references #123',
            review_comments=[],
            review_threads=[],
            thread_comments=None,
        )

        # The method should return an empty list since the network request failed
        assert result == []


def test_successful_issue_reference():
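    """A successfully fetched referenced issue contributes its body to the context."""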
    llm_config = LLMConfig(model='test', api_key='test')
    handler = PRHandler('test-owner', 'test-repo', 'test-token', llm_config)

    # Mock a successful response
    mock_response = MagicMock()
    mock_response.raise_for_status.return_value = None
    mock_response.json.return_value = {'body': 'This is the referenced issue body'}

    with patch('requests.get', return_value=mock_response):
        # Call the method with an issue reference
        result = handler._PRHandler__get_context_from_external_issues_references(
            closing_issues=[],
            closing_issue_numbers=[],
            issue_body='This references #123',
            review_comments=[],
            review_threads=[],
            thread_comments=None,
        )

        # The method should return a list with the referenced issue body
        assert result == ['This is the referenced issue body']


class MockLLMResponse:
    """Mock LLM Response class to mimic the actual LLM response structure."""

    class Choice:
        class Message:
            def __init__(self, content):
                self.content = content

        def __init__(self, content):
            self.message = self.Message(content)

    def __init__(self, content):
        self.choices = [self.Choice(content)]
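

# Illustrative sanity check (added alongside the mock above): the mock should
# expose the same attribute path the handlers read from a real completion,
# i.e. response.choices[0].message.content.
def test_mock_llm_response_structure():
    response = MockLLMResponse('hello')
    assert response.choices[0].message.content == 'hello'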


class DotDict(dict):
    """

    A dictionary that supports dot notation access.

    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        for key, value in self.items():
            if isinstance(value, dict):
                self[key] = DotDict(value)
            elif isinstance(value, list):
                self[key] = [
                    DotDict(item) if isinstance(item, dict) else item for item in value
                ]

    def __getattr__(self, key):
        if key in self:
            return self[key]
        else:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{key}'"
            )

    def __setattr__(self, key, value):
        self[key] = value

    def __delattr__(self, key):
        if key in self:
            del self[key]
        else:
            raise AttributeError(
                f"'{self.__class__.__name__}' object has no attribute '{key}'"
            )
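

# Illustrative check of the dot-notation behaviour documented above, including
# recursive conversion of nested dicts and of dicts inside lists. One caveat:
# keys that collide with dict methods (e.g. 'items') are not reachable via
# attribute access, because __getattr__ only fires when normal lookup fails.
def test_dotdict_dot_notation():
    d = DotDict({'a': {'b': 1}, 'lst': [{'x': 2}]})
    assert d.a.b == 1
    assert d.lst[0].x == 2
    d.c = 3
    assert d['c'] == 3
    del d.c
    with pytest.raises(AttributeError):
        _ = d.c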


@patch('openhands.llm.llm.litellm_completion')
def test_guess_success_rate_limit_wait_time(mock_litellm_completion, default_config):
    """Test that the retry mechanism in guess_success respects wait time between retries."""

    with patch('time.sleep') as mock_sleep:
        # Simulate a rate limit error followed by a successful response
        mock_litellm_completion.side_effect = [
            RateLimitError(
                'Rate limit exceeded', llm_provider='test_provider', model='test_model'
            ),
            DotDict(
                {
                    'choices': [
                        {
                            'message': {
                                'content': '--- success\ntrue\n--- explanation\nRetry successful'
                            }
                        }
                    ]
                }
            ),
        ]

        llm = LLM(config=default_config)
        handler = IssueHandler('test-owner', 'test-repo', 'test-token', default_config)
        handler.llm = llm

        # Mock issue and history
        issue = GithubIssue(
            owner='test-owner',
            repo='test-repo',
            number=1,
            title='Test Issue',
            body='This is a test issue.',
            thread_comments=['Please improve error handling'],
        )
        history = [MessageAction(content='Fixed error handling.')]

        # Call guess_success
        success, _, explanation = handler.guess_success(issue, history)

        # Assertions
        assert success is True
        assert explanation == 'Retry successful'
        assert mock_litellm_completion.call_count == 2  # Two attempts made
        mock_sleep.assert_called_once()  # Sleep called once between retries

        # Validate wait time
        wait_time = mock_sleep.call_args[0][0]
        assert (
            default_config.retry_min_wait <= wait_time <= default_config.retry_max_wait
        ), f'Expected wait time between {default_config.retry_min_wait} and {default_config.retry_max_wait} seconds, but got {wait_time}'


@patch('openhands.llm.llm.litellm_completion')
def test_guess_success_exhausts_retries(mock_completion, default_config):
    """Test the retry mechanism in guess_success exhausts retries and raises an error."""
    # Simulate persistent rate limit errors by always raising RateLimitError
    mock_completion.side_effect = RateLimitError(
        'Rate limit exceeded', llm_provider='test_provider', model='test_model'
    )

    # Initialize LLM and handler
    llm = LLM(config=default_config)
    handler = PRHandler('test-owner', 'test-repo', 'test-token', default_config)
    handler.llm = llm

    # Mock issue and history
    issue = GithubIssue(
        owner='test-owner',
        repo='test-repo',
        number=1,
        title='Test Issue',
        body='This is a test issue.',
        thread_comments=['Please improve error handling'],
    )
    history = [MessageAction(content='Fixed error handling.')]

    # Call guess_success and expect it to raise an error after retries
    with pytest.raises(RateLimitError):
        handler.guess_success(issue, history)

    # Assertions
    assert (
        mock_completion.call_count == default_config.num_retries
    )  # num_retries bounds the total number of attempts, including the first call