import os
import sys
import pytest
import asyncio
from typing import Optional
from unittest.mock import patch, AsyncMock
sys.path.insert(0, os.path.abspath("../.."))
import litellm
from litellm.integrations.custom_logger import CustomLogger
import json
from litellm.types.utils import StandardLoggingPayload
from litellm.types.llms.openai import (
ResponseCompletedEvent,
ResponsesAPIResponse,
ResponseTextConfig,
ResponseAPIUsage,
IncompleteDetails,
)
import litellm
from litellm.llms.custom_httpx.http_handler import AsyncHTTPHandler
from base_responses_api import BaseResponsesAPITest
from openai.types.responses.function_tool import FunctionTool
class TestAnthropicResponsesAPITest(BaseResponsesAPITest):
    """Run the shared Responses API test suite against Anthropic.

    Endpoints Anthropic does not support (delete/get) are overridden
    with no-op bodies so the inherited base tests are skipped.
    """

    def get_base_completion_call_args(self):
        """Return the base kwargs used by every inherited test call."""
        # litellm._turn_on_debug()  # uncomment for verbose request logging
        return {"model": "anthropic/claude-3-5-sonnet-latest"}

    async def test_basic_openai_responses_delete_endpoint(self, sync_mode=False):
        # Not supported for Anthropic; override the base test as a no-op.
        pass

    async def test_basic_openai_responses_streaming_delete_endpoint(self, sync_mode=False):
        # Not supported for Anthropic; override the base test as a no-op.
        pass

    async def test_basic_openai_responses_get_endpoint(self, sync_mode=False):
        # Not supported for Anthropic; override the base test as a no-op.
        pass
def test_multiturn_tool_calls():
    """Two-turn tool-use flow for Anthropic via the Responses API.

    Step 1 sends a user message while advertising a `shell` function tool;
    Step 2 feeds the model's function_call output back via
    `previous_response_id` so it can finish the task.

    Live network test: requires Anthropic credentials.
    """
    litellm._turn_on_debug()
    # Build the schema through FunctionTool so it is validated, then convert
    # back to a plain dict, which is what litellm.responses() accepts.
    shell_tool = dict(FunctionTool(
        type="function",
        name="shell",
        description="Runs a shell command, and returns its output.",
        parameters={
            "type": "object",
            "properties": {
                "command": {"type": "array", "items": {"type": "string"}},
                "workdir": {"type": "string", "description": "The working directory for the command."}
            },
            "required": ["command"]
        },
        strict=True
    ))

    # Step 1: initial request advertising the tool.
    response = litellm.responses(
        input=[{
            'role': 'user',
            'content': [
                {'type': 'input_text', 'text': 'make a hello world html file'}
            ],
            'type': 'message'
        }],
        model='anthropic/claude-3-7-sonnet-latest',
        instructions='You are a helpful coding assistant.',
        tools=[shell_tool]
    )
    print("response=", response)

    # Step 2: locate the function_call the model emitted so its call_id can
    # be echoed back with the tool's output.
    response_id = response.id
    tool_call_id = ""
    for item in response.output:
        if 'type' in item and item['type'] == 'function_call':
            tool_call_id = item['call_id']
            break
    # Fail fast with a clear message instead of sending an empty call_id,
    # which would otherwise surface as a confusing provider-side error.
    assert tool_call_id, "model did not emit a function_call; cannot send tool output"

    # Send the (simulated) shell output back, chained to the first response.
    follow_up_response = litellm.responses(
        model='anthropic/claude-3-7-sonnet-latest',
        previous_response_id=response_id,
        input=[{
            'type': 'function_call_output',
            'call_id': tool_call_id,
            'output': '{"output":"<html>\\n<head>\\n  <title>Hello Page</title>\\n</head>\\n<body>\\n  <h1>Hi</h1>\\n  <p>Welcome to this simple webpage!</p>\\n</body>\\n</html> > index.html\\n","metadata":{"exit_code":0,"duration_seconds":0}}'
        }],
        tools=[shell_tool]
    )
    print("follow_up_response=", follow_up_response)