# ha1772007 — "Updated Errors and Custom Tools" (commit 20e2e59)
import requests
from langchain_openai import ChatOpenAI
import json
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq
from langchain.chains import ConversationChain
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.tools import tool
from langchain.pydantic_v1 import BaseModel, Field
import urllib.parse
def langchain_functions_parser(thisjson):
    """Normalise a serialized LangChain message dict in place.

    Some providers report a single legacy ``function_call`` entry inside
    ``additional_kwargs``; downstream consumers expect the newer
    ``tool_calls`` list instead. This moves the legacy entry into the
    modern shape (with a placeholder id, since none is provided).

    Args:
        thisjson: dict produced by serializing a LangChain message.

    Returns:
        The same dict, mutated when a ``function_call`` was present,
        otherwise unchanged.
    """
    try:
        kwargs = thisjson['additional_kwargs']
        # pop() reads and removes the legacy key in one step (the
        # original code did a lookup, then a separate guarded pop).
        function_call_data = kwargs.pop('function_call')
    except (KeyError, TypeError):
        # No additional_kwargs / no function_call: nothing to normalise.
        return thisjson
    kwargs['tool_calls'] = [
        {'id': 'No ID', 'function': function_call_data, 'type': 'function'}
    ]
    return thisjson
from typing import List, Dict, Any, Optional
from langchain.tools import BaseTool
from pydantic import BaseModel, Field
def convert_openai_tools_to_langchain(tools: List[Dict]) -> List[BaseTool]:
    """
    Converts a list of OpenAI tool dictionaries to a list of LangChain BaseTool objects.

    Each input dict must provide ``name``, ``description`` and a JSON-schema
    ``parameters`` object. An argument schema class is built per tool with
    ``pydantic.create_model`` — the original used ``exec()`` on strings that
    embedded the (untrusted) tool descriptions, which both failed to register
    fields reliably and was a code-injection vector.
    """
    from pydantic import create_model  # local import: only needed here

    # JSON-schema primitive types -> Python types (original handled only
    # "string"); unknown/missing types fall back to str.
    type_map = {
        "string": str,
        "integer": int,
        "number": float,
        "boolean": bool,
        "array": list,
        "object": dict,
    }

    class CustomTool(BaseTool):
        """Placeholder tool implementation: echoes the query it received."""

        def _run(self, query: str) -> str:
            # Implement the real tool logic here (e.g. an actual web search).
            print(f"Running tool {self.name} with query: {query}")
            return f"Web search results for: {query}"

    langchain_tools = []
    # 'spec', not 'tool': the original loop variable shadowed the imported
    # `tool` decorator from langchain.tools.
    for spec in tools:
        field_defs = {
            param_name: (
                type_map.get(param_info.get("type"), str),
                Field(..., description=param_info.get("description")),
            )
            for param_name, param_info in spec["parameters"].get("properties", {}).items()
        }
        # A distinct schema class per tool (the original reused one class
        # name for every tool in the list).
        args_schema = create_model(f"{spec['name']}ArgsSchema", **field_defs)
        langchain_tools.append(
            CustomTool(
                name=spec["name"],
                description=spec["description"],
                args_schema=args_schema,
            )
        )
    return langchain_tools
class SearchInput(BaseModel):
    """Argument schema for a web-search tool."""

    # NOTE(review): the original description read "a search query follo" —
    # truncated mid-word; completed to a sensible phrase. Confirm intent.
    query: str = Field(description="a search query")
from langchain_cohere import ChatCohere
def langchainConversation(conversation):
    """Build LangChain message objects from [{'role', 'context'}, ...] dicts.

    Message contents are URL-quoted before being handed to
    ChatPromptTemplate so that literal braces in user text are not
    mistaken for template variables, then unquoted again on the
    formatted messages before returning them.
    """
    quoted_pairs = [
        (entry['role'], urllib.parse.quote(entry['context']))
        for entry in conversation
    ]
    template = ChatPromptTemplate.from_messages(quoted_pairs)
    messages = template.format_messages()
    for message in messages:
        message.content = urllib.parse.unquote(message.content)
    return messages
def segmind_input_parser(input):
    """Reshape chat entries into the OpenAI-style form Segmind expects.

    Every {'role': ..., 'context': ...} entry becomes
    {'role': ..., 'content': ...}; the LangChain role name 'ai' is
    translated to 'assistant'.
    """
    return [
        {
            'role': 'assistant' if entry['role'] == 'ai' else entry['role'],
            'content': entry['context'],
        }
        for entry in input
    ]
def workers_input_parser(input):
    """Map {'role', 'context'} entries to the {'role', 'content'} shape
    used by the Cloudflare Workers AI chat endpoint (roles unchanged)."""
    return [
        {'role': entry['role'], 'content': entry['context']}
        for entry in input
    ]
def segmind_output_parser(input):
    """Reshape a raw Segmind chat completion into a LangChain-style
    AI-message dict, carrying over content, response id and token usage."""
    message = input['choices'][0]['message']
    usage = input['usage']
    return {
        "content": message['content'],
        "additional_kwargs": {},
        "response_metadata": {},
        "type": "ai",
        "name": None,
        "id": input['id'],
        "example": False,
        "tool_calls": [],
        "invalid_tool_calls": [],
        "usage_metadata": {
            "input_tokens": usage['prompt_tokens'],
            "output_tokens": usage['completion_tokens'],
            "total_tokens": usage['total_tokens'],
        },
    }
def converse(conversation, provider, model, key, other: Optional[dict] = None):
    """Route a chat conversation to one of several LLM providers.

    Args:
        conversation: list of {'role': ..., 'context': ...} dicts.
        provider: one of 'groq', 'gemini', 'cohere', 'lepton',
            'cloudflare', 'openrouter', 'segmind'.
        model: provider-specific model name.
        key: API credential; for cloudflare it must be
            '<account_id>~<api_token>'.
        other: optional extras; only other['tools'] (an OpenAI-style
            tool list) is read. Defaults to no tools.

    Returns:
        A JSON string: either a normalised LangChain message dict, or a
        provider-specific {'content': ...} payload.

    Raises:
        Exception: when a cloudflare key is not in the expected
            '<account_id>~<api_token>' form.
    """
    # None default instead of the original mutable `other: dict = {}`.
    other = other if other is not None else {}
    if provider == 'groq':
        chat = ChatGroq(temperature=0, groq_api_key=key, model_name=model)
    elif provider == 'gemini':
        chat = ChatGoogleGenerativeAI(model=model, google_api_key=key)
    elif provider == 'cohere':
        chat = ChatCohere(model=model, cohere_api_key=key)
    elif provider == 'lepton':
        url = f'https://{model}.lepton.run/api/v1/'
        print(url)
        chat = ChatOpenAI(openai_api_base=url, openai_api_key=key)
    elif provider == 'cloudflare':
        try:
            parts = key.split('~')
            account_id, api_token = parts[0], parts[1]
        except (IndexError, AttributeError):
            # narrowed from a bare except; message typo ("Accound") fixed
            raise Exception('Invalid Account Id or api token')
        api_base_url = f"https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run/"
        headers = {"Authorization": f"Bearer {api_token}"}
        response = requests.post(
            f"{api_base_url}{model}",
            headers=headers,
            json={"messages": workers_input_parser(conversation)},
        )
        output = response.json()
        print(output)
        return json.dumps({'content': output['result']['response']}, indent=4)
    elif provider == 'openrouter':
        chat = ChatOpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=key,
            model=model,
        )
    elif provider == 'segmind':
        url = f"https://api.segmind.com/v1/{model}"
        response = requests.post(
            url,
            json={"messages": segmind_input_parser(conversation)},
            headers={'x-api-key': key},
        )
        output = json.loads(response.text)
        print(json.dumps(output, indent=4))
        return json.dumps(segmind_output_parser(output), indent=4)
    else:
        # typo fixed: was 'unspported Provider'
        return json.dumps({'content': 'unsupported Provider'})
    # .get() replaces the original bare try/except around other['tools'].
    tools = other.get('tools', [])
    if provider not in ['cohere']:
        # These providers accept OpenAI-style function specs directly.
        result = chat.invoke(langchainConversation(conversation), functions=tools)
    else:
        # Cohere needs the tools converted and bound onto the model first.
        chat = chat.bind_tools(convert_openai_tools_to_langchain(tools))
        result = chat.invoke(langchainConversation(conversation))
    return json.dumps(langchain_functions_parser(json.loads(result.json())), indent=4)