File size: 6,916 Bytes
207b8e0
4e510e1
ed1f8da
 
 
 
 
20e2e59
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4e510e1
9dfac8f
20e2e59
ed1f8da
 
 
20e2e59
ed1f8da
20e2e59
 
 
 
 
 
b353c0d
20e2e59
 
 
 
 
 
 
 
b353c0d
 
 
 
 
20e2e59
9dfac8f
ed1f8da
 
1d582be
ed1f8da
9dfac8f
 
4e510e1
 
 
 
b353c0d
 
 
 
 
 
e3bfdb0
 
 
 
 
 
 
 
20e2e59
afcfca9
83ef605
20e2e59
b353c0d
 
 
 
 
 
e3bfdb0
b353c0d
 
 
 
 
 
 
 
 
 
20e2e59
9dfac8f
b353c0d
20e2e59
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
import requests
from langchain_openai import ChatOpenAI
import json
from langchain_core.prompts import ChatPromptTemplate
from langchain_groq import ChatGroq
from langchain.chains import ConversationChain
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain.tools import tool
from langchain.pydantic_v1 import BaseModel, Field
import urllib.parse


def langchain_functions_parser(thisjson):
    """Migrate a legacy OpenAI ``function_call`` entry to the newer ``tool_calls`` format.

    If ``thisjson['additional_kwargs']`` contains a ``function_call`` dict, it is
    wrapped into a single-element ``tool_calls`` list (with a placeholder id) and
    the legacy key is removed. Messages without a ``function_call`` are returned
    unchanged. The input dict is mutated in place and also returned.

    Args:
        thisjson: A message dict serialized from a LangChain AI message.

    Returns:
        The same dict, normalized to the ``tool_calls`` shape when applicable.
    """
    try:
        function_call_data = thisjson['additional_kwargs']['function_call']
    except (KeyError, TypeError):
        # No legacy function_call present (or unexpected structure): pass through.
        return thisjson

    thisjson['additional_kwargs']['tool_calls'] = [
        {'id': 'No ID', 'function': function_call_data, 'type': 'function'}
    ]
    # pop with a default never raises, so no try/except needed here.
    thisjson['additional_kwargs'].pop('function_call', None)
    return thisjson
    

from typing import List, Dict, Any, Optional
from langchain.tools import BaseTool
from pydantic import BaseModel, Field

def convert_openai_tools_to_langchain(tools: List[Dict]) -> List[BaseTool]:
    """Convert OpenAI-style tool specs into LangChain ``BaseTool`` instances.

    Each spec must provide ``name``, ``description`` and a JSON-schema
    ``parameters`` dict. A pydantic args model is built dynamically with
    ``create_model`` (the original ``exec``-in-class-body approach never
    registered real fields and crashed on non-string parameter types).

    Args:
        tools: OpenAI function/tool definition dicts.

    Returns:
        One placeholder ``BaseTool`` per spec; ``_run`` only echoes its query.
    """
    from pydantic import create_model

    # JSON-schema primitive names -> Python types; unknown types fall back to str.
    type_map = {
        'string': str,
        'integer': int,
        'number': float,
        'boolean': bool,
        'array': list,
        'object': dict,
    }

    class _SpecTool(BaseTool):
        """Placeholder tool: logs the call and returns a canned result string."""

        def _run(self, query: str) -> str:  # Replace with appropriate argument and return types
            # Implement your tool logic here
            print(f"Running tool {self.name} with query: {query}")
            # Example: Replace with your actual web search logic
            return f"Web search results for: {query}"

    langchain_tools = []
    # NOTE: loop variable renamed from `tool` — the original shadowed the
    # `langchain.tools.tool` decorator imported at module level.
    for spec in tools:
        properties = spec["parameters"].get("properties", {})
        fields = {
            param_name: (
                type_map.get(param_info.get("type"), str),
                Field(..., description=param_info.get("description")),
            )
            for param_name, param_info in properties.items()
        }
        args_schema = create_model(f"{spec['name']}_args", **fields)
        langchain_tools.append(
            _SpecTool(name=spec["name"], description=spec["description"], args_schema=args_schema)
        )

    return langchain_tools
class SearchInput(BaseModel):
    """Argument schema for search-style tools."""

    # NOTE(review): original description was truncated ("a search query  follo");
    # reconstructed wording below — confirm against the tool that consumes this schema.
    query: str = Field(description="a search query to look up")

from langchain_cohere import ChatCohere

def langchainConversation(conversation):
    """Turn ``[{'role': ..., 'context': ...}, ...]`` dicts into LangChain messages.

    Content is percent-encoded before being handed to ``ChatPromptTemplate`` and
    decoded again afterwards — presumably so literal ``{...}`` in user text is
    not interpreted as a template variable (TODO confirm).
    """
    encoded_pairs = [
        (message['role'], urllib.parse.quote(message['context']))
        for message in conversation
    ]
    template = ChatPromptTemplate.from_messages(encoded_pairs)
    rendered = template.format_messages()
    # Restore the original text on each formatted message object.
    for rendered_message in rendered:
        rendered_message.content = urllib.parse.unquote(rendered_message.content)
    return rendered
def segmind_input_parser(input):
    """Translate internal message dicts into Segmind's OpenAI-style chat format.

    The 'ai' role is renamed to 'assistant' and the 'context' key becomes
    'content'; every other role passes through unchanged.
    """
    return [
        {
            'role': 'assistant' if entry['role'] == 'ai' else entry['role'],
            'content': entry['context'],
        }
        for entry in input
    ]
def workers_input_parser(input):
    """Rename the 'context' key to 'content' for the Cloudflare Workers AI API."""
    return [
        {'role': entry['role'], 'content': entry['context']}
        for entry in input
    ]
def segmind_output_parser(input):
    """Reshape a raw Segmind chat-completion response into a LangChain-style AI message dict."""
    usage = input['usage']
    return {
        "content": input['choices'][0]['message']['content'],
        "additional_kwargs": {},
        "response_metadata": {},
        "type": "ai",
        "name": None,
        "id": input['id'],
        "example": False,
        "tool_calls": [],
        "invalid_tool_calls": [],
        "usage_metadata": {
            "input_tokens": usage['prompt_tokens'],
            "output_tokens": usage['completion_tokens'],
            "total_tokens": usage['total_tokens'],
        },
    }
def converse(conversation, provider, model, key, other: Optional[dict] = None):
    """Route a conversation to the chosen LLM provider and return the reply as a JSON string.

    Args:
        conversation: List of ``{'role': ..., 'context': ...}`` message dicts.
        provider: One of 'groq', 'gemini', 'cohere', 'lepton', 'cloudflare',
            'openrouter', 'segmind'. Anything else yields an error payload.
        model: Provider-specific model identifier.
        key: API key. For 'cloudflare' it must be ``"<account_id>~<api_token>"``.
        other: Optional extras; ``other['tools']`` is a list of OpenAI-style
            tool specs (defaults to no tools).

    Returns:
        A JSON string: the serialized LangChain message for SDK-backed
        providers, or a ``{'content': ...}`` payload for HTTP-backed ones.

    Raises:
        Exception: If the cloudflare key is not in the expected two-part form.
    """
    # Avoid the mutable-default pitfall; original signature used `other: dict = {}`.
    other = other if other is not None else {}

    if provider == 'groq':
        chat = ChatGroq(temperature=0, groq_api_key=key, model_name=model)
    elif provider == 'gemini':
        chat = ChatGoogleGenerativeAI(model=model, google_api_key=key)
    elif provider == 'cohere':
        chat = ChatCohere(model=model, cohere_api_key=key)
    elif provider == 'lepton':
        url = f'https://{model}.lepton.run/api/v1/'
        print(url)
        chat = ChatOpenAI(openai_api_base=url, openai_api_key=key)
    elif provider == 'cloudflare':
        # key is "<account_id>~<api_token>"; partition avoids the bare except
        # the original wrapped around split()[1].
        account_id, sep, api_token = key.partition('~')
        if not sep:
            raise Exception('Invalid Account Id or api token')
        api_base_url = f"https://api.cloudflare.com/client/v4/accounts/{account_id}/ai/run/"
        headers = {"Authorization": f"Bearer {api_token}"}

        def run(model, inputs):
            # POST one chat-completion request to Workers AI.
            payload = {"messages": inputs}
            response = requests.post(f"{api_base_url}{model}", headers=headers, json=payload)
            return response.json()

        output = run(model, workers_input_parser(conversation))
        print(output)
        return json.dumps({'content': output['result']['response']}, indent=4)
    elif provider == 'openrouter':
        chat = ChatOpenAI(
            base_url="https://openrouter.ai/api/v1",
            api_key=key,
            model=model
        )
    elif provider == 'segmind':
        url = f"https://api.segmind.com/v1/{model}"

        # Request payload
        data = {
            "messages": segmind_input_parser(conversation)
        }

        response = requests.post(url, json=data, headers={'x-api-key': key})
        output = json.loads(response.text)
        print(json.dumps(output, indent=4))
        return json.dumps(segmind_output_parser(output), indent=4)
    else:
        return json.dumps({'content': 'unsupported Provider'})

    # dict.get replaces the original try/except-everything around other['tools'].
    tools = other.get('tools', [])

    if provider not in ['cohere']:
        return json.dumps(
            langchain_functions_parser(
                json.loads(chat.invoke(langchainConversation(conversation), functions=tools).json())
            ),
            indent=4,
        )
    else:
        # Cohere does not accept raw OpenAI function specs; bind converted tools instead.
        chat = chat.bind_tools(convert_openai_tools_to_langchain(tools))
        return json.dumps(
            langchain_functions_parser(
                json.loads(chat.invoke(langchainConversation(conversation)).json())
            ),
            indent=4,
        )