File size: 1,371 Bytes
0a07732
 
 
c4b57ef
 
bd62f39
c4b57ef
5be60ea
 
ea2be3b
50adde5
c4b57ef
 
0a07732
 
 
 
 
 
 
7925389
0a07732
d03ad5d
 
 
 
0a07732
 
 
 
c4b57ef
 
cd66d08
f914591
cd66d08
 
d2b13eb
 
 
f914591
0a07732
4fa7a54
 
 
 
 
 
 
 
 
d2b13eb
4fa7a54
f914591
4fa7a54
 
 
 
 
 
0a07732
 
4fa7a54
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import gradio as gr
import requests

from g4f import Provider, models
from langchain.llms.base import LLM
import g4f
from langchain_g4f import G4FLLM
# Module-level g4f configuration: verbose logging on, version check off.
g4f.debug.logging = True  # Enable logging
g4f.check_version = False  # Disable automatic version checking
#print(g4f.version)  # Check version
print(g4f.Provider.Ails.params)  # Supported args


# Embedchain pipeline "context" endpoint queried by greet().
url = "https://app.embedchain.ai/api/v1/pipelines/f14b3df8-db63-456c-8a7f-4323b4467271/context/"



def greet(name):
    """Query the embedchain pipeline context endpoint with *name*.

    Sends a POST to the module-level ``url`` asking for up to 15 context
    matches and returns the raw response body text.

    BUG FIX: the original assigned ``response.text`` to an unused local
    and fell off the end, so the function always returned None.
    """
    payload = {
        "query": f"{name}",
        "count": 15,
    }
    # NOTE(review): hard-coded API token — should come from an
    # environment variable / secret store, not source code.
    headers = {
        'Authorization': 'Token ec-pbVFWamfNAciPwb18ZwaQkKKUCCBnafko9ydl3Y5',
    }

    response = requests.request("POST", url, headers=headers, json=payload)

    print(name)
    return response.text


llm = LLM = G4FLLM(model=models.gpt_35_turbo_16k)


def hein(q):
    """Run the query *q* through the shared G4F LLM and return its reply."""
    return llm(q)

# Gradio chat UI: a chatbot panel, a text input, and a clear button.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history):
        """Gradio submit callback: answer *message* and extend the history.

        Returns ("", updated_history) so the textbox is cleared and the
        chatbot component redraws with the new (user, bot) turn.
        """
        print(message)
        print("ok")
        print(chat_history)
        # BUG FIX: the original called hein(q) — `q` is undefined here
        # (NameError on every submit); the user input is `message`.
        bot_message = hein(message)
        chat_history.append((message, bot_message))
        # `time` must be imported at the top of the file (the original
        # used time.sleep without importing it — a second NameError).
        time.sleep(2)
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()


#iface = gr.Interface(fn=greet, inputs="text", outputs="text")
#iface.launch()