import time

import gradio as gr
import requests
import g4f
from g4f import Provider, models
from langchain.llms.base import LLM
from langchain_g4f import G4FLLM

g4f.debug.logging = True  # Enable logging
g4f.check_version = False  # Disable automatic version checking
# print(g4f.version)  # Check version
print(g4f.Provider.Ails.params)  # Supported args

# Embedchain pipeline endpoint used to fetch retrieval context for a query.
url = "https://app.embedchain.ai/api/v1/pipelines/f14b3df8-db63-456c-8a7f-4323b4467271/context/"


def greet(name):
    """POST *name* as a query to the embedchain context endpoint.

    Returns the raw response body text.  (Previously the response text was
    assigned to a local and silently discarded — the commented-out
    ``gr.Interface(fn=greet, ...)`` at the bottom shows it was meant to be
    returned.)
    """
    payload = {
        "query": f"{name}",
        "count": 15,
    }
    # SECURITY: hard-coded API token in source — move to an environment
    # variable / secret store before sharing or deploying this file.
    headers = {
        "Authorization": "Token ec-pbVFWamfNAciPwb18ZwaQkKKUCCBnafko9ydl3Y5",
    }
    response = requests.request("POST", url, headers=headers, json=payload)
    print(name)
    return response.text


# NOTE: the original wrote ``llm = LLM = G4FLLM(...)``, rebinding the imported
# LLM base class to an instance; keep a single clear binding instead.
llm = G4FLLM(model=models.gpt_35_turbo_16k)


def hein(q):
    """Run the query string *q* through the g4f-backed LLM and return its reply."""
    return llm(q)


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    def respond(message, chat_history):
        """Gradio submit handler: answer *message* and append to the history.

        Returns ("", updated_history) so the textbox is cleared after send.
        """
        print(message)
        print("ok")
        print(chat_history)
        # Fix: the original called hein(q) with an undefined name `q`,
        # which raised NameError on every message — pass the user message.
        bot_message = hein(message)
        chat_history.append((message, bot_message))
        time.sleep(2)
        return "", chat_history

    msg.submit(respond, [msg, chatbot], [msg, chatbot])

demo.launch()

# iface = gr.Interface(fn=greet, inputs="text", outputs="text")
# iface.launch()