# BMTools-demo / app.py — Gradio demo app for BMTools
# (HuggingFace Space by congxin95, commit ca64815)
import gradio as gr
import sys
# sys.path.append('./inference/')
import bmtools
from bmtools.agent.tools_controller import MTQuestionAnswerer, load_valid_tools
from bmtools.agent.singletool import STQuestionAnswerer
from langchain.schema import AgentFinish
import os
import requests
from threading import Thread
from multiprocessing import Process
import time
# LLM backends selectable from the UI dropdown.
available_models = ["ChatGPT", "GPT-3.5"]
DEFAULTMODEL = "GPT-3.5"
# Tool name -> base URL of the server exposing that tool's OpenAPI spec.
# "klarna" points at a public plugin host; all others assume a local
# bmtools ToolServer listening on port 8079 (started by run_tool_server()).
tools_mappings = {
    "klarna": "https://www.klarna.com/",
    "chemical-prop": "http://127.0.0.1:8079/tools/chemical-prop/",
    "wolframalpha": "http://127.0.0.1:8079/tools/wolframalpha/",
    "weather": "http://127.0.0.1:8079/tools/weather/",
    "douban-film": "http://127.0.0.1:8079/tools/douban-film/",
    "wikipedia": "http://127.0.0.1:8079/tools/wikipedia/",
    "office-ppt": "http://127.0.0.1:8079/tools/office-ppt/",
    "bing_search": "http://127.0.0.1:8079/tools/bing_search/",
    "map": "http://127.0.0.1:8079/tools/map/",
    "stock": "http://127.0.0.1:8079/tools/stock/",
    "baidu-translation": "http://127.0.0.1:8079/tools/baidu-translation/",
    "nllb-translation": "http://127.0.0.1:8079/tools/nllb-translation/",
}
# Populated by load_tools(): tool name -> tool metadata (e.g. 'avatar').
valid_tools_info = {}
# Sorted tool names, used as the checkbox choices in the UI.
all_tools_list = []
gr.close_all()  # close any Gradio instances left over from a previous run
MAX_TURNS = 30
MAX_BOXES = MAX_TURNS * 2  # two message boxes (user + bot) per turn
# Accumulated (user_msg, bot_msg) pairs rendered in the Chatbot widget.
return_msg = []
# Plain-text transcript fed back to the agent as conversation context.
chat_history = ""
# True once the local tool-server process has been launched.
tool_server_flag = False
def run_tool_server():
    """Launch the local BMTools tool server in a background process.

    Sets the module-level ``tool_server_flag`` so callers (set_environ)
    only start the server once per session.
    """
    def _serve_forever():
        tool_server = bmtools.ToolServer()
        # Only douban-film is enabled for this demo; additional tools
        # (weather, wikipedia, wolframalpha, bing_search, office-ppt,
        # stock, map, nllb-translation, baidu-translation, ...) can be
        # enabled with further load_tool() calls here.
        tool_server.load_tool("douban-film")
        tool_server.serve()

    # A separate process (rather than a thread) keeps the blocking
    # serve() loop out of the Gradio process.
    worker = Process(target=_serve_forever)
    worker.start()

    global tool_server_flag
    tool_server_flag = True
def load_tools():
    """Probe every configured tool endpoint and refresh the UI choices.

    Rebuilds the module-level ``valid_tools_info`` / ``all_tools_list``
    caches and returns a Gradio update for the tools checkbox group.
    """
    global valid_tools_info, all_tools_list
    valid_tools_info = load_valid_tools(tools_mappings)
    # Iterating the dict yields its keys; sorted() materializes the list.
    all_tools_list = sorted(valid_tools_info)
    return gr.update(choices=all_tools_list)
def set_environ(OPENAI_API_KEY: str,
                WOLFRAMALPH_APP_ID: str = "",
                WEATHER_API_KEYS: str = "",
                BING_SUBSCRIPT_KEY: str = "",
                ALPHA_VANTAGE_KEY: str = "",
                BING_MAP_KEY: str = "",
                BAIDU_TRANSLATE_KEY: str = "",
                BAIDU_SECRET_KEY: str = "") -> str:
    """Export the user-supplied API keys and ensure the tool server runs.

    Returns a Gradio update that relabels the "Set" button to "OK!".
    """
    os.environ.update({
        "OPENAI_API_KEY": OPENAI_API_KEY,
        "WOLFRAMALPH_APP_ID": WOLFRAMALPH_APP_ID,
        "WEATHER_API_KEYS": WEATHER_API_KEYS,
        "BING_SUBSCRIPT_KEY": BING_SUBSCRIPT_KEY,
        "ALPHA_VANTAGE_KEY": ALPHA_VANTAGE_KEY,
        "BING_MAP_KEY": BING_MAP_KEY,
        "BAIDU_TRANSLATE_KEY": BAIDU_TRANSLATE_KEY,
        "BAIDU_SECRET_KEY": BAIDU_SECRET_KEY,
    })
    if not tool_server_flag:
        run_tool_server()
        time.sleep(10)  # give the server process time to come up
    return gr.update(value="OK!")
def show_avatar_imgs(tools_chosen):
    """Render avatar thumbnails for the chosen tools as an HTML strip.

    Args:
        tools_chosen: list of tool names; an empty list means all tools.

    Returns:
        [HTML-content update, visibility update] for Gradio components.
    """
    if not tools_chosen:
        tools_chosen = list(valid_tools_info.keys())
    img_template = '<a href="{}" style="float: left"> <img style="margin:5px" src="{}.png" width="24" height="24" alt="avatar" /> {} </a>'
    # Pair each tool with its own avatar BEFORE filtering out missing ones.
    # The original filtered the avatar list first and then zipped it with
    # the unfiltered tool list, so every tool after a None avatar was
    # paired with the wrong image.
    pairs = [(valid_tools_info[tool]['avatar'], tool)
             for tool in tools_chosen
             if valid_tools_info[tool]['avatar'] is not None]
    imgs = ' '.join(img_template.format(img, img, tool) for img, tool in pairs)
    return [gr.update(value='<span class="">' + imgs + '</span>', visible=True), gr.update(visible=True)]
def answer_by_tools(question, tools_chosen, model_chosen):
    """Stream the agent's reasoning steps and answer into the chatbot.

    Generator used as a Gradio event handler: every ``yield`` emits
    [chatbot update, "Clear History" button update, "Stop" button update].

    Args:
        question: the user's new message.
        tools_chosen: tool names ticked in the UI; empty means use all tools.
        model_chosen: LLM backend name chosen in the dropdown.
    """
    global return_msg
    # Show the question immediately, with a '...' placeholder as the reply.
    return_msg += [(question, None), (None, '...')]
    yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()]
    OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY', '')
    if len(tools_chosen) == 0:  # no tools chosen -> use all of them (TODO: what if the pool is too large?)
        tools_chosen = list(valid_tools_info.keys())
    # One tool -> single-tool agent; several -> multi-tool controller.
    if len(tools_chosen) == 1:
        answerer = STQuestionAnswerer(OPENAI_API_KEY.strip(), stream_output=True, llm=model_chosen)
        agent_executor = answerer.load_tools(tools_chosen[0], valid_tools_info[tools_chosen[0]], prompt_type="react-with-tool-description", return_intermediate_steps=True)
    else:
        answerer = MTQuestionAnswerer(OPENAI_API_KEY.strip(), load_valid_tools({k: tools_mappings[k] for k in tools_chosen}), stream_output=True, llm=model_chosen)
        agent_executor = answerer.build_runner()
    # Prepend the running transcript so the agent sees previous turns.
    global chat_history
    chat_history += "Question: " + question + "\n"
    question = chat_history
    for inter in agent_executor(question):
        if isinstance(inter, AgentFinish): continue
        result_str = []
        return_msg.pop()  # drop the trailing '...' placeholder before appending
        if isinstance(inter, dict):
            # A dict is the agent's answer for this pass.
            result_str.append("<font color=red>Answer:</font> {}".format(inter['output']))
            chat_history += "Answer:" + inter['output'] + "\n"
            result_str.append("...")  # fresh placeholder: the agent may continue
        else:
            # Otherwise inter is an (action, observation) pair; colour-code
            # the Thought / Action / Action Input sections for display.
            not_observation = inter[0].log
            if not not_observation.startswith('Thought:'):
                not_observation = "Thought: " + not_observation
            chat_history += not_observation
            not_observation = not_observation.replace('Thought:', '<font color=green>Thought: </font>')
            not_observation = not_observation.replace('Action:', '<font color=purple>Action: </font>')
            not_observation = not_observation.replace('Action Input:', '<font color=purple>Action Input: </font>')
            result_str.append("{}".format(not_observation))
            result_str.append("<font color=blue>Action output:</font>\n{}".format(inter[1]))
            chat_history += "\nAction output:" + inter[1] + "\n"
            result_str.append("...")  # fresh placeholder while the agent keeps thinking
        return_msg += [(None, result) for result in result_str]
        yield [gr.update(visible=True, value=return_msg), gr.update(), gr.update()]
    return_msg.pop()  # remove the final '...' placeholder once the agent is done
    # Relabel the last answer as the final one.
    if return_msg[-1][1].startswith("<font color=red>Answer:</font> "):
        return_msg[-1] = (return_msg[-1][0], return_msg[-1][1].replace("<font color=red>Answer:</font> ", "<font color=green>Final Answer:</font> "))
    # Final yield restores the Clear button and hides Stop.
    yield [gr.update(visible=True, value=return_msg), gr.update(visible=True), gr.update(visible=False)]
def retrieve(tools_search):
    """Narrow the tool checkbox choices to those matching a search query.

    An empty query restores the full tool list; otherwise the local tool
    server's /retrieve endpoint ranks tools against the query.

    Args:
        tools_search: free-text query typed in the "Tools Search" box.

    Returns:
        gr.update with the new ``choices`` for the checkbox group.
    """
    if not tools_search:
        return gr.update(choices=all_tools_list)
    url = "http://127.0.0.1:8079/retrieve"
    param = {"query": tools_search}
    try:
        # Bounded timeout so a wedged or absent tool server cannot hang
        # the UI event handler (the original had no timeout and let any
        # network/JSON error crash the handler).
        response = requests.post(url, json=param, timeout=10)
        response.raise_for_status()
        retrieved_tools = response.json()["tools"]
    except (requests.RequestException, KeyError, ValueError):
        # Retrieval is best-effort: fall back to the unfiltered list.
        return gr.update(choices=all_tools_list)
    return gr.update(choices=retrieved_tools)
def clear_history():
    """Reset the conversation: clear the transcript and the chatbot widget.

    Implemented as a generator because it is wired as a streaming Gradio
    event handler (like answer_by_tools).
    """
    global return_msg, chat_history
    return_msg = []
    chat_history = ""
    yield gr.update(visible=True, value=return_msg)
# --- Gradio UI layout and event wiring ---
with gr.Blocks() as demo:
    # Header row: title on the left, OpenBMB logo on the right.
    with gr.Row():
        with gr.Column(scale=14):
            gr.Markdown("<h1 align='left'> BMTools </h1>")
        with gr.Column(scale=1):
            gr.Markdown('<img src="https://openbmb.cn/openbmb/img/head_logo.e9d9f3f.png" width="140">')
    with gr.Row():
        # (disabled) per-tool API-key column; only the OpenAI key box in the
        # right-hand pane below is active:
        # with gr.Column(scale=1):
        #     OPENAI_API_KEY = gr.Textbox(label="OpenAI API KEY:", placeholder="sk-...", type="text")
        #     # WOLFRAMALPH_APP_ID = gr.Textbox(label="WOLFRAMALPH APP ID:", type="text")
        #     # WEATHER_API_KEYS = gr.Textbox(label="WEATHER API KEYS:", type="text")
        #     # BING_SUBSCRIPT_KEY = gr.Textbox(label="BING SUBSCRIPT KEY:", type="text")
        #     # ALPHA_VANTAGE_KEY = gr.Textbox(label="ALPHA VANTAGE KEY:", type="text")
        #     # BING_MAP_KEY = gr.Textbox(label="BING MAP KEY:", type="text")
        #     # BAIDU_TRANSLATE_KEY = gr.Textbox(label="BAIDU TRANSLATE KEY:", type="text")
        #     # BAIDU_SECRET_KEY = gr.Textbox(label="BAIDU SECRET KEY:", type="text")
        #     key_set_btn = gr.Button(value="Set")
        with gr.Column(scale=4):
            # Left pane: question input, Clear/Stop buttons, chat display.
            with gr.Row():
                with gr.Column(scale=0.85):
                    txt = gr.Textbox(show_label=False, placeholder="Question here. Use Shift+Enter to add new line.", lines=1).style(container=False)
                with gr.Column(scale=0.15, min_width=0):
                    buttonClear = gr.Button("Clear History")
                    buttonStop = gr.Button("Stop", visible=False)
            chatbot = gr.Chatbot(show_label=False, visible=True).style(height=600)
        with gr.Column(scale=1):
            # Right pane: tool search/selection, model choice, API-key entry.
            with gr.Column():
                tools_search = gr.Textbox(
                    lines=1,
                    label="Tools Search",
                    info="Please input some text to search tools.",
                )
                buttonSearch = gr.Button("Clear")
                tools_chosen = gr.CheckboxGroup(
                    choices=all_tools_list,
                    value=["chemical-prop"],
                    label="Tools provided",
                    info="Choose the tools to solve your question.",
                )
                model_chosen = gr.Dropdown(
                    list(available_models), value=DEFAULTMODEL, multiselect=False, label="Model provided", info="Choose the model to solve your question, Default means ChatGPT."
                )
                OPENAI_API_KEY = gr.Textbox(label="OpenAI API KEY:", placeholder="sk-...", type="text")
                key_set_btn = gr.Button(value="Set")

    # "Set" stores the key + starts the tool server, then refreshes the tool list.
    key_set_btn.click(fn=set_environ, inputs=[
        OPENAI_API_KEY,
        # WOLFRAMALPH_APP_ID,
        # WEATHER_API_KEYS,
        # BING_SUBSCRIPT_KEY,
        # ALPHA_VANTAGE_KEY,
        # BING_MAP_KEY,
        # BAIDU_TRANSLATE_KEY,
        # BAIDU_SECRET_KEY
    ], outputs=key_set_btn)
    key_set_btn.click(fn=load_tools, outputs=tools_chosen)
    # Live tool retrieval as the search box changes; the button resets it.
    tools_search.change(retrieve, tools_search, tools_chosen)
    buttonSearch.click(lambda : [gr.update(value=""), gr.update(choices=all_tools_list)], [], [tools_search, tools_chosen])
    # Submitting a question clears the box and swaps Clear -> Stop, then the
    # second submit handler streams the agent's answer into the chatbot.
    txt.submit(lambda : [gr.update(value=''), gr.update(visible=False), gr.update(visible=True)], [], [txt, buttonClear, buttonStop])
    inference_event = txt.submit(answer_by_tools, [txt, tools_chosen, model_chosen], [chatbot, buttonClear, buttonStop])
    # Stop cancels the in-flight streaming event and restores the buttons.
    buttonStop.click(lambda : [gr.update(visible=True), gr.update(visible=False)], [], [buttonClear, buttonStop], cancels=[inference_event])
    buttonClear.click(clear_history, [], chatbot)
# demo.queue().launch(share=False, inbrowser=True, server_name="127.0.0.1", server_port=7001)
demo.queue().launch()  # queue() is required for generator (streaming) handlers