aaron-di committed on
Commit
91d8471
·
1 Parent(s): 843a514

modify app.py

Browse files
Files changed (1) hide show
  1. app.py +25 -56
app.py CHANGED
@@ -10,22 +10,15 @@ os.system("python -m mindsearch.app --lang cn --model_format internlm_silicon &"
10
  PLANNER_HISTORY = []
11
  SEARCHER_HISTORY = []
12
 
13
-
14
  def rst_mem(history_planner: list, history_searcher: list):
15
- '''
16
- Reset the chatbot memory.
17
- '''
18
  history_planner = []
19
  history_searcher = []
20
  if PLANNER_HISTORY:
21
  PLANNER_HISTORY.clear()
22
  return history_planner, history_searcher
23
 
24
-
25
  def format_response(gr_history, agent_return):
26
- if agent_return['state'] in [
27
- AgentStatusCode.STREAM_ING, AgentStatusCode.ANSWER_ING
28
- ]:
29
  gr_history[-1][1] = agent_return['response']
30
  elif agent_return['state'] == AgentStatusCode.PLUGIN_START:
31
  thought = gr_history[-1][1].split('```')[0]
@@ -34,25 +27,17 @@ def format_response(gr_history, agent_return):
34
  elif agent_return['state'] == AgentStatusCode.PLUGIN_END:
35
  thought = gr_history[-1][1].split('```')[0]
36
  if isinstance(agent_return['response'], dict):
37
- gr_history[-1][
38
- 1] = thought + '\n' + f'```json\n{json.dumps(agent_return["response"], ensure_ascii=False, indent=4)}\n```' # noqa: E501
39
  elif agent_return['state'] == AgentStatusCode.PLUGIN_RETURN:
40
  assert agent_return['inner_steps'][-1]['role'] == 'environment'
41
  item = agent_return['inner_steps'][-1]
42
- gr_history.append([
43
- None,
44
- f"```json\n{json.dumps(item['content'], ensure_ascii=False, indent=4)}\n```"
45
- ])
46
  gr_history.append([None, ''])
47
  return
48
 
49
-
50
  def predict(history_planner, history_searcher):
51
-
52
  def streaming(raw_response):
53
- for chunk in raw_response.iter_lines(chunk_size=8192,
54
- decode_unicode=False,
55
- delimiter=b'\n'):
56
  if chunk:
57
  decoded = chunk.decode('utf-8')
58
  if decoded == '\r':
@@ -71,11 +56,7 @@ def predict(history_planner, history_searcher):
71
  url = 'http://localhost:8002/solve'
72
  headers = {'Content-Type': 'application/json'}
73
  data = {'inputs': PLANNER_HISTORY}
74
- raw_response = requests.post(url,
75
- headers=headers,
76
- data=json.dumps(data),
77
- timeout=20,
78
- stream=True)
79
 
80
  for resp in streaming(raw_response):
81
  agent_return, node_name = resp
@@ -98,57 +79,45 @@ def predict(history_planner, history_searcher):
98
  yield history_planner, history_searcher
99
  return history_planner, history_searcher
100
 
101
-
102
- with gr.Blocks() as demo:
103
- gr.HTML("""<h1 align="center">MindSearch Gradio Demo</h1>""")
104
- gr.HTML("""<p style="text-align: center; font-family: Arial, sans-serif;">MindSearch is an open-source AI Search Engine Framework with Perplexity.ai Pro performance. You can deploy your own Perplexity.ai-style search engine using either closed-source LLMs (GPT, Claude) or open-source LLMs (InternLM2.5-7b-chat).</p>""")
 
 
 
 
 
 
105
  gr.HTML("""
106
- <div style="text-align: center; font-size: 16px;">
107
  <a href="https://github.com/InternLM/MindSearch" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">🔗 GitHub</a>
108
  <a href="https://arxiv.org/abs/2407.20183" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">📄 Arxiv</a>
109
  <a href="https://huggingface.co/papers/2407.20183" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">📚 Hugging Face Papers</a>
110
  <a href="https://huggingface.co/spaces/internlm/MindSearch" style="text-decoration: none; color: #4A90E2;">🤗 Hugging Face Demo</a>
111
  </div>
112
  """)
 
113
  with gr.Row():
114
  with gr.Column(scale=10):
115
  with gr.Row():
116
  with gr.Column():
117
- planner = gr.Chatbot(label='planner',
118
- height=700,
119
- show_label=True,
120
- show_copy_button=True,
121
- bubble_full_width=False,
122
- render_markdown=True)
123
  with gr.Column():
124
- searcher = gr.Chatbot(label='searcher',
125
- height=700,
126
- show_label=True,
127
- show_copy_button=True,
128
- bubble_full_width=False,
129
- render_markdown=True)
130
  with gr.Row():
131
- user_input = gr.Textbox(show_label=False,
132
- placeholder='帮我搜索一下 InternLM 开源体系',
133
- lines=5,
134
- container=False)
135
  with gr.Row():
136
  with gr.Column(scale=2):
137
- submitBtn = gr.Button('Submit')
138
  with gr.Column(scale=1, min_width=20):
139
- emptyBtn = gr.Button('Clear History')
140
 
141
  def user(query, history):
142
  return '', history + [[query, '']]
143
 
144
- submitBtn.click(user, [user_input, planner], [user_input, planner],
145
- queue=False).then(predict, [planner, searcher],
146
- [planner, searcher])
147
- emptyBtn.click(rst_mem, [planner, searcher], [planner, searcher],
148
- queue=False)
149
 
150
  demo.queue()
151
- demo.launch(server_name='0.0.0.0',
152
- server_port=7860,
153
- inbrowser=True,
154
- share=True)
 
10
  PLANNER_HISTORY = []
11
  SEARCHER_HISTORY = []
12
 
 
13
  def rst_mem(history_planner: list, history_searcher: list):
 
 
 
14
  history_planner = []
15
  history_searcher = []
16
  if PLANNER_HISTORY:
17
  PLANNER_HISTORY.clear()
18
  return history_planner, history_searcher
19
 
 
20
  def format_response(gr_history, agent_return):
21
+ if agent_return['state'] in [AgentStatusCode.STREAM_ING, AgentStatusCode.ANSWER_ING]:
 
 
22
  gr_history[-1][1] = agent_return['response']
23
  elif agent_return['state'] == AgentStatusCode.PLUGIN_START:
24
  thought = gr_history[-1][1].split('```')[0]
 
27
  elif agent_return['state'] == AgentStatusCode.PLUGIN_END:
28
  thought = gr_history[-1][1].split('```')[0]
29
  if isinstance(agent_return['response'], dict):
30
+ gr_history[-1][1] = thought + '\n' + f'```json\n{json.dumps(agent_return["response"], ensure_ascii=False, indent=4)}\n```'
 
31
  elif agent_return['state'] == AgentStatusCode.PLUGIN_RETURN:
32
  assert agent_return['inner_steps'][-1]['role'] == 'environment'
33
  item = agent_return['inner_steps'][-1]
34
+ gr_history.append([None, f"```json\n{json.dumps(item['content'], ensure_ascii=False, indent=4)}\n```"])
 
 
 
35
  gr_history.append([None, ''])
36
  return
37
 
 
38
  def predict(history_planner, history_searcher):
 
39
  def streaming(raw_response):
40
+ for chunk in raw_response.iter_lines(chunk_size=8192, decode_unicode=False, delimiter=b'\n'):
 
 
41
  if chunk:
42
  decoded = chunk.decode('utf-8')
43
  if decoded == '\r':
 
56
  url = 'http://localhost:8002/solve'
57
  headers = {'Content-Type': 'application/json'}
58
  data = {'inputs': PLANNER_HISTORY}
59
+ raw_response = requests.post(url, headers=headers, data=json.dumps(data), timeout=20, stream=True)
 
 
 
 
60
 
61
  for resp in streaming(raw_response):
62
  agent_return, node_name = resp
 
79
  yield history_planner, history_searcher
80
  return history_planner, history_searcher
81
 
82
+ with gr.Blocks(css="""
83
+ .block {margin-top: 20px; padding: 20px; border-radius: 10px; border: 1px solid #ddd;}
84
+ .centered-text {text-align: center; font-family: Arial, sans-serif; color: #333;}
85
+ .styled-button {background-color: #4A90E2; color: white; border: none; padding: 10px 20px; border-radius: 5px; cursor: pointer;}
86
+ .styled-button:hover {background-color: #357ABD;}
87
+ .input-textbox {border-radius: 5px; border: 1px solid #ccc; padding: 10px;}
88
+ """) as demo:
89
+ gr.HTML("""<h1 class="centered-text">MindSearch Gradio Demo</h1>""")
90
+ gr.HTML("""<p class="centered-text">MindSearch is an open-source AI Search Engine Framework with Perplexity.ai Pro performance. Deploy your own search engine with either closed-source or open-source LLMs.</p>""")
91
+
92
  gr.HTML("""
93
+ <div class="centered-text" style="font-size: 16px;">
94
  <a href="https://github.com/InternLM/MindSearch" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">🔗 GitHub</a>
95
  <a href="https://arxiv.org/abs/2407.20183" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">📄 Arxiv</a>
96
  <a href="https://huggingface.co/papers/2407.20183" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">📚 Hugging Face Papers</a>
97
  <a href="https://huggingface.co/spaces/internlm/MindSearch" style="text-decoration: none; color: #4A90E2;">🤗 Hugging Face Demo</a>
98
  </div>
99
  """)
100
+
101
  with gr.Row():
102
  with gr.Column(scale=10):
103
  with gr.Row():
104
  with gr.Column():
105
+ planner = gr.Chatbot(label='Planner', height=700, show_label=True, show_copy_button=True, bubble_full_width=False, render_markdown=True)
 
 
 
 
 
106
  with gr.Column():
107
+ searcher = gr.Chatbot(label='Searcher', height=700, show_label=True, show_copy_button=True, bubble_full_width=False, render_markdown=True)
 
 
 
 
 
108
  with gr.Row():
109
+ user_input = gr.Textbox(show_label=False, placeholder='帮我搜索一下 InternLM 开源体系', lines=5, container=False, elem_classes="input-textbox")
 
 
 
110
  with gr.Row():
111
  with gr.Column(scale=2):
112
+ submitBtn = gr.Button('Submit', elem_classes="styled-button")
113
  with gr.Column(scale=1, min_width=20):
114
+ emptyBtn = gr.Button('Clear History', elem_classes="styled-button")
115
 
116
  def user(query, history):
117
  return '', history + [[query, '']]
118
 
119
+ submitBtn.click(user, [user_input, planner], [user_input, planner], queue=False).then(predict, [planner, searcher], [planner, searcher])
120
+ emptyBtn.click(rst_mem, [planner, searcher], [planner, searcher], queue=False)
 
 
 
121
 
122
  demo.queue()
123
+ demo.launch(server_name='0.0.0.0', server_port=7860, inbrowser=True, share=True)