acecalisto3 committed on
Commit
20ca81b
·
verified ·
1 Parent(s): 082cfab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +383 -245
app.py CHANGED
@@ -1,251 +1,389 @@
1
- from langchain_core.prompts import PromptTemplate
2
- from langchain_core.runnables import RunnableSequence
3
- from huggingfacehub import HuggingFace-Hub, InferenceApi as InferenceClient
4
- from langchain_community.llms import HuggingFaceEndpoint
5
- from streamlit import StreamlitApp, write, text_input, text_area, button, session_state
6
  import os
7
- import time
8
-
9
- # Load LLM
10
- llm = HuggingFaceEndpoint(repo_id="tiiuae/falcon-7b-instruct", model_kwargs={"temperature": 0.1, "max_new_tokens": 500})
11
-
12
-
13
- class Agent:
14
- def __init__(self, name: str, agent_type: str, complexity: int):
15
- self.name: str = name
16
- self.agent_type: str = agent_type
17
- self.complexity: int = complexity
18
- self.tools: List[Tool] = []
19
-
20
- def add_tool(self, tool: Tool):
21
- self.tools.append(tool)
22
-
23
- def __str__(self):
24
- return f"{self.name} ({self.agent_type}) - Complexity: {self.complexity}"
25
-
26
- class Tool:
27
- def __init__(self, name: str, tool_type: str):
28
- self.name: str = name
29
- self.tool_type: str = tool_type
30
-
31
- def __str__(self):
32
- return f"{self.name} ({self.tool_type})"
33
-
34
- class Pypelyne:
35
- def __init__(self):
36
- self.agents: List[Agent] = []
37
- self.tools: List[Tool] = []
38
- self.history: str = ""
39
- self.task: str = ""
40
- self.purpose: str = ""
41
- self.directory: str = ""
42
- self.task_queue: list = []
43
-
44
- def add_agent(self, agent: Agent):
45
- self.agents.append(agent)
46
-
47
- def add_tool(self, tool: Tool):
48
- self.tools.append(tool)
49
-
50
- def launch_chatapp(project_path):
51
- if st.button("Launch ChatApp"):
52
- st.write("Launching ChatApp...")
53
- os.chdir(project_path)
54
- subprocess.run(["python", "app.py"])
55
- st.write("ChatApp launched successfully!")
56
-
57
- def run_gpt(
58
- self,
59
- prompt_template: PromptTemplate,
60
- stop_tokens: List[str],
61
- max_tokens: int,
62
- **prompt_kwargs,
63
- ) -> str:
64
- content = f"""{PREFIX}
65
- {prompt_template.format(**prompt_kwargs)}"""
66
-
67
- if VERBOSE:
68
- print(LOG_PROMPT.format(content))
69
-
70
- try:
71
- stream = llm.predict(content)
72
- resp = "".join(stream)
73
- except Exception as e:
74
- print(f"Error in run_gpt: {e}")
75
- resp = f"Error: {e}"
76
-
77
- if VERBOSE:
78
- print(LOG_RESPONSE.format(resp))
79
- return resp
80
-
81
- def compress_history(self):
82
- resp = self.run_gpt(
83
- COMPRESS_HISTORY_PROMPT,
84
- stop_tokens=["observation:", "task:", "action:", "thought:"],
85
- max_tokens=512,
86
- task=self.task,
87
- history=self.history,
88
- )
89
- self.history = f"observation: {resp}\n"
90
-
91
- def run_action(self, action_name: str, action_input: Union[str, List[str]], tools: List[Tool] = None) -> str:
92
- if action_name == "COMPLETE":
93
- return "Task completed."
94
-
95
- if len(self.history.split("\n")) > MAX_HISTORY:
96
- self.compress_history()
97
-
98
- if action_name not in self.task_queue:
99
- self.task_queue.append(action_name)
100
-
101
- task_function = getattr(self, f"call_{action_name.lower()}")
102
- result = task_function(action_input, tools)
103
- self.task_queue.pop(0)
104
- return result
105
-
106
- def call_main(self, action_input: List[str]) -> str:
107
- resp = self.run_gpt(
108
- f"{ACTION_PROMPT}",
109
- stop_tokens=["observation:", "task:"],
110
- max_tokens=256,
111
- task=self.task,
112
- history=self.history,
113
- actions=action_input,
114
- )
115
- lines = resp.strip().strip("\n").split("\n")
116
- for line in lines:
117
- if line == "":
118
- continue
119
- if line.startswith("thought: "):
120
- self.history += f"{line}\n"
121
- action_name, action_input = parse_action(line)
122
- self.run_action(action_name, action_input)
123
- return "No valid action found."
124
-
125
- def call_set_task(self, action_input: str) -> str:
126
- self.task = action_input
127
- return f"Task updated: {self.task}"
128
-
129
- def call_modify(self, action_input: str, agent: Agent) -> str:
130
- with open(action_input, "r") as file:
131
- file_content = file.read()
132
-
133
- resp = self.run_gpt(
134
- f"{MODIFY_PROMPT}",
135
- stop_tokens=["action:", "thought:", "observation:"],
136
- max_tokens=2048,
137
- task=self.task,
138
- history=self.history,
139
- file_path=action_input,
140
- file_contents=file_content,
141
- agent=agent,
142
- )
143
- new_contents = resp.strip()
144
-
145
- with open(action_input, "w") as file:
146
- file.write(new_contents)
147
-
148
- self.history += f"observation: file successfully modified\n"
149
- return f"File modified: {action_input}"
150
-
151
- def call_read(self, action_input: str) -> str:
152
- with open(action_input, "r") as file:
153
- file_content = file.read()
154
-
155
- self.history += f"observation: {file_content}\n"
156
- return file_content
157
-
158
- def call_add(self, action_input: str) -> str:
159
- if not os.path.exists(self.directory):
160
- os.makedirs(self.directory)
161
-
162
- with open(os.path.join(self.directory, action_input), "w") as file:
163
- file.write("")
164
-
165
- self.history += f"observation: file created: {action_input}\n"
166
- return f"File created: {action_input}"
167
-
168
- def call_test(self, action_input: str) -> str:
169
- result = subprocess.run(["python", os.path.join(self.directory, action_input)], capture_output=True, text=True)
170
- error_message = result.stderr.strip()
171
-
172
- self.history += f"observation: tests {('passed' if error_message == '' else 'failed')}\n"
173
- return f"Tests {'passed' if error_message == '' else 'failed'}: {error_message}"
174
-
175
- # Global Pypelyne Instance
176
- pypelyne = Pypelyne()
177
-
178
- # Helper Functions
179
- def create_agent(name: str, agent_type: str, complexity: int) -> Agent:
180
- agent = Agent(name, agent_type, complexity)
181
- pypelyne.add_agent(agent)
182
- return agent
183
-
184
- def create_tool(name: str, tool_type: str) -> Tool:
185
- tool = Tool(name, tool_type)
186
- pypelyne.add_tool(tool)
187
- return tool
188
-
189
- # Streamlit App Code
190
- def main():
191
- st.title("🧠 Pypelyne: Your AI-Powered Coding Assistant")
192
-
193
- # Settings
194
- st.sidebar.title("⚙️ Settings")
195
- directory = st.sidebar.text_input(
196
- "Project Directory:", value=pypelyne.directory, help="Path to your coding project"
197
  )
198
- pypelyne.directory = directory
199
 
200
- purpose = st.sidebar.text_area(
201
- "Project Purpose:",
202
- value=pypelyne.purpose,
203
- help="Describe the purpose of your coding project.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
204
  )
205
- pypelyne.purpose = purpose
206
-
207
- # Agent and Tool Management
208
- st.sidebar.header("🤖 Agents")
209
- agents = st.sidebar.column(2)
210
- tools = st.sidebar.column(1)
211
-
212
- for agent in pypelyne.agents:
213
- agents.write(f"- {agent}")
214
-
215
- if st.sidebar.button("Create New Agent"):
216
- agent_name = st.sidebar.text_input("Agent Name:")
217
- agent_type = st.sidebar.selectbox("Agent Type:", ["Task Executor", "Information Retriever", "Decision Maker", "Data Analyzer"])
218
- agent_complexity = st.sidebar.slider("Complexity (1-5):", 1, 5, 3)
219
- new_agent = create_agent(agent_name, agent_type, agent_complexity)
220
- pypelyne.agents = pypelyne.agents + [new_agent]
221
-
222
- st.sidebar.header("🛠️ Tools")
223
- for tool in pypelyne.tools:
224
- tools.write(f"- {tool}")
225
-
226
- if st.sidebar.button("Create New Tool"):
227
- tool_name = st.sidebar.text_input("Tool Name:")
228
- tool_type = st.sidebar.selectbox("Tool Type:", ["Web Scraper", "Database Connector", "API Caller", "File Handler", "Text Processor"])
229
- new_tool = create_tool(tool_name, tool_type)
230
- pypelyne.tools = pypelyne.tools + [new_tool]
231
-
232
- # Main Content Area
233
- st.header("💻 Code Interaction")
234
-
235
- task = st.text_area(
236
- "🎯 Task:",
237
- value=pypelyne.task,
238
- help="Describe the coding task you want to perform.",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
239
  )
240
- if task:
241
- pypelyne.task = task
242
-
243
- user_input = st.text_input("💬 Your Input:")
244
-
245
- if st.button("Execute"):
246
- if user_input:
247
- response = pypelyne.run_action("main", [user_input])
248
- st.write("Pypelyne Says: ", response)
249
 
250
- if __name__ == "__main__":
251
- main()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import os
2
+ import subprocess
3
+ import random
4
+ from huggingface_hub import InferenceClient
5
+ import gradio as gr
6
+ from safe_search import safe_search
7
+ from i_search import google
8
+ from i_search import i_search as i_s
9
+ from agent import (
10
+ ACTION_PROMPT,
11
+ ADD_PROMPT,
12
+ COMPRESS_HISTORY_PROMPT,
13
+ LOG_PROMPT,
14
+ LOG_RESPONSE,
15
+ MODIFY_PROMPT,
16
+ PREFIX,
17
+ SEARCH_QUERY,
18
+ READ_PROMPT,
19
+ TASK_PROMPT,
20
+ UNDERSTAND_TEST_RESULTS_PROMPT,
21
+ )
22
+ from utils import parse_action, parse_file_content, read_python_module_structure
23
+ from datetime import datetime
24
# Timestamp captured once at import time; interpolated into PREFIX for every prompt.
now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

# Shared Hugging Face inference client used by every generation helper below.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

############################################


VERBOSE = True      # echo prompts and responses to stdout
MAX_HISTORY = 100   # compress the transcript once it exceeds this many lines
#MODEL = "gpt-3.5-turbo" # "gpt-4"
37
+
38
+
39
def format_prompt(message, history):
    """Serialize prior (user, bot) turns plus the new message into Mixtral [INST] markup."""
    pieces = ["<s>"]
    for user_turn, bot_turn in history:
        pieces.append(f"[INST] {user_turn} [/INST]")
        pieces.append(f" {bot_turn}</s> ")
    pieces.append(f"[INST] {message} [/INST]")
    return "".join(pieces)
46
+
47
+
48
+
49
def run_gpt(
    prompt_template,
    stop_tokens,
    max_tokens,
    purpose,
    **prompt_kwargs,
):
    """Render PREFIX + prompt_template and stream a completion from the shared client.

    Args:
        prompt_template: format string filled in with **prompt_kwargs.
        stop_tokens: sequences at which generation should stop.
        max_tokens: cap on newly generated tokens for this call.
        purpose: high-level goal interpolated into PREFIX.

    Returns:
        The generated text as a single string. Exceptions from the client
        propagate to the caller.
    """
    # Fresh seed per call so repeated identical prompts can diverge.
    seed = random.randint(1, 1111111111111111)
    print(seed)
    generate_kwargs = dict(
        temperature=1.0,
        # BUGFIX: max_tokens was accepted but ignored (hard-coded 2096), so
        # e.g. compress_history's max_tokens=512 had no effect. Honor it.
        max_new_tokens=max_tokens,
        top_p=0.99,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
        # BUGFIX: stop_tokens was accepted but never forwarded to the API.
        stop_sequences=stop_tokens,
    )

    content = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        # NOTE(review): this interpolates the safe_search *function object*
        # into the prompt text, not a search result — confirm intent.
        safe_search=safe_search,
    ) + prompt_template.format(**prompt_kwargs)
    if VERBOSE:
        print(LOG_PROMPT.format(content))

    stream = client.text_generation(
        content, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    # join() instead of quadratic += accumulation.
    resp = "".join(response.token.text for response in stream)

    if VERBOSE:
        print(LOG_RESPONSE.format(resp))
    return resp
88
+
89
+
90
def compress_history(purpose, task, history, directory):
    """Ask the LLM to summarize *history*; return the summary as one observation line."""
    summary = run_gpt(
        COMPRESS_HISTORY_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=512,
        purpose=purpose,
        task=task,
        history=history,
    )
    # The compressed transcript replaces the old history entirely.
    return "observation: {}\n".format(summary)
101
+
102
def call_search(purpose, task, history, directory, action_input):
    """SEARCH action: fetch the given URL via i_search and log the result.

    Always hands control back to MAIN and never raises — any failure is
    recorded in the history as an observation.
    """
    print("CALLING SEARCH")
    try:
        if "http" not in action_input:
            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
        else:
            # str.strip is a no-op when the character is absent at the ends,
            # so no membership guards are needed before trimming <> wrappers.
            action_input = action_input.strip("<")
            action_input = action_input.strip(">")
            response = i_s(action_input)
            print(response)
            history += "observation: search result is: {}\n".format(response)
    except Exception as e:
        history += "observation: {}'\n".format(e)
    return "MAIN", None, history, task
121
+
122
def call_main(purpose, task, history, directory, action_input):
    """MAIN action: ask the LLM what to do next and parse its reply.

    Scans the response line by line; the first "action:" line determines the
    next step and ends the scan. "thought:" and unrecognized lines are simply
    appended to the history.
    """
    resp = run_gpt(
        ACTION_PROMPT,
        stop_tokens=["observation:", "task:", "action:","thought:"],
        max_tokens=2096,
        purpose=purpose,
        task=task,
        history=history,
    )
    for line in resp.strip().strip("\n").split("\n"):
        if not line:
            continue
        if line.startswith("thought: "):
            history += "{}\n".format(line)
        elif line.startswith("action: "):
            action_name, action_input = parse_action(line)
            print(f'ACTION_NAME :: {action_name}')
            print(f'ACTION_INPUT :: {action_input}')
            history += "{}\n".format(line)
            # A COMPLETE marker anywhere in the action ends the session.
            if "COMPLETE" in action_name or "COMPLETE" in action_input:
                task = "END"
            return action_name, action_input, history, task
        else:
            history += "{}\n".format(line)
    # No action line found: stay in MAIN.
    return "MAIN", None, history, task
156
+
157
+
158
def call_set_task(purpose, task, history, directory, action_input):
    """UPDATE-TASK action: have the LLM rewrite the current task, then resume MAIN."""
    new_task = run_gpt(
        TASK_PROMPT,
        stop_tokens=[],
        max_tokens=64,
        purpose=purpose,
        task=task,
        history=history,
    ).strip("\n")
    history += "observation: task has been updated to: {}\n".format(new_task)
    return "MAIN", None, history, new_task
169
+
170
def end_fn(purpose, task, history, directory, action_input):
    """COMPLETE action: mark the task as finished and terminate the loop."""
    return "COMPLETE", "COMPLETE", history, "END"
173
+
174
# Dispatch table mapping action names emitted by the LLM to their handlers.
NAME_TO_FUNC = {
    "MAIN": call_main,            # decide the next action
    "UPDATE-TASK": call_set_task, # rewrite the current task
    "SEARCH": call_search,        # fetch a URL
    "COMPLETE": end_fn,           # terminate the loop
}
181
+
182
def run_action(purpose, task, history, directory, action_name, action_input):
    """Dispatch a single action through NAME_TO_FUNC.

    Compresses the history when it grows past MAX_HISTORY, degrades unknown
    or empty action names to MAIN, and treats any RESPONSE/COMPLETE marker as
    immediate termination. Any exception falls back to MAIN with a generic
    observation appended.
    """
    print(f'action_name::{action_name}')
    try:
        # Any RESPONSE/COMPLETE marker ends the session immediately.
        if "RESPONSE" in action_name or "COMPLETE" in action_name:
            return "COMPLETE", "COMPLETE", history, "END"

        # Compress the history when it is long.
        if len(history.split("\n")) > MAX_HISTORY:
            if VERBOSE:
                print("COMPRESSING HISTORY")
            history = compress_history(purpose, task, history, directory)

        # Unknown, empty, or None action names degrade to MAIN.
        if action_name not in NAME_TO_FUNC or not action_name:
            action_name = "MAIN"
        assert action_name in NAME_TO_FUNC

        print("RUN: ", action_name, action_input)
        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
    except Exception:
        # Best-effort recovery: log the failure into the transcript and retry MAIN.
        history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
        return "MAIN", None, history, task
207
+
208
def run(purpose, history):
    """Drive the agent loop; yields the transcript text after every step.

    Starts with UPDATE-TASK (no task yet), then lets each action decide its
    successor until run_action sets task to "END".
    """
    task = None
    directory = "./"
    # Gradio may hand history over as a list; coerce it to a plain string.
    history = str(history).strip("[]") if history else ""

    action_name = "UPDATE-TASK" if task is None else "MAIN"
    action_input = None
    while True:
        print("")
        print("")
        print("---")
        print("purpose:", purpose)
        print("task:", task)
        print("---")
        print(history)
        print("---")

        action_name, action_input, history, task = run_action(
            purpose,
            task,
            history,
            directory,
            action_name,
            action_input,
        )
        yield history
        if task == "END":
            return history
244
+
245
+
246
+
247
+ ################################################
248
+
249
# NOTE(review): this redefines the identical format_prompt declared earlier
# in the file; the second definition silently shadows the first.
def format_prompt(message, history):
    """Serialize prior (user, bot) turns plus the new message into Mixtral [INST] markup."""
    parts = ["<s>"]
    for user_turn, bot_turn in history:
        parts.append(f"[INST] {user_turn} [/INST]")
        parts.append(f" {bot_turn}</s> ")
    parts.append(f"[INST] {message} [/INST]")
    return "".join(parts)
256
# Persona prompt keys selectable from the UI dropdown.
agents = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
]
261
def generate(
    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Stream a chat completion for the selected agent persona.

    Yields the accumulating output string after every streamed token.

    NOTE(review): `prompts` is referenced here but never imported anywhere in
    this file — calling this function raises NameError unless a `prompts`
    module is otherwise in scope; confirm the intended import.
    """
    seed = random.randint(1, 1111111111111111)

    # Resolve the persona system prompt; unknown names fall back to WEB_DEV.
    agent = prompts.WEB_DEV
    if agent_name == "AI_SYSTEM_PROMPT":
        agent = prompts.AI_SYSTEM_PROMPT
    elif agent_name == "PYTHON_CODE_DEV":
        agent = prompts.PYTHON_CODE_DEV
    system_prompt = agent

    # Clamp temperature away from zero before handing it to the API.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,
    )

    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""
    for chunk in stream:
        output += chunk.token.text
        yield output
    return output
296
+
297
+
298
# Extra controls rendered alongside the chat input; order matches the
# positional parameters of generate() after (prompt, history).
additional_inputs = [
    gr.Dropdown(
        label="Agents",
        choices=list(agents),
        value=agents[0],
        interactive=True,
    ),
    gr.Textbox(
        label="System Prompt",
        max_lines=1,
        interactive=True,
    ),
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=1048 * 10,
        minimum=0,
        maximum=1048 * 10,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
350
+
351
# Each example row is the user message followed by five None placeholders,
# one per entry of additional_inputs.
_example_questions = [
    "What are the biggest news stories today?",
    "When is the next full moon?",
    "I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?",
    "Can you write a short story about a time-traveling detective who solves historical mysteries?",
    "I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?",
    "I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?",
    "Can you explain how the QuickSort algorithm works and provide a Python implementation?",
    "What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?",
]
examples = [[q, None, None, None, None, None] for q in _example_questions]
360
+
361
# Launch the chat UI; fn=run streams the agent transcript into the chat.
# (A dead triple-quoted block containing a commented-out gr.Blocks prototype
# previously sat here and was removed — it was never executed.)
gr.ChatInterface(
    fn=run,
    # BUGFIX: `super-intelligence=True` was a SyntaxError (a keyword argument
    # name cannot contain `-`; Python parses it as the expression
    # `super - intelligence` being assigned to), which prevented the module
    # from importing at all. The commented-out prototype used `likeable=True`,
    # which is the evident intent.
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
    examples=examples,
    concurrency_limit=50,
).launch(show_api=True)