acecalisto3 committed on
Commit b55f600 · verified · 1 Parent(s): 459114f

Update app.py

Files changed (1): app.py +368 -234
app.py CHANGED
@@ -1,247 +1,381 @@
  import os
- import json
  import subprocess
- import re
- import requests
- from datetime import datetime
-
  import gradio as gr
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, TextGenerationPipeline, AutoModel, RagRetriever, AutoModelForSeq2SeqLM
- import torch
- import tree_sitter
- from tree_sitter import Language, Parser
- import black
- from pylint import lint
- from io import StringIO
- import sys
- from huggingface_hub import Repository, hf_hub_url, HfApi, snapshot_download
- import tempfile
- import logging
- from loguru import logger
- logger.add("app.log", format="{time} {level} {message}", level="INFO")
-
- # Constants
- MODEL_NAME = "bigscience/bloom"
- PROJECT_ROOT = "projects"
- AGENT_DIRECTORY = "agents"
- AVAILABLE_CODE_GENERATIVE_MODELS = [
-     "bigcode/starcoder",
-     "Salesforce/codegen-350M-mono",
-     "microsoft/CodeGPT-small-py",
-     "NinedayWang/PolyCoder-2.7B",
-     "facebook/incoder-1B",
- ]
-
- # Load Models and Resources
- tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
- model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, torch_dtype=torch.float16)
- pipe = TextGenerationPipeline(model=model, tokenizer=tokenizer)
-
- # Build Tree-sitter parser libraries (if not already built)
- Language.build_library("build/my-languages.so", ["tree-sitter-python", "tree-sitter-javascript"])
- PYTHON_LANGUAGE = Language("build/my-languages.so", "python")
- JAVASCRIPT_LANGUAGE = Language("build/my-languages.so", "javascript")
- parser = Parser()
-
- # Session State Initialization
- if 'chat_history' not in gr.State.session_state:
-     gr.State.chat_history = []
- if 'terminal_history' not in gr.State.session_state:
-     gr.State.terminal_history = []
- if 'workspace_projects' not in gr.State.session_state:
-     gr.State.workspace_projects = {}
- if 'available_agents' not in gr.State.session_state:
-     gr.State.available_agents = []
- if 'current_state' not in gr.State.session_state:
-     gr.State.current_state = {
-         'toolbox': {},
-         'workspace_chat': {}
-     }
-
- # Define is_code function
- def is_code(message):
-     return message.lstrip().startswith("```") or message.lstrip().startswith("code:")
-
- # Define agents variable
- agents = ["python", "javascript", "java"]
-
- # Define load_agent_from_file function
- def load_agent_from_file(agent_name):
      try:
-         with open(os.path.join(AGENT_DIRECTORY, agent_name + ".json"), "r") as f:
-             return json.load(f)
-     except FileNotFoundError:
-         return None

- # Define load_pipeline function
- def load_pipeline(model_category, model_name):
-     return available_models[model_category][model_name]

- # Define execute_translation function
- def execute_translation(code, target_language, pipe):
      try:
-         output = pipe(code, max_length=1000)[0]["generated_text"]
-         return output
      except Exception as e:
-         logger.error(f"Error in execute_translation function: {e}")
-         return "Error: Unable to translate code."
-
- # Refactor using CodeT5+
- def execute_refactoring_codet5(code: str) -> str:
-     """
-     Refactors the provided code using the CodeT5+ model.
-
-     Args:
-         code (str): The code to refactor.
-
-     Returns:
-         str: The refactored code.
-     """
-     try:
-         refactor_pipe = pipeline(
-             "text2text-generation",
-             model="Salesforce/codet5p-220m-finetune-Refactor"
-         )
-         prompt = f"Refactor this Python code:\n{code}"
-         output = refactor_pipe(prompt, max_length=1000)[0]["generated_text"]
-         return output
-     except Exception as e:
-         logger.error(f"Error in execute_refactoring_codet5 function: {e}")
-         return "Error: Unable to refactor code."
-
- # Chat interface with agent
- def chat_interface_with_agent(input_text, agent_name, selected_model):
-     """
-     Handles interaction with the selected AI agent.
-     """
-     agent = load_agent_from_file(agent_name)
-     if not agent:
-         return f"Agent {agent_name} not found."
-
-     agent.pipeline = available_models[selected_model]
-     agent_prompt = agent.create_agent_prompt()
-     full_prompt = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
-
-     try:
-         response = agent.generate_response(full_prompt)
-     except Exception as e:
-         logger.error(f"Error generating agent response: {e}")
-         response = "Error: Unable to process your request."
-
-     return response
-
- # Available models
- available_models = {
-     "Code Generation & Completion": {
-         "Salesforce CodeGen-350M (Mono)": pipeline("text-generation", model="Salesforce/codegen-350M-mono"),
-         "BigCode StarCoder": pipeline("text-generation", model="bigcode/starcoder"),
-         "CodeGPT-small-py": pipeline("text-generation", model="microsoft/CodeGPT-small-py"),
-         "PolyCoder-2.7B": pipeline("text-generation", model="NinedayWang/PolyCoder-2.7B"),
-         "InCoder-1B": pipeline("text-generation", model="facebook/incoder-1B"),
-     },
-     "Code Translation": {
-         "Python to JavaScript": (lambda code, pipe=pipeline("translation", model="transformersbook/codeparrot-translation-en-java"): execute_translation(code, "javascript", pipe), []),
-         "Python to C++": (lambda code, pipe=pipeline("text-generation", model="konodyuk/codeparrot-small-trans-py-cpp"): execute_translation(code, "cpp", pipe), []),
-     },
-     # ... other categories
- }

- # Gradio interface with tabs
- with gr.Blocks(title="AI Power Tools for Developers") as demo:
-     # --- State ---
-     code = gr.State("")  # Use gr.State to store code across tabs
-     task_dropdown = gr.State(list(available_models.keys())[0])  # Initialize task dropdown
-     model_dropdown = gr.State(
-         list(available_models[task_dropdown.value].keys())[0]
-     )  # Initialize model dropdown
-
-     def update_model_dropdown(selected_task):
-         models_for_task = list(available_models[selected_task].keys())
-         return gr.Dropdown.update(choices=models_for_task)
-
-     with gr.Tab("Chat & Code"):
-         chatbot = gr.Chatbot(elem_id="chatbot")
-         msg = gr.Textbox(label="Enter your message", placeholder="Type your message here...")
          clear = gr.ClearButton([msg, chatbot])
-
-         def user(message, history):
-             if is_code(message):
-                 response = ""  # Initialize response
-                 task = message.split()[0].lower()  # Extract task keyword
-
-                 # Use the selected model or a default one
-                 model_category = task_dropdown.value
-                 model_name = model_dropdown.value
-                 pipeline = load_pipeline(model_category, model_name)
-
-                 if task in agents:
-                     agent = load_agent_from_file(task)
-                     try:
-                         response = agent.generate_response(message)
-                     except Exception as e:
-                         logger.error(f"Error executing agent {task}: {e}")
-                         response = f"Error executing agent {task}: {e}"
-                 else:
-                     response = "Invalid command or task not found."
-             else:
-                 # Process as natural language request
-                 response = pipe(message, max_length=1000)[0]["generated_text"]
-
-             return response, history + [(message, response)]
-
-         msg.change(user, inputs=[msg, chatbot], outputs=[chatbot, chatbot])
-         clear.click(lambda: None, None, chatbot, queue=False)
-
-     # Model Selection Tab
-     with gr.Tab("Model Selection"):
-         task_dropdown.render()
-         model_dropdown.render()
-         task_dropdown.change(update_model_dropdown, task_dropdown, model_dropdown)
-
-     # Workspace Tab
-     with gr.Tab("Workspace"):
-         with gr.Row():
-             with gr.Column():
-                 code.render()
-                 file_output = gr.File(label="Save File As...", interactive=False)
-             with gr.Column():
-                 output = gr.Textbox(label="Output")
-
-         run_btn = gr.Button(value="Run Code")
-         upload_btn = gr.UploadButton("Upload Python File", file_types=[".py"])
-         save_button = gr.Button(value="Save Code")
-
-         def run_code(code_str):
-             try:
-                 # Save code to a temporary file
-                 with open("temp_code.py", "w") as f:
-                     f.write(code_str)
-
-                 # Execute the code using subprocess
-                 process = subprocess.Popen(["python", "temp_code.py"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-                 output, error = process.communicate()
-
-                 # Return the output and error messages
-                 if error:
-                     return "Error: " + error.decode("utf-8")
-                 else:
-                     return output.decode("utf-8")
-
-             except Exception as e:
-                 logger.error(f"Error running code: {e}")
-                 return f"Error running code: {e}"
-
-         def upload_file(file):
-             with open("uploaded_code.py", "wb") as f:
-                 f.write(file.file.getvalue())
-             return "File uploaded successfully!"
-
-         def save_code(code_str):
-             file_output.value = code_str
-             return file_output
-
-         run_btn.click(run_code, inputs=[code], outputs=[output])
-         upload_btn.click(upload_file, inputs=[upload_btn], outputs=[output])
-         save_button.click(save_code, inputs=[code], outputs=[file_output])
-
- demo.launch()
 
  import os
  import subprocess
+ import random
+ from huggingface_hub import InferenceClient

  import gradio as gr
+ from safe_search import safe_search
+ from i_search import google
+ from i_search import i_search as i_s
+ import prompts  # supplies prompts.WEB_DEV, prompts.AI_SYSTEM_PROMPT and prompts.PYTHON_CODE_DEV used in generate() below
+ from agent import (
+     ACTION_PROMPT,
+     ADD_PROMPT,
+     COMPRESS_HISTORY_PROMPT,
+     LOG_PROMPT,
+     LOG_RESPONSE,
+     MODIFY_PROMPT,
+     PREFIX,
+     SEARCH_QUERY,
+     READ_PROMPT,
+     TASK_PROMPT,
+     UNDERSTAND_TEST_RESULTS_PROMPT,
+ )
+ from utils import parse_action, parse_file_content, read_python_module_structure
+ from datetime import datetime
+
+ now = datetime.now()
+ date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
+
+ client = InferenceClient(
+     "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ )
+
+
+ ############################################
+
+
+ VERBOSE = True
+ MAX_HISTORY = 125
+
+
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
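+
+ # Illustrative only (not executed here): format_prompt("hi", [("q", "a")])
+ # returns "<s>[INST] q [/INST] a</s> [INST] hi [/INST]", the Mixtral
+ # instruct-style turn format.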
+
+
+ def run_gpt(
+     prompt_template,
+     stop_tokens,
+     max_tokens,
+     purpose,
+     **prompt_kwargs,
+ ):
+     seed = random.randint(1, 1111111111111111)
+     print(seed)
+     generate_kwargs = dict(
+         temperature=1.0,
+         max_new_tokens=2096,
+         top_p=0.99,
+         repetition_penalty=1.7,
+         do_sample=True,
+         seed=seed,
+     )
+
+     content = PREFIX.format(
+         date_time_str=date_time_str,
+         purpose=purpose,
+         safe_search=safe_search,
+     ) + prompt_template.format(**prompt_kwargs)
+     if VERBOSE:
+         print(LOG_PROMPT.format(content))
+
+     #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     #formatted_prompt = format_prompt(f'{content}', history)
+
+     stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     resp = ""
+     for response in stream:
+         resp += response.token.text
+
+     if VERBOSE:
+         print(LOG_RESPONSE.format(resp))
+     return resp
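+
+ # Note: stop_tokens and max_tokens are accepted but not forwarded to
+ # client.text_generation above; generation always uses the fixed
+ # generate_kwargs (max_new_tokens=2096) with a fresh random seed.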
+
+
+ def compress_history(purpose, task, history, directory):
+     resp = run_gpt(
+         COMPRESS_HISTORY_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         max_tokens=5096,
+         purpose=purpose,
+         task=task,
+         history=history,
+     )
+     history = "observation: {}\n".format(resp)
+     return history
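+
+ # Compression replaces the whole transcript with a single "observation:"
+ # summary line, keeping the agent's memory bounded (see MAX_HISTORY below).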
+
+ def call_search(purpose, task, history, directory, action_input):
+     print("CALLING SEARCH")
      try:
+         if "http" in action_input:
+             if "<" in action_input:
+                 action_input = action_input.strip("<")
+             if ">" in action_input:
+                 action_input = action_input.strip(">")
+
+             response = i_s(action_input)
+             #response = google(search_return)
+             print(response)
+             history += "observation: search result is: {}\n".format(response)
+         else:
+             history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
+     except Exception as e:
+         history += "observation: {}\n".format(e)
+     return "MAIN", None, history, task
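+
+ # Each call_* handler returns the same 4-tuple of
+ # (next_action_name, action_input, history, task) that drives the loop in run().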
+
+ def call_main(purpose, task, history, directory, action_input):
+     resp = run_gpt(
+         ACTION_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         max_tokens=5096,
+         purpose=purpose,
+         task=task,
+         history=history,
+     )
+     lines = resp.strip().strip("\n").split("\n")
+     for line in lines:
+         if line == "":
+             continue
+         if line.startswith("thought: "):
+             history += "{}\n".format(line)
+         elif line.startswith("action: "):
+             action_name, action_input = parse_action(line)
+             print(f"ACTION_NAME :: {action_name}")
+             print(f"ACTION_INPUT :: {action_input}")
+
+             history += "{}\n".format(line)
+             # guard: action_input may be None, so only test it when present
+             if "COMPLETE" in action_name or (action_input and "COMPLETE" in action_input):
+                 task = "END"
+             return action_name, action_input, history, task
+         else:
+             history += "{}\n".format(line)
+             #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
+
+     #return action_name, action_input, history, task
+     #assert False, "unknown action: {}".format(line)
+     return "MAIN", None, history, task
+
+
+ def call_set_task(purpose, task, history, directory, action_input):
+     task = run_gpt(
+         TASK_PROMPT,
+         stop_tokens=[],
+         max_tokens=2048,
+         purpose=purpose,
+         task=task,
+         history=history,
+     ).strip("\n")
+     history += "observation: task has been updated to: {}\n".format(task)
+     return "MAIN", None, history, task
+
+
+ def end_fn(purpose, task, history, directory, action_input):
+     task = "END"
+     return "COMPLETE", "COMPLETE", history, task
+
+
+ NAME_TO_FUNC = {
+     "MAIN": call_main,
+     "UPDATE-TASK": call_set_task,
+     "SEARCH": call_search,
+     "COMPLETE": end_fn,
+ }
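+
+ # Dispatch table for the agent loop: the action name parsed from the model's
+ # "action: ..." output selects the next handler; all handlers share the
+ # (purpose, task, history, directory, action_input) signature.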
 
 
+ def run_action(purpose, task, history, directory, action_name, action_input):
+     print(f"action_name::{action_name}")
      try:
+         if "RESPONSE" in action_name or "COMPLETE" in action_name:
+             action_name = "COMPLETE"
+             task = "END"
+             return action_name, "COMPLETE", history, task
+
+         # compress the history when it is long
+         if len(history.split("\n")) > MAX_HISTORY:
+             if VERBOSE:
+                 print("COMPRESSING HISTORY")
+             history = compress_history(purpose, task, history, directory)
+         if not action_name or action_name not in NAME_TO_FUNC:
+             action_name = "MAIN"
+         assert action_name in NAME_TO_FUNC
+
+         print("RUN: ", action_name, action_input)
+         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
      except Exception as e:
+         history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
+         return "MAIN", None, history, task
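+
+ # Any exception inside a handler is converted to an "observation:" line and
+ # control falls back to MAIN, so one failed action does not end the session.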
+
+ def run(purpose, history):
+     #print(purpose)
+     #print(hist)
+     task = None
+     directory = "./"
+     if history:
+         history = str(history).strip("[]")
+     if not history:
+         history = ""
+
+     action_name = "UPDATE-TASK" if task is None else "MAIN"
+     action_input = None
+     while True:
+         print("")
+         print("")
+         print("---")
+         print("purpose:", purpose)
+         print("task:", task)
+         print("---")
+         print(history)
+         print("---")
+
+         action_name, action_input, history, task = run_action(
+             purpose,
+             task,
+             history,
+             directory,
+             action_name,
+             action_input,
+         )
+         yield history
+         #yield ("",[(purpose,history)])
+         if task == "END":
+             return history
+             #return ("", [(purpose,history)])
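+
+ # run() is a generator: each yield hands the updated history back to Gradio
+ # for streaming display, and the loop exits once a handler sets task to "END".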
+
+
+ ################################################
+
+
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
+ agents = [
+     "WEB_DEV",
+     "AI_SYSTEM_PROMPT",
+     "PYTHON_CODE_DEV",
+ ]
+
+
+ def generate(
+     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9,
+     max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
+ ):
+     seed = random.randint(1, 1111111111111111)
+
+     agent = prompts.WEB_DEV
+     if agent_name == "WEB_DEV":
+         agent = prompts.WEB_DEV
+     elif agent_name == "AI_SYSTEM_PROMPT":
+         agent = prompts.AI_SYSTEM_PROMPT
+     elif agent_name == "PYTHON_CODE_DEV":
+         agent = prompts.PYTHON_CODE_DEV
+     system_prompt = agent
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=seed,
+     )
+
+     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
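+
+ # Note: sys_prompt is accepted but currently unused; the system prompt is
+ # always taken from the prompts module entry selected by agent_name.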
+
+
+ additional_inputs = [
+     gr.Dropdown(
+         label="Agents",
+         choices=agents,
+         value=agents[0],
+         interactive=True,
+     ),
+     gr.Textbox(
+         label="System Prompt",
+         max_lines=1,
+         interactive=True,
+     ),
+     gr.Slider(
+         label="Temperature",
+         value=0.9,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values produce more diverse outputs",
+     ),
+     gr.Slider(
+         label="Max new tokens",
+         value=1048*10,
+         minimum=0,
+         maximum=1048*10,
+         step=64,
+         interactive=True,
+         info="The maximum number of new tokens",
+     ),
+     gr.Slider(
+         label="Top-p (nucleus sampling)",
+         value=0.90,
+         minimum=0.0,
+         maximum=1,
+         step=0.05,
+         interactive=True,
+         info="Higher values sample more low-probability tokens",
+     ),
+     gr.Slider(
+         label="Repetition penalty",
+         value=1.2,
+         minimum=1.0,
+         maximum=2.0,
+         step=0.05,
+         interactive=True,
+         info="Penalize repeated tokens",
+     ),
+ ]
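+
+ # These six controls correspond positionally to generate()'s parameters after
+ # (prompt, history): agent_name, sys_prompt, temperature, max_new_tokens,
+ # top_p and repetition_penalty. They are defined here but not wired into the
+ # interfaces below, so generate()'s defaults still apply.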
+
+
+ examples = [
+     ["Based on previous interactions, generate an interactive preview of the user's requested application.", None, None, None, None, None],
+     ["Utilize the relevant code snippets and components from previous interactions.", None, None, None, None, None],
+     ["Assemble a working demo that showcases the core functionality of the application.", None, None, None, None, None],
+     ["Present the demo in an interactive environment within the Gradio interface.", None, None, None, None, None],
+     ["Allow the user to explore and interact with the demo to test its features.", None, None, None, None, None],
+     ["Gather feedback from the user about the demo and potential improvements.", None, None, None, None, None],
+     ["If the user approves of the app's running state, provide a bash script that automates a local run, a Docker image for ease of launch, the Hugging Face-ready app.py with all functions and GUI, and a requirements.txt file listing every library and package the application depends on, avoiding the OpenAI API at all points, since we only use Hugging Face transformers, models, agents, libraries, and API.", None, None, None, None, None],
+ ]
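+
+ # Each example row pairs a message with None placeholders for the additional
+ # inputs, leaving those controls at their defaults.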
+
+
+ gr.ChatInterface(
+     fn=run,
+     title="""Fragmixt\nAgents With Agents,\nSurf With a Purpose""",
+     examples=examples,
+     concurrency_limit=20,
+ ).launch()
+
+ with gr.Blocks() as ifacea:
+     gr.HTML("""TEST""")
+ ifacea.launch()
+
+ with gr.Blocks() as iface:
+     chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
+     msg = gr.Textbox()
+     with gr.Row():
+         submit_b = gr.Button()
          clear = gr.ClearButton([msg, chatbot])
+     submit_b.click(run, [msg, chatbot], [msg, chatbot])
+     msg.submit(run, [msg, chatbot], [msg, chatbot])
+ iface.launch()
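+
+ # Note: when run as a plain script, the first .launch() above blocks the main
+ # thread, so the TEST block and this iface are only reached after that
+ # ChatInterface server shuts down.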