acecalisto3 commited on
Commit
d63b979
·
verified ·
1 Parent(s): 768b953

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +484 -201
app.py CHANGED
@@ -1,203 +1,486 @@
1
- import gradio as gr
 
2
  import random
3
- import time
4
- from typing import List, Dict
5
- import transformers
6
-
7
- # Define constants
8
- AGENT_TYPES = ["Task Executor", "Information Retriever", "Decision Maker", "Data Analyzer"]
9
- TOOL_TYPES = ["Web Scraper", "Database Connector", "API Caller", "File Handler", "Text Processor"]
10
-
11
- # Load the language model
12
- model_name = "t5-small"
13
- model = transformers.T5ForConditionalGeneration.from_pretrained(model_name)
14
- tokenizer = transformers.T5Tokenizer.from_pretrained(model_name)
15
-
16
class Agent:
    """A named pipeline agent whose behavior is selected by its agent type."""

    def __init__(self, name: str, agent_type: str, complexity: int):
        self.name = name
        self.type = agent_type
        self.complexity = complexity
        self.tools = []
        # Dispatch table: agent type -> bound behavior method.
        self.behavior = {
            "Task Executor": self.execute_task,
            "Information Retriever": self.retrieve_information,
            "Decision Maker": self.make_decision,
            "Data Analyzer": self.analyze_data,
        }

    def add_tool(self, tool):
        """Attach a tool to this agent."""
        self.tools.append(tool)

    def _simulate(self, message: str) -> str:
        # Every behavior shares the same fake 2-second latency.
        time.sleep(2)
        return message

    def execute_task(self, task: str):
        """Pretend to execute *task* and report success."""
        return self._simulate(f"Task '{task}' executed successfully.")

    def retrieve_information(self, query: str):
        """Pretend to retrieve information for *query*."""
        return self._simulate(f"Information retrieved for query '{query}'.")

    def make_decision(self, decision: str):
        """Pretend to make *decision* and report success."""
        return self._simulate(f"Decision '{decision}' made successfully.")

    def analyze_data(self, data: str):
        """Pretend to analyze *data* and report success."""
        return self._simulate(f"Data '{data}' analyzed successfully.")

    def __str__(self):
        return f"{self.name} ({self.type}) - Complexity: {self.complexity}"
-
55
class Tool:
    """A named tool whose functionality is selected by its tool type."""

    def __init__(self, name: str, tool_type: str):
        self.name = name
        self.type = tool_type
        # Dispatch table: tool type -> bound functionality method.
        self.functionality = {
            "Web Scraper": self.scrape_web,
            "Database Connector": self.connect_database,
            "API Caller": self.call_api,
            "File Handler": self.handle_file,
            "Text Processor": self.process_text,
        }

    def _simulate(self, message: str) -> str:
        # Every functionality shares the same fake 2-second latency.
        time.sleep(2)
        return message

    def scrape_web(self, url: str):
        """Pretend to scrape *url*."""
        return self._simulate(f"Web scraped for URL '{url}'.")

    def connect_database(self, database: str):
        """Pretend to connect to *database*."""
        return self._simulate(f"Connected to database '{database}'.")

    def call_api(self, api: str):
        """Pretend to call *api*."""
        return self._simulate(f"API '{api}' called successfully.")

    def handle_file(self, file: str):
        """Pretend to handle *file*."""
        return self._simulate(f"File '{file}' handled successfully.")

    def process_text(self, text: str):
        """Pretend to process *text*."""
        return self._simulate(f"Text '{text}' processed successfully.")

    def __str__(self):
        return f"{self.name} ({self.type})"
94
-
95
class Pypelyne:
    """Container for the agents and tools that make up one pipeline."""

    def __init__(self):
        self.agents: List[Agent] = []
        self.tools: List[Tool] = []

    def add_agent(self, agent: Agent):
        """Register *agent* with the pipeline."""
        self.agents.append(agent)

    def add_tool(self, tool: Tool):
        """Register *tool* with the pipeline."""
        self.tools.append(tool)

    def generate_chat_app(self):
        """Pretend to generate a chat app and report the component counts."""
        time.sleep(2)  # simulated work, matching the other fake operations
        return f"Chat app generated with {len(self.agents)} agents and {len(self.tools)} tools."
110
-
111
def chat_with_pypelyne(message: str) -> str:
    """Translate *message* to Spanish with the module-level T5 model.

    Despite the name, this only runs the fixed "translate English to
    Spanish" T5 task on the input and returns the decoded text.
    """
    encoded = tokenizer.encode(
        "translate English to Spanish: " + message, return_tensors="pt"
    )
    generated = model.generate(encoded)
    return tokenizer.decode(generated[0], skip_special_tokens=True)
119
-
120
# Single module-level pipeline shared by all UI callbacks.
pypelyne = Pypelyne()


def create_agent(name: str, agent_type: str, complexity: int) -> str:
    """Create an Agent, register it on the shared pipeline, and confirm."""
    new_agent = Agent(name, agent_type, complexity)
    pypelyne.add_agent(new_agent)
    return f"Agent created: {new_agent}"


def create_tool(name: str, tool_type: str) -> str:
    """Create a Tool, register it on the shared pipeline, and confirm."""
    new_tool = Tool(name, tool_type)
    pypelyne.add_tool(new_tool)
    return f"Tool created: {new_tool}"
131
-
132
def assign_tool(agent_name: str, tool_name: str) -> str:
    """Attach the named tool to the named agent, if both exist."""
    agent = next((a for a in pypelyne.agents if a.name == agent_name), None)
    tool = next((t for t in pypelyne.tools if t.name == tool_name), None)

    # Guard clause: bail out early when either lookup failed.
    if agent is None or tool is None:
        return "Agent or tool not found."

    agent.add_tool(tool)
    return f"Tool '{tool.name}' assigned to agent '{agent.name}'"
141
-
142
def generate_chat_app() -> str:
    """Delegate chat-app generation to the shared pipeline."""
    return pypelyne.generate_chat_app()


def list_agents() -> str:
    """Return one agent description per line, or a placeholder when empty."""
    lines = [str(agent) for agent in pypelyne.agents]
    return "\n".join(lines) or "No agents created yet."


def list_tools() -> str:
    """Return one tool description per line, or a placeholder when empty."""
    lines = [str(tool) for tool in pypelyne.tools]
    return "\n".join(lines) or "No tools created yet."
150
-
151
# Gradio UI: one tab per pipeline operation, all backed by the shared
# module-level `pypelyne` instance.
with gr.Blocks() as app:
    gr.Markdown("# Welcome to Pypelyne")
    gr.Markdown("Create your custom pipeline with agents and tools, then chat with it!")

    with gr.Tab("Create Agents"):
        agent_name = gr.Textbox(label="Agent Name")
        agent_type = gr.Dropdown(choices=AGENT_TYPES, label="Agent Type")
        agent_complexity = gr.Slider(minimum=1, maximum=10, step=1, label="Agent Complexity")
        create_agent_btn = gr.Button("Create Agent")
        agent_output = gr.Textbox(label="Output")
        create_agent_btn.click(create_agent, inputs=[agent_name, agent_type, agent_complexity], outputs=agent_output)

    with gr.Tab("Create Tools"):
        tool_name = gr.Textbox(label="Tool Name")
        tool_type = gr.Dropdown(choices=TOOL_TYPES, label="Tool Type")
        create_tool_btn = gr.Button("Create Tool")
        tool_output = gr.Textbox(label="Output")
        create_tool_btn.click(create_tool, inputs=[tool_name, tool_type], outputs=tool_output)

    with gr.Tab("Assign Tools"):
        agent_select = gr.Dropdown(choices=[], label="Select Agent")
        tool_select = gr.Dropdown(choices=[], label="Select Tool")
        assign_tool_btn = gr.Button("Assign Tool")
        assign_output = gr.Textbox(label="Output")
        assign_tool_btn.click(assign_tool, inputs=[agent_select, tool_select], outputs=assign_output)

    with gr.Tab("Generate Chat App"):
        generate_btn = gr.Button("Generate Chat App")
        generate_output = gr.Textbox(label="Output")
        generate_btn.click(generate_chat_app, outputs=generate_output)

    with gr.Tab("Chat with Pypelyne"):
        chat_input = gr.Textbox(label="Your Message")
        chat_output = gr.Textbox(label="Pypelyne's Response")
        chat_btn = gr.Button("Send")
        chat_btn.click(chat_with_pypelyne, inputs=chat_input, outputs=chat_output)

    with gr.Tab("View Pypelyne"):
        view_agents_btn = gr.Button("View Agents")
        view_tools_btn = gr.Button("View Tools")
        view_output = gr.Textbox(label="Pypelyne Components")
        view_agents_btn.click(list_agents, outputs=view_output)
        view_tools_btn.click(list_tools, outputs=view_output)

    def update_dropdowns():
        """Refresh both dropdowns from the current pipeline contents."""
        # COMPAT FIX: gr.Dropdown.update was removed in Gradio 4; gr.update
        # works on both 3.x and 4.x. NOTE(review): confirm installed version.
        return (
            gr.update(choices=[agent.name for agent in pypelyne.agents]),
            gr.update(choices=[tool.name for tool in pypelyne.tools]),
        )

    # Keep the assignment dropdowns in sync after every create action.
    create_agent_btn.click(update_dropdowns, outputs=[agent_select, tool_select])
    create_tool_btn.click(update_dropdowns, outputs=[agent_select, tool_select])

if __name__ == "__main__":
    app.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import subprocess
3
  import random
4
+ from huggingface_hub import InferenceClient
5
+ import gradio as gr
6
+ from safe_search import safe_search
7
+ from i_search import google
8
+ from i_search import i_search as i_s
9
+ from agent import (
10
+ ACTION_PROMPT,
11
+ ADD_PROMPT,
12
+ COMPRESS_HISTORY_PROMPT,
13
+ LOG_PROMPT,
14
+ LOG_RESPONSE,
15
+ MODIFY_PROMPT,
16
+ PREFIX,
17
+ SEARCH_QUERY,
18
+ READ_PROMPT,
19
+ TASK_PROMPT,
20
+ UNDERSTAND_TEST_RESULTS_PROMPT,
21
+ )
22
+ from utils import parse_action, parse_file_content, read_python_module_structure
23
from datetime import datetime

# Timestamp captured once at import; interpolated into PREFIX for every prompt.
now = datetime.now()
date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

# Hosted inference endpoint used for all text generation in this module.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")

############################################


VERBOSE = True      # print every prompt and response for debugging
MAX_HISTORY = 100   # compress the agent history once it exceeds this many lines
# MODEL = "gpt-3.5-turbo"  # "gpt-4"
37
+
38
+
39
def format_prompt(message, history):
    """Render (user, bot) turns plus the new *message* in Mixtral [INST] format."""
    turns = "".join(
        f"[INST] {user_prompt} [/INST] {bot_response}</s> "
        for user_prompt, bot_response in history
    )
    return f"<s>{turns}[INST] {message} [/INST]"
46
+
47
+
48
+
49
def run_gpt(
    prompt_template,
    stop_tokens,
    max_tokens,
    module_summary,
    purpose,
    **prompt_kwargs,
):
    """Build a prompt from PREFIX + *prompt_template* and stream a completion.

    Args:
        prompt_template: template formatted with **prompt_kwargs.
        stop_tokens: sequences at which generation should stop.
        max_tokens: cap on the number of newly generated tokens.
        module_summary: accepted for call-site compatibility; not referenced
            here (PREFIX only consumes date/purpose/safe_search).
        purpose: overall goal, interpolated into PREFIX.

    Returns:
        The concatenated generated text.
    """
    # Fresh random seed per call so identical prompts can produce new samples.
    seed = random.randint(1, 1111111111111111)

    generate_kwargs = dict(
        temperature=0.9,
        # BUG FIX: max_tokens and stop_tokens were accepted but ignored —
        # max_new_tokens was hard-coded to 1048 and stops were never sent.
        max_new_tokens=max_tokens,
        stop_sequences=stop_tokens,  # NOTE(review): newer huggingface_hub renames this parameter to `stop`
        top_p=0.95,
        repetition_penalty=1.0,
        do_sample=True,
        seed=seed,
    )

    content = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        safe_search=safe_search,
    ) + prompt_template.format(**prompt_kwargs)
    if VERBOSE:
        print(LOG_PROMPT.format(content))

    stream = client.text_generation(
        content, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    resp = ""
    for response in stream:
        resp += response.token.text

    if VERBOSE:
        print(LOG_RESPONSE.format(resp))
    return resp
89
+
90
+
91
def compress_history(purpose, task, history, directory):
    """Summarize *history* with the LLM, returning a single observation line."""
    module_summary, _, _ = read_python_module_structure(directory)
    summary = run_gpt(
        COMPRESS_HISTORY_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=512,
        module_summary=module_summary,
        purpose=purpose,
        task=task,
        history=history,
    )
    # The compressed history replaces the old one entirely.
    return "observation: {}\n".format(summary)
104
+
105
def call_search(purpose, task, history, directory, action_input):
    """Search the web for *action_input* (a URL) and append the result to history.

    Always returns control to the MAIN state as
    (next_state, action_input, history, task).
    """
    print("CALLING SEARCH")
    try:
        if "http" in action_input:
            # The model sometimes wraps URLs in angle brackets; strip them.
            action_input = action_input.strip("<>")
            response = i_s(action_input)
            # response = google(search_return)
            print(response)
            history += "observation: search result is: {}\n".format(response)
        else:
            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
    except Exception as e:
        # BUG FIX: the original format string had a stray apostrophe
        # ("observation: {}'\n"), corrupting the observation text.
        history += "observation: {}\n".format(e)
    return "MAIN", None, history, task
122
+
123
def call_main(purpose, task, history, directory, action_input):
    """Ask the LLM for the next thought/action and route the state machine.

    Thought lines are appended to history; the first action line determines
    the next state. Returns (next_state, action_input, history, task).
    """
    module_summary, _, _ = read_python_module_structure(directory)
    resp = run_gpt(
        ACTION_PROMPT,
        stop_tokens=["observation:", "task:"],
        max_tokens=256,
        module_summary=module_summary,
        purpose=purpose,
        task=task,
        history=history,
    )
    lines = resp.strip().strip("\n").split("\n")
    for line in lines:
        if line == "":
            continue
        if line.startswith("thought: "):
            history += "{}\n".format(line)
        elif line.startswith("action: "):
            action_name, action_input = parse_action(line)
            print(f'ACTION_NAME :: {action_name}')
            print(f'ACTION_INPUT :: {action_input}')

            history += "{}\n".format(line)
            # BUG FIX: guard against a None action_input before the `in`
            # test — "COMPLETE" in None raises TypeError.
            if "COMPLETE" in action_name or (action_input is not None and "COMPLETE" in action_input):
                task = "END"
            # Hand off to the named action on the first action line.
            return action_name, action_input, history, task
        else:
            # Unrecognized line: keep it so the model sees its own output.
            history += "{}\n".format(line)

    # No action line produced: stay in MAIN.
    return "MAIN", None, history, task
159
+
160
+
161
def call_test(purpose, task, history, directory, action_input):
    """Run pytest in *directory* and append an LLM summary of any failures."""
    # First check whether any tests are collectible at all.
    collect = subprocess.run(
        ["python", "-m", "pytest", "--collect-only", directory],
        capture_output=True,
        text=True,
    )
    if collect.returncode != 0:
        history += "observation: there are no tests! Test should be written in a test folder under {}\n".format(
            directory
        )
        return "MAIN", None, history, task

    run_result = subprocess.run(
        ["python", "-m", "pytest", directory], capture_output=True, text=True
    )
    if run_result.returncode == 0:
        history += "observation: tests pass\n"
        return "MAIN", None, history, task

    # Tests failed: have the model interpret the (truncated) pytest output.
    module_summary, content, _ = read_python_module_structure(directory)
    resp = run_gpt(
        UNDERSTAND_TEST_RESULTS_PROMPT,
        stop_tokens=[],
        max_tokens=256,
        module_summary=module_summary,
        purpose=purpose,
        task=task,
        history=history,
        stdout=run_result.stdout[:5000],  # limit amount of text
        stderr=run_result.stderr[:5000],  # limit amount of text
    )
    history += "observation: tests failed: {}\n".format(resp)
    return "MAIN", None, history, task
192
+
193
+
194
def call_set_task(purpose, task, history, directory, action_input):
    """Have the LLM propose an updated task, record it, and return to MAIN."""
    module_summary, _, _ = read_python_module_structure(directory)
    new_task = run_gpt(
        TASK_PROMPT,
        stop_tokens=[],
        max_tokens=64,
        module_summary=module_summary,
        purpose=purpose,
        task=task,
        history=history,
    ).strip("\n")
    history += "observation: task has been updated to: {}\n".format(new_task)
    return "MAIN", None, history, new_task
207
+
208
+
209
def call_read(purpose, task, history, directory, action_input):
    """Read the file at *action_input* and append an LLM summary to history."""
    if not os.path.exists(action_input):
        history += "observation: file does not exist\n"
        return "MAIN", None, history, task
    module_summary, content, _ = read_python_module_structure(directory)
    # ROBUSTNESS FIX: .get avoids a KeyError when the path exists on disk but
    # is missing from the module-structure map (the original also indexed
    # content[action_input] twice).
    f_content = content.get(action_input) or "< document is empty >"
    resp = run_gpt(
        READ_PROMPT,
        stop_tokens=[],
        max_tokens=256,
        module_summary=module_summary,
        purpose=purpose,
        task=task,
        history=history,
        file_path=action_input,
        file_contents=f_content,
    ).strip("\n")
    history += "observation: {}\n".format(resp)
    return "MAIN", None, history, task
230
+
231
+
232
def call_modify(purpose, task, history, directory, action_input):
    """Rewrite the existing file at *action_input* via the LLM and record the outcome."""
    if not os.path.exists(action_input):
        history += "observation: file does not exist\n"
        return "MAIN", None, history, task
    module_summary, content, _ = read_python_module_structure(directory)
    # ROBUSTNESS FIX: .get avoids a KeyError when the path is absent from the
    # module-structure map (the original indexed content[action_input] twice).
    f_content = content.get(action_input) or "< document is empty >"
    resp = run_gpt(
        MODIFY_PROMPT,
        stop_tokens=["action:", "thought:", "observation:"],
        max_tokens=2048,
        module_summary=module_summary,
        purpose=purpose,
        task=task,
        history=history,
        file_path=action_input,
        file_contents=f_content,
    )
    new_contents, description = parse_file_content(resp)
    if new_contents is None:
        history += "observation: failed to modify file\n"
        return "MAIN", None, history, task

    with open(action_input, "w") as f:
        f.write(new_contents)

    history += "observation: file successfully modified\n"
    history += "observation: {}\n".format(description)
    return "MAIN", None, history, task
266
+
267
+
268
def call_add(purpose, task, history, directory, action_input):
    """Create a new .py file under *directory* with LLM-generated contents.

    Refuses paths outside *directory*, non-.py paths, and existing files.
    Returns (next_state, action_input, history, task), always back to MAIN.
    """
    d = os.path.dirname(action_input)
    if not d.startswith(directory):
        history += "observation: files must be under directory {}\n".format(directory)
    elif not action_input.endswith(".py"):
        history += "observation: can only write .py files\n"
    else:
        if d and not os.path.exists(d):
            os.makedirs(d)
        if not os.path.exists(action_input):
            module_summary, _, _ = read_python_module_structure(directory)
            resp = run_gpt(
                ADD_PROMPT,
                stop_tokens=["action:", "thought:", "observation:"],
                max_tokens=2048,
                module_summary=module_summary,
                purpose=purpose,
                task=task,
                history=history,
                file_path=action_input,
            )
            new_contents, description = parse_file_content(resp)
            if new_contents is None:
                history += "observation: failed to write file\n"
                return "MAIN", None, history, task

            with open(action_input, "w") as f:
                f.write(new_contents)

            history += "observation: file successfully written\n"
            # BUG FIX: corrected the misspelled "obsertation:" tag so this
            # line matches the format of every other observation.
            history += "observation: {}\n".format(description)
        else:
            history += "observation: file already exists\n"
    return "MAIN", None, history, task
302
def end_fn(purpose, task, history, directory, action_input):
    """Terminal state handler: mark the task as finished."""
    # Same (state, input, history, task) shape as every other handler.
    return "COMPLETE", None, history, "END"
305
# State-machine dispatch table: state name -> handler. Every handler takes
# (purpose, task, history, directory, action_input) and returns
# (next_state, action_input, history, task).
NAME_TO_FUNC = {
    "MAIN": call_main,
    "UPDATE-TASK": call_set_task,
    "SEARCH": call_search,
    "COMPLETE": end_fn,
}
312
+
313
+
314
def run_action(purpose, task, history, directory, action_name, action_input):
    """Dispatch one state-machine step to the handler for *action_name*.

    "RESPONSE" actions terminate immediately; an over-long history is
    compressed before dispatching.
    """
    if "RESPONSE" in action_name:
        return action_name, action_input, history, "END"

    # Compress the history once it grows beyond MAX_HISTORY lines.
    if len(history.split("\n")) > MAX_HISTORY:
        if VERBOSE:
            print("COMPRESSING HISTORY")
        history = compress_history(purpose, task, history, directory)

    assert action_name in NAME_TO_FUNC

    print("RUN: ", action_name, action_input)
    handler = NAME_TO_FUNC[action_name]
    return handler(purpose, task, history, directory, action_input)
329
+
330
+
331
def run(purpose, hist):
    """Drive the agent state machine for *purpose* until the task reaches END.

    *hist* is the chat history supplied by gr.ChatInterface; it is only
    printed — the agent maintains its own internal history string.
    """
    print(purpose)
    print(hist)

    task = None
    directory = "./"
    history = ""
    # No task yet, so the first step asks the model to set one.
    action_name = "UPDATE-TASK" if task is None else "MAIN"
    action_input = None

    while True:
        print("")
        print("")
        print("---")
        print("purpose:", purpose)
        print("task:", task)
        print("---")
        print(history)
        print("---")

        action_name, action_input, history, task = run_action(
            purpose,
            task,
            history,
            directory,
            action_name,
            action_input,
        )
        if task == "END":
            return history
360
+
361
+
362
+
363
+ ################################################
364
+
365
def format_prompt(message, history):
    """Render (user, bot) turns plus the new *message* in Mixtral [INST] format.

    NOTE(review): this is an exact duplicate of the format_prompt defined
    earlier in this module; consider removing one copy.
    """
    parts = ["<s>"]
    for user_prompt, bot_response in history:
        parts.append(f"[INST] {user_prompt} [/INST]")
        parts.append(f" {bot_response}</s> ")
    parts.append(f"[INST] {message} [/INST]")
    return "".join(parts)
372
# Names of the selectable system prompts (attributes of the `prompts` module).
agents = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
]


def generate(
    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
):
    """Stream a completion for *prompt* using the selected agent's system prompt.

    Yields the accumulated output after each generated token.

    NOTE(review): `prompts` is never imported in this module, so calling this
    raises NameError — confirm the missing `import prompts` (the UI below
    currently wires gr.ChatInterface to `run`, not to this function).
    """
    seed = random.randint(1, 1111111111111111)

    # Pick the system prompt for the requested agent; WEB_DEV is the default.
    agent = prompts.WEB_DEV
    if agent_name == "WEB_DEV":
        agent = prompts.WEB_DEV
    if agent_name == "AI_SYSTEM_PROMPT":
        agent = prompts.AI_SYSTEM_PROMPT
    if agent_name == "PYTHON_CODE_DEV":
        agent = prompts.PYTHON_CODE_DEV
    system_prompt = agent

    # Normalize sampling parameters; temperature is floored at 1e-2.
    temperature = float(temperature)
    if temperature < 1e-2:
        temperature = 1e-2
    top_p = float(top_p)

    generate_kwargs = dict(
        temperature=temperature,
        max_new_tokens=max_new_tokens,
        top_p=top_p,
        repetition_penalty=repetition_penalty,
        do_sample=True,
        seed=seed,
    )

    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(
        formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False
    )
    output = ""

    for response in stream:
        output += response.token.text
        yield output
    return output
412
+
413
+
414
# Extra controls for the generate() parameters. NOTE(review): this list is
# built but never passed to gr.ChatInterface below, so it has no effect.
additional_inputs = [
    gr.Dropdown(
        label="Agents",
        choices=list(agents),
        value=agents[0],
        interactive=True,
    ),
    gr.Textbox(
        label="System Prompt",
        max_lines=1,
        interactive=True,
    ),
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=1048 * 10,
        minimum=0,
        maximum=1048 * 10,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]

# Example messages shown in the UI; trailing Nones line up with the six
# additional inputs above.
examples = [
    ["What are the biggest news stories today?", None, None, None, None, None, ],
    ["When is the next full moon?", None, None, None, None, None, ],
    ["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None, ],
    ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None,],
    ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None,],
    ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None,],
    ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None,],
    ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None,],
]

# Launch the chat UI wired to the agent state machine (`run`).
gr.ChatInterface(
    fn=run,
    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
    examples=examples,
    concurrency_limit=20,
).launch(show_api=False)
485
+
486
+