acecalisto3 committed on
Commit
772ccd5
·
verified ·
1 Parent(s): 585fd7b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +67 -60
app.py CHANGED
@@ -1,3 +1,4 @@
 
1
  import os
2
  import subprocess
3
  import random
@@ -28,14 +29,11 @@ client = InferenceClient(
28
  "mistralai/Mixtral-8x7B-Instruct-v0.1",
29
  )
30
 
31
-
32
  ############################################
33
 
34
-
35
  VERBOSE = True
36
  MAX_HISTORY = 125
37
 
38
-
39
  def format_prompt(message, history):
40
  prompt = "<s>"
41
  for user_prompt, bot_response in history:
@@ -44,8 +42,6 @@ def format_prompt(message, history):
44
  prompt += f"[INST] {message} [/INST]"
45
  return prompt
46
 
47
-
48
-
49
  def run_gpt(
50
  prompt_template,
51
  stop_tokens,
@@ -53,8 +49,8 @@ def run_gpt(
53
  purpose,
54
  **prompt_kwargs,
55
  ):
56
- seed = random.randint(1,1111111111111111)
57
- print (seed)
58
  generate_kwargs = dict(
59
  temperature=1.0,
60
  max_new_tokens=2096,
@@ -64,7 +60,6 @@ def run_gpt(
64
  seed=seed,
65
  )
66
 
67
-
68
  content = PREFIX.format(
69
  date_time_str=date_time_str,
70
  purpose=purpose,
@@ -72,8 +67,7 @@ def run_gpt(
72
  ) + prompt_template.format(**prompt_kwargs)
73
  if VERBOSE:
74
  print(LOG_PROMPT.format(content))
75
-
76
-
77
  #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
78
  #formatted_prompt = format_prompt(f'{content}', history)
79
 
@@ -86,7 +80,6 @@ def run_gpt(
86
  print(LOG_RESPONSE.format(resp))
87
  return resp
88
 
89
-
90
  def compress_history(purpose, task, history, directory):
91
  resp = run_gpt(
92
  COMPRESS_HISTORY_PROMPT,
@@ -98,17 +91,16 @@ def compress_history(purpose, task, history, directory):
98
  )
99
  history = "observation: {}\n".format(resp)
100
  return history
101
-
102
  def call_search(purpose, task, history, directory, action_input):
103
  print("CALLING SEARCH")
104
  try:
105
-
106
  if "http" in action_input:
107
  if "<" in action_input:
108
  action_input = action_input.strip("<")
109
  if ">" in action_input:
110
  action_input = action_input.strip(">")
111
-
112
  response = i_s(action_input)
113
  #response = google(search_return)
114
  print(response)
@@ -135,11 +127,9 @@ def call_main(purpose, task, history, directory, action_input):
135
  if line.startswith("thought: "):
136
  history += "{}\n".format(line)
137
  elif line.startswith("action: "):
138
-
139
  action_name, action_input = parse_action(line)
140
- print (f'ACTION_NAME :: {action_name}')
141
- print (f'ACTION_INPUT :: {action_input}')
142
-
143
  history += "{}\n".format(line)
144
  if "COMPLETE" in action_name or "COMPLETE" in action_input:
145
  task = "END"
@@ -149,12 +139,10 @@ def call_main(purpose, task, history, directory, action_input):
149
  else:
150
  history += "{}\n".format(line)
151
  #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
152
-
153
  #return action_name, action_input, history, task
154
  #assert False, "unknown action: {}".format(line)
155
  return "MAIN", None, history, task
156
 
157
-
158
  def call_set_task(purpose, task, history, directory, action_input):
159
  task = run_gpt(
160
  TASK_PROMPT,
@@ -176,46 +164,41 @@ NAME_TO_FUNC = {
176
  "UPDATE-TASK": call_set_task,
177
  "SEARCH": call_search,
178
  "COMPLETE": end_fn,
179
-
180
  }
181
 
182
  def run_action(purpose, task, history, directory, action_name, action_input):
183
  print(f'action_name::{action_name}')
184
  try:
185
  if "RESPONSE" in action_name or "COMPLETE" in action_name:
186
- action_name="COMPLETE"
187
- task="END"
188
  return action_name, "COMPLETE", history, task
189
-
190
  # compress the history when it is long
191
  if len(history.split("\n")) > MAX_HISTORY:
192
  if VERBOSE:
193
  print("COMPRESSING HISTORY")
194
  history = compress_history(purpose, task, history, directory)
195
  if not action_name in NAME_TO_FUNC:
196
- action_name="MAIN"
197
- if action_name == "" or action_name == None:
198
- action_name="MAIN"
199
  assert action_name in NAME_TO_FUNC
200
-
201
  print("RUN: ", action_name, action_input)
202
  return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
203
  except Exception as e:
204
  history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
205
-
206
  return "MAIN", None, history, task
207
 
208
- def run(purpose,history):
209
-
210
- #print(purpose)
211
- #print(hist)
212
- task=None
213
- directory="./"
214
  if history:
215
- history=str(history).strip("[]")
216
  if not history:
217
  history = ""
218
-
219
  action_name = "UPDATE-TASK" if task is None else "MAIN"
220
  action_input = None
221
  while True:
@@ -242,8 +225,6 @@ def run(purpose,history):
242
  return (history)
243
  #return ("", [(purpose,history)])
244
 
245
-
246
-
247
  ################################################
248
 
249
  def format_prompt(message, history):
@@ -253,7 +234,8 @@ def format_prompt(message, history):
253
  prompt += f" {bot_response}</s> "
254
  prompt += f"[INST] {message} [/INST]"
255
  return prompt
256
- agents =[
 
257
  "WEB_DEV",
258
  "AI_SYSTEM_PROMPT",
259
  "PYTHON_CODE_DEV"
@@ -262,16 +244,16 @@ agents =[
262
  def generate(
263
  prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
264
  ):
265
- seed = random.randint(1,1111111111111111)
266
 
267
- agent=prompts.WEB_DEV
268
  if agent_name == "WEB_DEV":
269
  agent = prompts.WEB_DEV
270
  if agent_name == "AI_SYSTEM_PROMPT":
271
  agent = prompts.AI_SYSTEM_PROMPT
272
  if agent_name == "PYTHON_CODE_DEV":
273
- agent = prompts.PYTHON_CODE_DEV
274
- system_prompt=agent
275
  temperature = float(temperature)
276
  if temperature < 1e-2:
277
  temperature = 1e-2
@@ -295,14 +277,13 @@ def generate(
295
  yield output
296
  return output
297
 
298
-
299
- additional_inputs=[
300
  gr.Dropdown(
301
  label="Agents",
302
  choices=[s for s in agents],
303
  value=agents[0],
304
  interactive=True,
305
- ),
306
  gr.Textbox(
307
  label="System Prompt",
308
  max_lines=1,
@@ -320,9 +301,9 @@ additional_inputs=[
320
 
321
  gr.Slider(
322
  label="Max new tokens",
323
- value=1048*10,
324
  minimum=0,
325
- maximum=1048*10,
326
  step=64,
327
  interactive=True,
328
  info="The maximum numbers of new tokens",
@@ -345,19 +326,18 @@ additional_inputs=[
345
  interactive=True,
346
  info="Penalize repeated tokens",
347
  ),
348
-
349
-
350
  ]
351
 
 
 
 
 
 
 
 
 
 
352
 
353
- examples=[["Based on previous interactions, generate an interactive preview of the user's requested application.", None, None, None, None, None, ],
354
- ["Utilize the relevant code snippets and components from previous interactions.", None, None, None, None, None, ],
355
- ["Assemble a working demo that showcases the core functionality of the application.", None, None, None, None, None, ],
356
- ["Present the demo in an interactive environment within the Gradio interface.", None, None, None, None, None,],
357
- ["Allow the user to explore and interact with the demo to test its features.", None, None, None, None, None,],
358
- ["Gather feedback from the user about the demo and potential improvements.", None, None, None, None, None,],
359
- ["If the user approves of the app's running state you should provide a bash script that will automate all aspects of a local run and also a docker image for ease-of-launch in addition to the huggingface-ready app.py with all functions and gui and the requirements.txt file comprised of all required libraries and packages the application is dependent on, avoiding openai api at all points as we only use huggingface transformers, models, agents, libraries, api.", None, None, None, None, None,],
360
- ]
361
  def create_interface():
362
  with gr.Blocks() as iface:
363
  gr.ChatInterface(
@@ -366,4 +346,31 @@ def create_interface():
366
  examples=examples,
367
  additional_inputs=additional_inputs,
368
  )
369
- return iface
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ```python
2
  import os
3
  import subprocess
4
  import random
 
29
  "mistralai/Mixtral-8x7B-Instruct-v0.1",
30
  )
31
 
 
32
  ############################################
33
 
 
34
  VERBOSE = True
35
  MAX_HISTORY = 125
36
 
 
37
  def format_prompt(message, history):
38
  prompt = "<s>"
39
  for user_prompt, bot_response in history:
 
42
  prompt += f"[INST] {message} [/INST]"
43
  return prompt
44
 
 
 
45
  def run_gpt(
46
  prompt_template,
47
  stop_tokens,
 
49
  purpose,
50
  **prompt_kwargs,
51
  ):
52
+ seed = random.randint(1, 1111111111111111)
53
+ print(seed)
54
  generate_kwargs = dict(
55
  temperature=1.0,
56
  max_new_tokens=2096,
 
60
  seed=seed,
61
  )
62
 
 
63
  content = PREFIX.format(
64
  date_time_str=date_time_str,
65
  purpose=purpose,
 
67
  ) + prompt_template.format(**prompt_kwargs)
68
  if VERBOSE:
69
  print(LOG_PROMPT.format(content))
70
+
 
71
  #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
72
  #formatted_prompt = format_prompt(f'{content}', history)
73
 
 
80
  print(LOG_RESPONSE.format(resp))
81
  return resp
82
 
 
83
  def compress_history(purpose, task, history, directory):
84
  resp = run_gpt(
85
  COMPRESS_HISTORY_PROMPT,
 
91
  )
92
  history = "observation: {}\n".format(resp)
93
  return history
94
+
95
  def call_search(purpose, task, history, directory, action_input):
96
  print("CALLING SEARCH")
97
  try:
 
98
  if "http" in action_input:
99
  if "<" in action_input:
100
  action_input = action_input.strip("<")
101
  if ">" in action_input:
102
  action_input = action_input.strip(">")
103
+
104
  response = i_s(action_input)
105
  #response = google(search_return)
106
  print(response)
 
127
  if line.startswith("thought: "):
128
  history += "{}\n".format(line)
129
  elif line.startswith("action: "):
 
130
  action_name, action_input = parse_action(line)
131
+ print(f'ACTION_NAME :: {action_name}')
132
+ print(f'ACTION_INPUT :: {action_input}')
 
133
  history += "{}\n".format(line)
134
  if "COMPLETE" in action_name or "COMPLETE" in action_input:
135
  task = "END"
 
139
  else:
140
  history += "{}\n".format(line)
141
  #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
 
142
  #return action_name, action_input, history, task
143
  #assert False, "unknown action: {}".format(line)
144
  return "MAIN", None, history, task
145
 
 
146
  def call_set_task(purpose, task, history, directory, action_input):
147
  task = run_gpt(
148
  TASK_PROMPT,
 
164
  "UPDATE-TASK": call_set_task,
165
  "SEARCH": call_search,
166
  "COMPLETE": end_fn,
 
167
  }
168
 
169
  def run_action(purpose, task, history, directory, action_name, action_input):
170
  print(f'action_name::{action_name}')
171
  try:
172
  if "RESPONSE" in action_name or "COMPLETE" in action_name:
173
+ action_name = "COMPLETE"
174
+ task = "END"
175
  return action_name, "COMPLETE", history, task
176
+
177
  # compress the history when it is long
178
  if len(history.split("\n")) > MAX_HISTORY:
179
  if VERBOSE:
180
  print("COMPRESSING HISTORY")
181
  history = compress_history(purpose, task, history, directory)
182
  if not action_name in NAME_TO_FUNC:
183
+ action_name = "MAIN"
184
+ if action_name == "" or action_name is None:
185
+ action_name = "MAIN"
186
  assert action_name in NAME_TO_FUNC
187
+
188
  print("RUN: ", action_name, action_input)
189
  return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
190
  except Exception as e:
191
  history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
 
192
  return "MAIN", None, history, task
193
 
194
+ def run(purpose, history):
195
+ task = None
196
+ directory = "./"
 
 
 
197
  if history:
198
+ history = str(history).strip("[]")
199
  if not history:
200
  history = ""
201
+
202
  action_name = "UPDATE-TASK" if task is None else "MAIN"
203
  action_input = None
204
  while True:
 
225
  return (history)
226
  #return ("", [(purpose,history)])
227
 
 
 
228
  ################################################
229
 
230
  def format_prompt(message, history):
 
234
  prompt += f" {bot_response}</s> "
235
  prompt += f"[INST] {message} [/INST]"
236
  return prompt
237
+
238
+ agents = [
239
  "WEB_DEV",
240
  "AI_SYSTEM_PROMPT",
241
  "PYTHON_CODE_DEV"
 
244
  def generate(
245
  prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
246
  ):
247
+ seed = random.randint(1, 1111111111111111)
248
 
249
+ agent = prompts.WEB_DEV
250
  if agent_name == "WEB_DEV":
251
  agent = prompts.WEB_DEV
252
  if agent_name == "AI_SYSTEM_PROMPT":
253
  agent = prompts.AI_SYSTEM_PROMPT
254
  if agent_name == "PYTHON_CODE_DEV":
255
+ agent = prompts.PYTHON_CODE_DEV
256
+ system_prompt = agent
257
  temperature = float(temperature)
258
  if temperature < 1e-2:
259
  temperature = 1e-2
 
277
  yield output
278
  return output
279
 
280
+ additional_inputs = [
 
281
  gr.Dropdown(
282
  label="Agents",
283
  choices=[s for s in agents],
284
  value=agents[0],
285
  interactive=True,
286
+ ),
287
  gr.Textbox(
288
  label="System Prompt",
289
  max_lines=1,
 
301
 
302
  gr.Slider(
303
  label="Max new tokens",
304
+ value=1048 * 10,
305
  minimum=0,
306
+ maximum=1048 * 10,
307
  step=64,
308
  interactive=True,
309
  info="The maximum numbers of new tokens",
 
326
  interactive=True,
327
  info="Penalize repeated tokens",
328
  ),
 
 
329
  ]
330
 
331
+ examples = [
332
+ ["Based on previous interactions, generate an interactive preview of the user's requested application.", None, None, None, None, None, ],
333
+ ["Utilize the relevant code snippets and components from previous interactions.", None, None, None, None, None, ],
334
+ ["Assemble a working demo that showcases the core functionality of the application.", None, None, None, None, None, ],
335
+ ["Present the demo in an interactive environment within the Gradio interface.", None, None, None, None, None, ],
336
+ ["Allow the user to explore and interact with the demo to test its features.", None, None, None, None, None, ],
337
+ ["Gather feedback from the user about the demo and potential improvements.", None, None, None, None, None, ],
338
+ ["If the user approves of the app's running state you should provide a bash script that will automate all aspects of a local run and also a docker image for ease-of-launch in addition to the huggingface-ready app.py with all functions and gui and the requirements.txt file comprised of all required libraries and packages the application is dependent on, avoiding openai api at all points as we only use huggingface transformers, models, agents, libraries, api.", None, None, None, None, None, ],
339
+ ]
340
 
 
 
 
 
 
 
 
 
341
  def create_interface():
342
  with gr.Blocks() as iface:
343
  gr.ChatInterface(
 
346
  examples=examples,
347
  additional_inputs=additional_inputs,
348
  )
349
+ return iface
350
+
351
+ def chat_interface(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty):
352
+ return generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
353
+
354
+ iface = gr.Blocks()
355
+
356
+ with iface:
357
+ gr.Markdown("# Fragmixt\nAgents With Agents,\nSurf With a Purpose")
358
+
359
+ chatbot = gr.Chatbot()
360
+ msg = gr.Textbox()
361
+ clear = gr.Button("Clear")
362
+
363
+ agent_dropdown = gr.Dropdown(label="Agents", choices=agents, value=agents[0])
364
+ sys_prompt = gr.Textbox(label="System Prompt", max_lines=1)
365
+ temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05)
366
+ max_new_tokens = gr.Slider(label="Max new tokens", value=1048 * 10, minimum=0, maximum=1048 * 10, step=64)
367
+ top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05)
368
+ repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05)
369
+
370
+ msg.submit(chat_interface, [msg, chatbot, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty], [chatbot, msg])
371
+ clear.click(lambda: None, None, chatbot, queue=False)
372
+
373
+ gr.Examples(examples, [msg, agent_dropdown, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty])
374
+
375
+ if __name__ == "__main__":
376
+ iface.launch(concurrency_limit=20)