johann22 committed on
Commit
7933672
·
1 Parent(s): 7af09bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -36
app.py CHANGED
@@ -51,15 +51,15 @@ def run_gpt(
51
  stop_tokens,
52
  max_tokens,
53
  purpose,
54
- **prompt_kwargs,
55
  ):
56
  seed = random.randint(1,1111111111111111)
57
  print (seed)
58
  generate_kwargs = dict(
59
- temperature=1.0,
60
  max_new_tokens=1048,
61
- top_p=0.99,
62
- repetition_penalty=1.0,
63
  do_sample=True,
64
  seed=seed,
65
  )
@@ -87,19 +87,19 @@ def run_gpt(
87
  return resp
88
 
89
 
90
- def compress_history(purpose, task, history, directory):
91
  resp = run_gpt(
92
  COMPRESS_HISTORY_PROMPT,
93
  stop_tokens=["observation:", "task:", "action:", "thought:"],
94
  max_tokens=512,
95
  purpose=purpose,
96
  task=task,
97
- history=history,
98
  )
99
  history = "observation: {}\n".format(resp)
100
  return history
101
 
102
- def call_search(purpose, task, history, directory, action_input):
103
  print("CALLING SEARCH")
104
  try:
105
 
@@ -119,14 +119,14 @@ def call_search(purpose, task, history, directory, action_input):
119
  history += "observation: {}'\n".format(e)
120
  return "MAIN", None, history, task
121
 
122
- def call_main(purpose, task, history, directory, action_input):
123
  resp = run_gpt(
124
  ACTION_PROMPT,
125
  stop_tokens=["observation:", "task:"],
126
  max_tokens=1048,
127
  purpose=purpose,
128
  task=task,
129
- history=history,
130
  )
131
  lines = resp.strip().strip("\n").split("\n")
132
  for line in lines:
@@ -155,14 +155,14 @@ def call_main(purpose, task, history, directory, action_input):
155
  return "MAIN", None, history, task
156
 
157
 
158
- def call_set_task(purpose, task, history, directory, action_input):
159
  task = run_gpt(
160
  TASK_PROMPT,
161
  stop_tokens=[],
162
  max_tokens=64,
163
  purpose=purpose,
164
  task=task,
165
- history=history,
166
  ).strip("\n")
167
  history += "observation: task has been updated to: {}\n".format(task)
168
  return "MAIN", None, history, task
@@ -179,7 +179,7 @@ NAME_TO_FUNC = {
179
  }
180
 
181
 
182
- def run_action(purpose, task, history, directory, action_name, action_input):
183
  try:
184
  if "RESPONSE" in action_name or "COMPLETE" in action_name:
185
  action_name="COMPLETE"
@@ -198,13 +198,13 @@ def run_action(purpose, task, history, directory, action_name, action_input):
198
  assert action_name in NAME_TO_FUNC
199
 
200
  print("RUN: ", action_name, action_input)
201
- return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
202
  except Exception as e:
203
  history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
204
 
205
  return "MAIN", None, history, task
206
 
207
- def run(purpose,history):
208
 
209
  #print(purpose)
210
  #print(hist)
@@ -233,7 +233,7 @@ def run(purpose,history):
233
  history,
234
  directory,
235
  action_name,
236
- action_input,
237
  )
238
  yield (history)
239
  #yield ("",[(purpose,history)])
@@ -295,17 +295,6 @@ def generate(
295
 
296
 
297
  additional_inputs=[
298
- gr.Dropdown(
299
- label="Agents",
300
- choices=[s for s in agents],
301
- value=agents[0],
302
- interactive=True,
303
- ),
304
- gr.Textbox(
305
- label="System Prompt",
306
- max_lines=1,
307
- interactive=True,
308
- ),
309
  gr.Slider(
310
  label="Temperature",
311
  value=0.9,
@@ -315,16 +304,6 @@ additional_inputs=[
315
  interactive=True,
316
  info="Higher values produce more diverse outputs",
317
  ),
318
-
319
- gr.Slider(
320
- label="Max new tokens",
321
- value=1048*10,
322
- minimum=0,
323
- maximum=1048*10,
324
- step=64,
325
- interactive=True,
326
- info="The maximum numbers of new tokens",
327
- ),
328
  gr.Slider(
329
  label="Top-p (nucleus sampling)",
330
  value=0.90,
@@ -383,6 +362,7 @@ iface.launch()
383
  '''
384
  gr.ChatInterface(
385
  fn=run,
 
386
  chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
387
  title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
388
  examples=examples,
 
51
  stop_tokens,
52
  max_tokens,
53
  purpose,
54
+ temperature=0.9, top_p=0.95, repetition_penalty=1.0, **prompt_kwargs,
55
  ):
56
  seed = random.randint(1,1111111111111111)
57
  print (seed)
58
  generate_kwargs = dict(
59
+ temperature=temperature,
60
  max_new_tokens=1048,
61
+ top_p=top_p,
62
+ repetition_penalty=repetition_penalty,
63
  do_sample=True,
64
  seed=seed,
65
  )
 
87
  return resp
88
 
89
 
90
+ def compress_history(purpose, task, history, directory,temperature=0.9, top_p=0.95, repetition_penalty=1.0,):
91
  resp = run_gpt(
92
  COMPRESS_HISTORY_PROMPT,
93
  stop_tokens=["observation:", "task:", "action:", "thought:"],
94
  max_tokens=512,
95
  purpose=purpose,
96
  task=task,
97
+ history=history,temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,
98
  )
99
  history = "observation: {}\n".format(resp)
100
  return history
101
 
102
+ def call_search(purpose, task, history, directory, action_input,temperature=0.9, top_p=0.95, repetition_penalty=1.0,):
103
  print("CALLING SEARCH")
104
  try:
105
 
 
119
  history += "observation: {}'\n".format(e)
120
  return "MAIN", None, history, task
121
 
122
+ def call_main(purpose, task, history, directory, action_input,temperature=0.9, top_p=0.95, repetition_penalty=1.0,):
123
  resp = run_gpt(
124
  ACTION_PROMPT,
125
  stop_tokens=["observation:", "task:"],
126
  max_tokens=1048,
127
  purpose=purpose,
128
  task=task,
129
+ history=history,temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,
130
  )
131
  lines = resp.strip().strip("\n").split("\n")
132
  for line in lines:
 
155
  return "MAIN", None, history, task
156
 
157
 
158
+ def call_set_task(purpose, task, history, directory, action_input,temperature=0.9, top_p=0.95, repetition_penalty=1.0,):
159
  task = run_gpt(
160
  TASK_PROMPT,
161
  stop_tokens=[],
162
  max_tokens=64,
163
  purpose=purpose,
164
  task=task,
165
+ history=history,temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,
166
  ).strip("\n")
167
  history += "observation: task has been updated to: {}\n".format(task)
168
  return "MAIN", None, history, task
 
179
  }
180
 
181
 
182
+ def run_action(purpose, task, history, directory, action_name, action_input,temperature=0.9, top_p=0.95, repetition_penalty=1.0,):
183
  try:
184
  if "RESPONSE" in action_name or "COMPLETE" in action_name:
185
  action_name="COMPLETE"
 
198
  assert action_name in NAME_TO_FUNC
199
 
200
  print("RUN: ", action_name, action_input)
201
+ return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input,temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,)
202
  except Exception as e:
203
  history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
204
 
205
  return "MAIN", None, history, task
206
 
207
+ def run(purpose,history,temperature=0.9, top_p=0.95, repetition_penalty=1.0,):
208
 
209
  #print(purpose)
210
  #print(hist)
 
233
  history,
234
  directory,
235
  action_name,
236
+ action_input,temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty,
237
  )
238
  yield (history)
239
  #yield ("",[(purpose,history)])
 
295
 
296
 
297
  additional_inputs=[
 
 
 
 
 
 
 
 
 
 
 
298
  gr.Slider(
299
  label="Temperature",
300
  value=0.9,
 
304
  interactive=True,
305
  info="Higher values produce more diverse outputs",
306
  ),
 
 
 
 
 
 
 
 
 
 
307
  gr.Slider(
308
  label="Top-p (nucleus sampling)",
309
  value=0.90,
 
362
  '''
363
  gr.ChatInterface(
364
  fn=run,
365
+ additional_inputs=additional_inputs,
366
  chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
367
  title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
368
  examples=examples,