Spaces: Running
Update app.py
Browse files

app.py CHANGED
```diff
@@ -1,239 +1,480 @@
 import os
 import subprocess
-import streamlit as st
-from transformers.pipelines import pipeline
-from transformers import AutoModelForCausalLM, AutoTokenizer, AutoModel, RagRetriever, AutoModelForSeq2SeqLM
-import black
-from pylint import lint
-from io import StringIO
-import sys
-import torch
-from huggingface_hub import hf_hub_url, cached_download, HfApi
-from datetime import datetime
-import requests
 import random
-from huggingface_hub
```

(The rest of the old Streamlit implementation, old-file lines 16–239, is also deleted; the diff viewer did not render those lines' contents. The truncated `from huggingface_hub` import above appears as-is in the old file.)
```diff
+from huggingface_hub import InferenceClient
+import gradio as gr
+from i_search import google
+from i_search import i_search as i_s
+from agent import (
+    ACTION_PROMPT,
+    ADD_PROMPT,
+    COMPRESS_HISTORY_PROMPT,
+    LOG_PROMPT,
+    LOG_RESPONSE,
+    MODIFY_PROMPT,
+    PREFIX,
+    SEARCH_QUERY,
+    READ_PROMPT,
+    TASK_PROMPT,
+    UNDERSTAND_TEST_RESULTS_PROMPT,
+)
+from utils import parse_action, parse_file_content, read_python_module_structure
+from datetime import datetime
+import prompts  # missing from the commit: generate() below reads prompts.WEB_DEV etc.
+
+now = datetime.now()
+date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
+
+client = InferenceClient(
+    "mistralai/Mixtral-8x7B-Instruct-v0.1"
+)
+
+############################################
+
+
+VERBOSE = True
+MAX_HISTORY = 100
+#MODEL = "gpt-3.5-turbo"  # "gpt-4"
+
+
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
```
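For reference, `format_prompt` hand-rolls the Mixtral-Instruct chat template. A quick sketch of what it returns for a one-turn history:

```python
# Sketch: the string format_prompt builds for one prior exchange plus a new message.
history = [("What is 2+2?", "4")]
print(format_prompt("And 3+3?", history))
# -> <s>[INST] What is 2+2? [/INST] 4</s> [INST] And 3+3? [/INST]
```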
```diff
+def run_gpt(
+    prompt_template,
+    stop_tokens,
+    max_tokens,
+    module_summary,
+    purpose,
+    **prompt_kwargs,
+):
+    # NOTE: stop_tokens, max_tokens, and module_summary are accepted but never
+    # forwarded; generation always uses the hard-coded kwargs below.
+    seed = random.randint(1, 1111111111111111)
+
+    generate_kwargs = dict(
+        temperature=0.9,
+        max_new_tokens=1048,
+        top_p=0.95,
+        repetition_penalty=1.0,
+        do_sample=True,
+        seed=seed,
+    )
+
+    content = PREFIX.format(
+        date_time_str=date_time_str,
+        purpose=purpose,
+    ) + prompt_template.format(**prompt_kwargs)
+    if VERBOSE:
+        print(LOG_PROMPT.format(content))
+
+    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    #formatted_prompt = format_prompt(f'{content}', history)
+
+    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    resp = ""
+    for response in stream:
+        resp += response.token.text
+
+    if VERBOSE:
+        print(LOG_RESPONSE.format(resp))
+    return resp
```
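If the caller's budget and stop strings should be honored, a minimal sketch of forwarding them, assuming a `huggingface_hub` release whose `text_generation` still accepts `stop_sequences` (newer versions spell it `stop`):

```python
# Sketch (not part of the commit): forward the ignored parameters to the client.
stream = client.text_generation(
    content,
    temperature=0.9,
    max_new_tokens=max_tokens,    # honor the caller's token budget
    stop_sequences=stop_tokens,   # honor the caller's stop strings
    top_p=0.95,
    repetition_penalty=1.0,
    do_sample=True,
    seed=seed,
    stream=True,
    details=True,
    return_full_text=False,
)
```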
```diff
+def compress_history(purpose, task, history, directory):
+    module_summary, _, _ = read_python_module_structure(directory)
+    resp = run_gpt(
+        COMPRESS_HISTORY_PROMPT,
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
+        max_tokens=512,
+        module_summary=module_summary,
+        purpose=purpose,
+        task=task,
+        history=history,
+    )
+    history = "observation: {}\n".format(resp)
+    return history
+
+def call_search(purpose, task, history, directory, action_input):
+    print("CALLING SEARCH")
+    try:
+        if "http" in action_input:
+            if "<" in action_input:
+                action_input = action_input.strip("<")
+            if ">" in action_input:
+                action_input = action_input.strip(">")
+            response = i_s(action_input)
+            #response = google(search_return)
+            print(response)
+            history += "observation: search result is: {}\n".format(response)
+        else:
+            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=URL'\n"
+    except Exception as e:
+        history += "observation: {}\n".format(e)
+    return "MAIN", None, history, task
```
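`i_search.i_search` (imported as `i_s`) is not part of this diff; from its use in `call_search`, it takes a URL string and returns printable page text. A hypothetical stand-in, for illustration only:

```python
import requests

def i_s(url: str) -> str:
    """Hypothetical sketch of i_search.i_search: fetch a URL, return its text."""
    resp = requests.get(url, timeout=10)
    resp.raise_for_status()
    return resp.text[:4000]  # truncate so the observation stays small
```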
```diff
+def call_main(purpose, task, history, directory, action_input):
+    module_summary, _, _ = read_python_module_structure(directory)
+    resp = run_gpt(
+        ACTION_PROMPT,
+        stop_tokens=["observation:", "task:"],
+        max_tokens=256,
+        module_summary=module_summary,
+        purpose=purpose,
+        task=task,
+        history=history,
+    )
+    lines = resp.strip().strip("\n").split("\n")
+    for line in lines:
+        if line == "":
+            continue
+        if line.startswith("thought: "):
+            history += "{}\n".format(line)
+        elif line.startswith("action: "):
+            action_name, action_input = parse_action(line)
+            print(f'ACTION_NAME :: {action_name}')
+            print(f'ACTION_INPUT :: {action_input}')
+            history += "{}\n".format(line)
+            # guard: action_input may be None, so check it before the substring test
+            if "COMPLETE" in action_name or (action_input and "COMPLETE" in action_input):
+                task = "END"
+                return action_name, action_input, history, task
+            else:
+                return action_name, action_input, history, task
+        else:
+            history += "{}\n".format(line)
+            #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
+
+    #return action_name, action_input, history, task
+    #assert False, "unknown action: {}".format(line)
+    return "MAIN", None, history, task
```
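`parse_action` also lives in the project's `utils` module, outside this diff. Judging from how `call_main` consumes it, a compatible sketch (hypothetical, not the project's actual code) is:

```python
def parse_action(line: str):
    """Hypothetical sketch: split "action: NAME action_input=VALUE" into parts."""
    body = line[len("action: "):]
    if "action_input=" in body:
        name, value = body.split("action_input=", 1)
        return name.strip(), value.strip()
    return body.strip(), None
```

For example, `parse_action("action: SEARCH action_input=https://example.com")` would yield `("SEARCH", "https://example.com")`.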
```diff
+def call_test(purpose, task, history, directory, action_input):
+    result = subprocess.run(
+        ["python", "-m", "pytest", "--collect-only", directory],
+        capture_output=True,
+        text=True,
+    )
+    if result.returncode != 0:
+        history += "observation: there are no tests! Tests should be written in a test folder under {}\n".format(
+            directory
+        )
+        return "MAIN", None, history, task
+    result = subprocess.run(
+        ["python", "-m", "pytest", directory], capture_output=True, text=True
+    )
+    if result.returncode == 0:
+        history += "observation: tests pass\n"
+        return "MAIN", None, history, task
+    module_summary, content, _ = read_python_module_structure(directory)
+    resp = run_gpt(
+        UNDERSTAND_TEST_RESULTS_PROMPT,
+        stop_tokens=[],
+        max_tokens=256,
+        module_summary=module_summary,
+        purpose=purpose,
+        task=task,
+        history=history,
+        stdout=result.stdout[:5000],  # limit amount of text
+        stderr=result.stderr[:5000],  # limit amount of text
+    )
+    history += "observation: tests failed: {}\n".format(resp)
+    return "MAIN", None, history, task
+
+
+def call_set_task(purpose, task, history, directory, action_input):
+    module_summary, content, _ = read_python_module_structure(directory)
+    task = run_gpt(
+        TASK_PROMPT,
+        stop_tokens=[],
+        max_tokens=64,
+        module_summary=module_summary,
+        purpose=purpose,
+        task=task,
+        history=history,
+    ).strip("\n")
+    history += "observation: task has been updated to: {}\n".format(task)
+    return "MAIN", None, history, task
+
+
+def call_read(purpose, task, history, directory, action_input):
+    if not os.path.exists(action_input):
+        history += "observation: file does not exist\n"
+        return "MAIN", None, history, task
+    module_summary, content, _ = read_python_module_structure(directory)
+    f_content = (
+        content[action_input] if content[action_input] else "< document is empty >"
+    )
+    resp = run_gpt(
+        READ_PROMPT,
+        stop_tokens=[],
+        max_tokens=256,
+        module_summary=module_summary,
+        purpose=purpose,
+        task=task,
+        history=history,
+        file_path=action_input,
+        file_contents=f_content,
+    ).strip("\n")
+    history += "observation: {}\n".format(resp)
+    return "MAIN", None, history, task
+
+
+def call_modify(purpose, task, history, directory, action_input):
+    if not os.path.exists(action_input):
+        history += "observation: file does not exist\n"
+        return "MAIN", None, history, task
+    (
+        module_summary,
+        content,
+        _,
+    ) = read_python_module_structure(directory)
+    f_content = (
+        content[action_input] if content[action_input] else "< document is empty >"
+    )
+    resp = run_gpt(
+        MODIFY_PROMPT,
+        stop_tokens=["action:", "thought:", "observation:"],
+        max_tokens=2048,
+        module_summary=module_summary,
+        purpose=purpose,
+        task=task,
+        history=history,
+        file_path=action_input,
+        file_contents=f_content,
+    )
+    new_contents, description = parse_file_content(resp)
+    if new_contents is None:
+        history += "observation: failed to modify file\n"
+        return "MAIN", None, history, task
+
+    with open(action_input, "w") as f:
+        f.write(new_contents)
+
+    history += "observation: file successfully modified\n"
+    history += "observation: {}\n".format(description)
+    return "MAIN", None, history, task
+
+
+def call_add(purpose, task, history, directory, action_input):
+    d = os.path.dirname(action_input)
+    if not d.startswith(directory):
+        history += "observation: files must be under directory {}\n".format(directory)
+    elif not action_input.endswith(".py"):
+        history += "observation: can only write .py files\n"
+    else:
+        if d and not os.path.exists(d):
+            os.makedirs(d)
+        if not os.path.exists(action_input):
+            module_summary, _, _ = read_python_module_structure(directory)
+            resp = run_gpt(
+                ADD_PROMPT,
+                stop_tokens=["action:", "thought:", "observation:"],
+                max_tokens=2048,
+                module_summary=module_summary,
+                purpose=purpose,
+                task=task,
+                history=history,
+                file_path=action_input,
+            )
+            new_contents, description = parse_file_content(resp)
+            if new_contents is None:
+                history += "observation: failed to write file\n"
+                return "MAIN", None, history, task
+
+            with open(action_input, "w") as f:
+                f.write(new_contents)
+
+            history += "observation: file successfully written\n"
+            history += "observation: {}\n".format(description)
+        else:
+            history += "observation: file already exists\n"
+    return "MAIN", None, history, task
+
+def end_fn(purpose, task, history, directory, action_input):
+    task = "END"
+    return "COMPLETE", None, history, task
+
+NAME_TO_FUNC = {
+    "MAIN": call_main,
+    "UPDATE-TASK": call_set_task,
+    "SEARCH": call_search,
+    "COMPLETE": end_fn,
+}
+
+
+def run_action(purpose, task, history, directory, action_name, action_input):
+    if "RESPONSE" in action_name:
+        task = "END"
+        return action_name, action_input, history, task
+
+    # compress the history when it is long
+    if len(history.split("\n")) > MAX_HISTORY:
+        if VERBOSE:
+            print("COMPRESSING HISTORY")
+        history = compress_history(purpose, task, history, directory)
+
+    assert action_name in NAME_TO_FUNC
+
+    print("RUN: ", action_name, action_input)
+    return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
+
+
+def run(purpose, hist):
 
+    print(purpose)
+    print(hist)
+    task = None
+    directory = "./"
+    history = ""
+    action_name = "UPDATE-TASK" if task is None else "MAIN"
+    action_input = None
+    while True:
+        print("")
+        print("")
+        print("---")
+        print("purpose:", purpose)
+        print("task:", task)
+        print("---")
+        print(history)
+        print("---")
+
+        action_name, action_input, history, task = run_action(
+            purpose,
+            task,
+            history,
+            directory,
+            action_name,
+            action_input,
+        )
+        if task == "END":
+            return history
```
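Taken together, `run` seeds the loop with an `UPDATE-TASK` action, then dispatches through `NAME_TO_FUNC` until a handler sets `task = "END"`. A minimal driving sketch (the purpose string is illustrative):

```python
# Sketch: drive the agent loop directly, outside the Gradio UI.
final_history = run("add unit tests for the utils module", hist=[])
print(final_history)  # full observation/thought/action transcript
```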
```diff
+################################################
+
+# NOTE: redefines format_prompt identically to the version above; the second
+# definition simply shadows the first.
+def format_prompt(message, history):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {message} [/INST]"
+    return prompt
+
+agents = [
+    "WEB_DEV",
+    "AI_SYSTEM_PROMPT",
+    "PYTHON_CODE_DEV",
+]
+
+def generate(
+    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+):
+    seed = random.randint(1, 1111111111111111)
+
+    agent = prompts.WEB_DEV
+    if agent_name == "WEB_DEV":
+        agent = prompts.WEB_DEV
+    if agent_name == "AI_SYSTEM_PROMPT":
+        agent = prompts.AI_SYSTEM_PROMPT
+    if agent_name == "PYTHON_CODE_DEV":
+        agent = prompts.PYTHON_CODE_DEV
+    system_prompt = agent
+    temperature = float(temperature)
+    if temperature < 1e-2:
+        temperature = 1e-2
+    top_p = float(top_p)
+
+    generate_kwargs = dict(
+        temperature=temperature,
+        max_new_tokens=max_new_tokens,
+        top_p=top_p,
+        repetition_penalty=repetition_penalty,
+        do_sample=True,
+        seed=seed,
+    )
+
+    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+    output = ""
+
+    for response in stream:
+        output += response.token.text
+        yield output
+    return output
```
+
additional_inputs=[
|
413 |
+
gr.Dropdown(
|
414 |
+
label="Agents",
|
415 |
+
choices=[s for s in agents],
|
416 |
+
value=agents[0],
|
417 |
+
interactive=True,
|
418 |
+
),
|
419 |
+
gr.Textbox(
|
420 |
+
label="System Prompt",
|
421 |
+
max_lines=1,
|
422 |
+
interactive=True,
|
423 |
+
),
|
424 |
+
gr.Slider(
|
425 |
+
label="Temperature",
|
426 |
+
value=0.9,
|
427 |
+
minimum=0.0,
|
428 |
+
maximum=1.0,
|
429 |
+
step=0.05,
|
430 |
+
interactive=True,
|
431 |
+
info="Higher values produce more diverse outputs",
|
432 |
+
),
|
433 |
+
|
434 |
+
gr.Slider(
|
435 |
+
label="Max new tokens",
|
436 |
+
value=1048*10,
|
437 |
+
minimum=0,
|
438 |
+
maximum=1048*10,
|
439 |
+
step=64,
|
440 |
+
interactive=True,
|
441 |
+
info="The maximum numbers of new tokens",
|
442 |
+
),
|
443 |
+
gr.Slider(
|
444 |
+
label="Top-p (nucleus sampling)",
|
445 |
+
value=0.90,
|
446 |
+
minimum=0.0,
|
447 |
+
maximum=1,
|
448 |
+
step=0.05,
|
449 |
+
interactive=True,
|
450 |
+
info="Higher values sample more low-probability tokens",
|
451 |
+
),
|
452 |
+
gr.Slider(
|
453 |
+
label="Repetition penalty",
|
454 |
+
value=1.2,
|
455 |
+
minimum=1.0,
|
456 |
+
maximum=2.0,
|
457 |
+
step=0.05,
|
458 |
+
interactive=True,
|
459 |
+
info="Penalize repeated tokens",
|
460 |
+
),
|
461 |
+
|
462 |
+
|
463 |
+
]
|
464 |
+
|
465 |
+
examples=[["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None, ],
|
466 |
+
["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None,],
|
467 |
+
["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None,],
|
468 |
+
["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None,],
|
469 |
+
["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None,],
|
470 |
+
["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None,],
|
471 |
+
]
|
472 |
+
|
473 |
+
|
474 |
+
gr.ChatInterface(
|
475 |
+
fn=run,
|
476 |
+
chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
|
477 |
+
title="Mixtral 46.7B\nMicro-Agent\nInternet Search",
|
478 |
+
examples=examples,
|
479 |
+
concurrency_limit=20,
|
480 |
+
).launch(show_api=False)
|