Update app.py
app.py
CHANGED
@@ -1,11 +1,10 @@
 import os
 import subprocess
 import random
-from
+from transformers import pipeline
 import gradio as gr
 from safe_search import safe_search
-from i_search import google
-from i_search import i_search as i_s
+from i_search import google, i_search as i_s
 from agent import (
     ACTION_PROMPT,
     ADD_PROMPT,
@@ -21,21 +20,6 @@ from agent import (
 )
 from utils import parse_action, parse_file_content, read_python_module_structure
 from datetime import datetime
-now = datetime.now()
-date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
-
-client = InferenceClient(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1"
-)
-
-############################################
-
-
-VERBOSE = False
-MAX_HISTORY = 5
-#MODEL = "gpt-3.5-turbo" # "gpt-4"
-
-TASK_PROMPT = """Task: {task}\nHistory:\n{history}"""

 def format_prompt(message, history):
     prompt = "<s>"
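Note on this hunk: it drops the hosted-inference setup (a huggingface_hub InferenceClient pointed at Mixtral-8x7B-Instruct) together with the VERBOSE, MAX_HISTORY and TASK_PROMPT constants. For contrast with the local pipeline introduced further down, here is a minimal sketch of the remote call style being removed, assuming the huggingface_hub client API (the removed file's actual call sites are outside this hunk):

from huggingface_hub import InferenceClient

# Remote text generation against a hosted checkpoint; this is the role the
# deleted client object played.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
text = client.text_generation("Hello", max_new_tokens=32)
print(text)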
@@ -45,8 +29,6 @@ def format_prompt(message, history):
     prompt += f"[INST] {message} [/INST]"
     return prompt

-
-
 def run_gpt(
     prompt_template,
     stop_tokens,
@@ -65,25 +47,19 @@ def run_gpt(
         seed=seed,
     )

-
     content = PREFIX.format(
-        date_time_str=
+        date_time_str=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
         purpose=purpose,
         safe_search=safe_search,
     ) + prompt_template.format(**prompt_kwargs)
-    if
+    if True:
         print(LOG_PROMPT.format(content))
-
-
-    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    #formatted_prompt = format_prompt(f'{content}', history)

-
-
-
-        resp += response.token.text
+    model = pipeline('text-generation', model='microsoft/DialoGPT-small')
+    response = model(content, max_length=max_tokens, temperature=1.0)
+    resp = response[0]['generated_text']

-    if
+    if True:
         print(LOG_RESPONSE.format(resp))
     return resp

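Note on this hunk: run_gpt now generates locally through a transformers pipeline instead of accumulating streamed tokens. A minimal usage sketch of that call pattern, assuming the same microsoft/DialoGPT-small checkpoint; two behaviours worth knowing are that max_length counts the prompt tokens as well, and that the returned generated_text echoes the prompt by default:

from transformers import pipeline

generator = pipeline('text-generation', model='microsoft/DialoGPT-small')

prompt = "Task: say hello\nHistory:\n"  # illustrative prompt only
# max_new_tokens bounds just the continuation; temperature only matters
# when do_sample=True.
out = generator(prompt, max_new_tokens=64, do_sample=True, temperature=0.7)

# generated_text includes the prompt; slice it off to keep the completion.
resp = out[0]['generated_text'][len(prompt):]
print(resp)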
@@ -97,25 +73,24 @@ def compress_history(purpose, task, history, directory):
         task=task,
         history=history,
     )
-
-
-
+    return resp
+
 def call_search(purpose, task, history, directory, action_input):
     print("CALLING SEARCH")
     try:
-
+
         if "http" in action_input:
             if "<" in action_input:
-                action_input = action_input.
+                action_input = action_input.replace("<", "")
             if ">" in action_input:
-                action_input = action_input.
+                action_input = action_input.replace(">", "")
-
+
             response = i_s(action_input)
             #response = google(search_return)
             print(response)
             history += "observation: search result is: {}\n".format(response)
         else:
-            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=
+            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input = {}\n".format(action_input)
     except Exception as e:
         history += "observation: {}'\n".format(e)
     return "MAIN", None, history, task
@@ -136,21 +111,21 @@ def call_main(purpose, task, history, directory, action_input):
         if line.startswith("thought: "):
             history += "{}\n".format(line)
         elif line.startswith("action: "):
-
-            action_name, action_input =
+
+            action_name, action_input = line.split(": ")
             print (f'ACTION_NAME :: {action_name}')
             print (f'ACTION_INPUT :: {action_input}')
-
+
             history += "{}\n".format(line)
             if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                task = "
+                task = "COMPLETE"
                 return action_name, action_input, history, task
             else:
                 return action_name, action_input, history, task
         else:
             history += "{}\n".format(line)
             #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
-
+
         #return action_name, action_input, history, task
         #assert False, "unknown action: {}".format(line)
     return "MAIN", None, history, task
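Note on this hunk: the action line is now unpacked with line.split(": "). A quick sketch of what that yields for a representative line (hypothetical input; a line containing more than one ": " would make the two-value unpack raise ValueError):

line = "action: SEARCH action_input=https://example.com"
action_name, action_input = line.split(": ")
print(action_name)   # 'action'
print(action_input)  # 'SEARCH action_input=https://example.com'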
@@ -169,7 +144,7 @@ def call_set_task(purpose, task, history, directory, action_input):
     return "MAIN", None, history, task

 def end_fn(purpose, task, history, directory, action_input):
-    task = "
+    task = "COMPLETE"
     return "COMPLETE", "COMPLETE", history, task

 NAME_TO_FUNC = {
@@ -184,21 +159,21 @@ def run_action(purpose, task, history, directory, action_name, action_input):
     print(f'action_name::{action_name}')
     try:
         if "RESPONSE" in action_name or "COMPLETE" in action_name:
-            action_name=
-            task="
+            action_name = action_name
+            task = "COMPLETE"
             return action_name, "COMPLETE", history, task
-
+
         # compress the history when it is long
-        if len(history.split("\n")) >
-            if
+        if len(history.split("\n")) > 5:
+            if True:
                 print("COMPRESSING HISTORY")
                 history = compress_history(purpose, task, history, directory)
         if not action_name in NAME_TO_FUNC:
-            action_name=
-        if action_name == ""
-            action_name=
+            action_name = action_name
+        if action_name == "MAIN":
+            action_name = action_name
         assert action_name in NAME_TO_FUNC
-
+
         print("RUN: ", action_name, action_input)
         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
     except Exception as e:
@@ -207,18 +182,18 @@ def run_action(purpose, task, history, directory, action_name, action_input):
         return "MAIN", None, history, task

 def run(purpose,history):
-
+
     #print(purpose)
     #print(hist)
-    task=
-    directory="
+    task = "MAIN"
+    directory = "MAIN"
     if history:
-        history=
+        history = history
     if not history:
         history = ""
-
-    action_name = "
-    action_input =
+
+    action_name = "MAIN"
+    action_input = ""
     while True:
         print("")
         print("")
@@ -239,21 +214,10 @@ def run(purpose,history):
         )
         yield (history)
         #yield ("",[(purpose,history)])
-        if task == "
+        if task == "COMPLETE":
             return (history)
             #return ("", [(purpose,history)])

-
-
-################################################
-
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
 agents =[
     "WEB_DEV",
     "AI_SYSTEM_PROMPT",
@@ -270,7 +234,7 @@ def generate(
     if agent_name == "AI_SYSTEM_PROMPT":
         agent = prompts.AI_SYSTEM_PROMPT
     if agent_name == "PYTHON_CODE_DEV":
-        agent = prompts.PYTHON_CODE_DEV
+        agent = prompts.PYTHON_CODE_DEV
     system_prompt=agent
     temperature = float(temperature)
     if temperature < 1e-2:
@@ -287,12 +251,10 @@ def generate(
     )

     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-
-
+    model = pipeline('text-generation', model='microsoft/DialoGPT-small')
+    response = model(formatted_prompt, max_length=1024, temperature=1.0)
+    output = response[0]['generated_text']

-    for response in stream:
-        output += response.token.text
-        yield output
     return output


@@ -359,28 +321,6 @@ examples=[["What are the biggest news stories today?", None, None, None, None, N
     ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None,],
 ]

-'''
-gr.ChatInterface(
-    fn=run,
-    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
-    examples=examples,
-    concurrency_limit=20,
-with gr.Blocks() as ifacea:
-    gr.HTML("""TEST""")
-    ifacea.launch()
-).launch()
-with gr.Blocks() as iface:
-    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    chatbot=gr.Chatbot()
-    msg = gr.Textbox()
-    with gr.Row():
-        submit_b = gr.Button()
-        clear = gr.ClearButton([msg, chatbot])
-    submit_b.click(run, [msg,chatbot],[msg,chatbot])
-    msg.submit(run, [msg, chatbot], [msg, chatbot])
-    iface.launch()
-'''
 gr.ChatInterface(
     fn=run,
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),