Update app.py
app.py CHANGED
@@ -13,9 +13,7 @@ import json
 now = datetime.now()
 date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
 
-client = InferenceClient(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1"
-)
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
 
 # --- Set up logging ---
 logging.basicConfig(
@@ -24,16 +22,14 @@ logging.basicConfig(
     format="%(asctime)s - %(levelname)s - %(message)s",
 )
 
-agents =[
+agents = [
     "WEB_DEV",
     "AI_SYSTEM_PROMPT",
     "PYTHON_CODE_DEV"
 ]
-############################################
 
 VERBOSE = True
 MAX_HISTORY = 5
-#MODEL = "gpt-3.5-turbo" # "gpt-4"
 
 PREFIX = """
 {date_time_str}
@@ -80,13 +76,12 @@ thought:
 """
 
 def format_prompt(message, history, max_history_turns=2):
-
-
-
-
-    prompt += f" {
-
-    return prompt
+    prompt = " "
+    # Keep only the last 'max_history_turns' turns
+    for user_prompt, bot_response in history[-max_history_turns:]:
+        prompt += f"[INST] {user_prompt} [/INST] {bot_response} "
+    prompt += f"[INST] {message} [/INST] "
+    return prompt
 
 def run_gpt(
     prompt_template,
@@ -95,9 +90,9 @@ def run_gpt(
     purpose,
     **prompt_kwargs,
 ):
-    seed = random.randint(1,1111111111111111)
+    seed = random.randint(1, 1111111111111111)
     logging.info(f"Seed: {seed}") # Log the seed
-
+
     content = PREFIX.format(
         date_time_str=date_time_str,
         purpose=purpose,
@@ -105,22 +100,31 @@ def run_gpt(
     ) + prompt_template.format(**prompt_kwargs)
     if VERBOSE:
         logging.info(LOG_PROMPT.format(content)) # Log the prompt
-
+
     resp = client.text_generation(content, max_new_tokens=max_tokens, stop_sequences=stop_tokens, temperature=0.7, top_p=0.8, repetition_penalty=1.5)
     if VERBOSE:
         logging.info(LOG_RESPONSE.format(resp)) # Log the response
     return resp
 
-def generate( prompt, history, agent_name=agents[0], sys_prompt="", temperature=
+def generate(
+    prompt,
+    history,
+    agent_name=agents[0],
+    sys_prompt="",
+    temperature=0.7,
+    max_new_tokens=2048,
+    top_p=0.8,
+    repetition_penalty=1.5,
+):
     content = PREFIX.format(
         date_time_str=date_time_str,
         purpose=purpose,
         safe_search=safe_search,
-    ) +
+    ) + prompt
     if VERBOSE:
         logging.info(LOG_PROMPT.format(content)) # Log the prompt
-
-    stream = client.text_generation(content,
+
+    stream = client.text_generation(content, stream=True, details=True, return_full_text=False, temperature=temperature, top_p=top_p, repetition_penalty=repetition_penalty, max_new_tokens=max_new_tokens)
     resp = ""
     for response in stream:
         resp += response.token.text
@@ -129,7 +133,6 @@ def generate( prompt, history, agent_name=agents[0], sys_prompt="", temperature=
     logging.info(LOG_RESPONSE.format(resp)) # Log the response
     return resp
 
-
 def compress_history(purpose, task, history, directory):
     resp = run_gpt(
         COMPRESS_HISTORY_PROMPT,
@@ -141,19 +144,13 @@ def compress_history(purpose, task, history, directory):
     )
     history = "observation: {}\n".format(resp)
     return history
-
+
 def call_search(purpose, task, history, directory, action_input):
     logging.info(f"CALLING SEARCH: {action_input}")
     try:
-
         if "http" in action_input:
-
-            action_input = action_input.strip("<")
-            if ">" in action_input:
-                action_input = action_input.strip(">")
-
+            action_input = action_input.strip("<>").strip()
             response = i_s(action_input)
-            #response = google(search_return)
             logging.info(f"Search Result: {response}")
             history += "observation: search result is: {}\n".format(response)
         else:
@@ -166,13 +163,13 @@ def call_main(purpose, task, history, directory, action_input):
     logging.info(f"CALLING MAIN: {action_input}")
     resp = run_gpt(
         ACTION_PROMPT,
-        stop_tokens=["observation:", "task:", "action:","thought:"],
+        stop_tokens=["observation:", "task:", "action:", "thought:"],
         max_tokens=32000,
         purpose=purpose,
         task=task,
         history=history,
     )
-    lines = resp.strip().
+    lines = resp.strip().split("\n")
     for line in lines:
         if line == "":
             continue
@@ -180,7 +177,6 @@ def call_main(purpose, task, history, directory, action_input):
             history += "{}\n".format(line)
             logging.info(f"Thought: {line}")
         elif line.startswith("action: "):
-
             action_name, action_input = parse_action(line)
             logging.info(f"Action: {action_name} - {action_input}")
             history += "{}\n".format(line)
@@ -192,13 +188,8 @@ def call_main(purpose, task, history, directory, action_input):
         else:
             history += "{}\n".format(line)
             logging.info(f"Other Output: {line}")
-            #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
-
-    #return action_name, action_input, history, task
-    #assert False, "unknown action: {}".format(line)
     return "MAIN", None, history, task
 
-
 def call_set_task(purpose, task, history, directory, action_input):
     logging.info(f"CALLING SET_TASK: {action_input}")
     task = run_gpt(
@@ -222,27 +213,26 @@ NAME_TO_FUNC = {
     "UPDATE-TASK": call_set_task,
     "SEARCH": call_search,
     "COMPLETE": end_fn,
-
 }
 
 def run_action(purpose, task, history, directory, action_name, action_input):
     logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
     try:
         if "RESPONSE" in action_name or "COMPLETE" in action_name:
-            action_name="COMPLETE"
-            task="END"
+            action_name = "COMPLETE"
+            task = "END"
             return action_name, "COMPLETE", history, task
-
+
         # compress the history when it is long
         if len(history.split("\n")) > MAX_HISTORY:
             logging.info("COMPRESSING HISTORY")
             history = compress_history(purpose, task, history, directory)
-        if not
-            action_name="MAIN"
-        if action_name == "" or action_name
-            action_name="MAIN"
+        if action_name not in NAME_TO_FUNC:
+            action_name = "MAIN"
+        if action_name == "" or action_name is None:
+            action_name = "MAIN"
         assert action_name in NAME_TO_FUNC
-
+
         logging.info(f"RUN: {action_name} - {action_input}")
         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
     except Exception as e:
@@ -250,14 +240,11 @@ def run_action(purpose, task, history, directory, action_name, action_input):
         logging.error(f"Error in run_action: {e}")
         return "MAIN", None, history, task
 
-def run(purpose,history):
-
-
-    #print(hist)
-    task=None
-    directory="./"
+def run(purpose, history):
+    task = None
+    directory = "./"
     if history:
-        history=str(history).strip("[]")
+        history = str(history).strip("[]")
     if not history:
         history = ""
 
@@ -280,23 +267,19 @@ def run(purpose,history):
             action_input,
         )
         yield (history)
-        #yield ("",[(purpose,history)])
         if task == "END":
             return (history)
-            #return ("", [(purpose,history)])
-
 
-
-
-
-
-
-
-
-
-
-    return prompt
+def parse_action(line):
+    """Parse the action line to get the action name and input."""
+    parts = line.split(":", 1)
+    if len(parts) == 2:
+        action_name = parts[0].replace("action", "").strip()
+        action_input = parts[1].strip()
+    else:
+        action_name = parts[0].replace("action", "").strip()
+        action_input = ""
+    return action_name, action_input
 
 def main():
     with gr.Blocks() as demo:
@@ -305,8 +288,7 @@ def main():
 
         # Chat Interface
         chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
-
-
+
         # Input Components
         message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
         purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
@@ -331,4 +313,18 @@ def main():
         history = gr.State([])
         examples = [
             ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
-            ["Can you help me generate a Python function to calculate the factorial of a number?", "
+            ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
+        ]
+
+        def chat(purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history):
+            prompt = format_prompt(message, history)
+            response = generate(prompt, history, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty)
+            history.append((message, response))
+            return history, history
+
+        submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])
+
+    demo.launch()
+
+if __name__ == "__main__":
+    main()
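The new parse_action helper splits an action line on its first colon only, so for lines of the form "action: NAME ..." the literal "action" prefix strips down to an empty name, which run_action then routes to "MAIN" via the new not-in-NAME_TO_FUNC guard. A quick sketch of both cases (the input strings are hypothetical):

    # Hypothetical inputs to parse_action as added in this commit.
    parse_action("UPDATE-TASK: build a landing page")
    # -> ("UPDATE-TASK", "build a landing page")
    parse_action("action: SEARCH https://example.com")
    # -> ("", "SEARCH https://example.com")  # "action" strips to an empty name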
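For reference, generate() now streams tokens through huggingface_hub's InferenceClient: with stream=True and details=True, text_generation yields detail objects whose .token.text chunks are concatenated. A minimal standalone sketch of the same call pattern (the prompt and token budget here are illustrative):

    from huggingface_hub import InferenceClient

    client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
    stream = client.text_generation("[INST] Say hi [/INST]", stream=True, details=True,
                                    return_full_text=False, max_new_tokens=32)
    resp = ""
    for response in stream:
        resp += response.token.text  # one generated token per chunk
    print(resp)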