Update app.py
app.py
CHANGED
@@ -1,283 +1,108 @@
 import os
-import subprocess
-import random
 from huggingface_hub import InferenceClient
 import gradio as gr
-
-from
-
-[old lines 8-61 are not rendered in the diff view]
-        repetition_penalty=1.0,
-        do_sample=True,
-        seed=seed,
-    )
-
-    content = PREFIX.format(
-        date_time_str=date_time_str,
-        purpose=purpose,
-        safe_search=safe_search,
-    ) + prompt_template.format(**prompt_kwargs)
-    if VERBOSE:
-        print(LOG_PROMPT.format(content))
-
-    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    #formatted_prompt = format_prompt(f'{content}', history)
-
-    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    resp = ""
-    for response in stream:
-        resp += response.token.text
-
-    if VERBOSE:
-        print(LOG_RESPONSE.format(resp))
-    return resp
-
-
-def compress_history(purpose, task, history, directory):
-    resp = run_gpt(
-        COMPRESS_HISTORY_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        max_tokens=512,
-        purpose=purpose,
-        task=task,
-        history=history,
-    )
-    history = "observation: {}\n".format(resp)
-    return history
-
-def call_search(purpose, task, history, directory, action_input):
-    print("CALLING SEARCH")
-    try:
-
-        if "http" in action_input:
-            if "<" in action_input:
-                action_input = action_input.strip("<")
-            if ">" in action_input:
-                action_input = action_input.strip(">")
-
-            response = i_s(action_input)
-            #response = google(search_return)
-            print(response)
-            history += "observation: search result is: {}\n".format(response)
-        else:
-            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
-    except Exception as e:
-        history += "observation: {}'\n".format(e)
-    return "MAIN", None, history, task
-
-def call_main(purpose, task, history, directory, action_input):
-    resp = run_gpt(
-        ACTION_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        max_tokens=2096,
-        purpose=purpose,
-        task=task,
-        history=history,
-    )
-    lines = resp.strip().strip("\n").split("\n")
-    for line in lines:
-        if line == "":
-            continue
-        if line.startswith("thought: "):
-            history += "{}\n".format(line)
-        elif line.startswith("action: "):
-
-            action_name, action_input = parse_action(line)
-            print(f'ACTION_NAME :: {action_name}')
-            print(f'ACTION_INPUT :: {action_input}')
-
-            history += "{}\n".format(line)
-            if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                task = "END"
-                return action_name, action_input, history, task
-            else:
-                return action_name, action_input, history, task
-        else:
-            history += "{}\n".format(line)
-            #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
-
-            #return action_name, action_input, history, task
-            #assert False, "unknown action: {}".format(line)
-    return "MAIN", None, history, task
-
-
-def call_set_task(purpose, task, history, directory, action_input):
-    task = run_gpt(
-        TASK_PROMPT,
-        stop_tokens=[],
-        max_tokens=64,
-        purpose=purpose,
-        task=task,
-        history=history,
-    ).strip("\n")
-    history += "observation: task has been updated to: {}\n".format(task)
-    return "MAIN", None, history, task
-
-def end_fn(purpose, task, history, directory, action_input):
-    task = "END"
-    return "COMPLETE", "COMPLETE", history, task
-
-NAME_TO_FUNC = {
-    "MAIN": call_main,
-    "UPDATE-TASK": call_set_task,
-    "SEARCH": call_search,
-    "COMPLETE": end_fn,
-
 }
 
-[old lines 182-222 are not rendered in the diff view]
-    print("")
-    print("---")
-    print("purpose:", purpose)
-    print("task:", task)
-    print("---")
-    print(history)
-    print("---")
-
-    action_name, action_input, history, task = run_action(
-        purpose,
-        task,
-        history,
-        directory,
-        action_name,
-        action_input,
-    )
-    yield (history)
-    #yield ("",[(purpose,history)])
-    if task == "END":
-        return (history)
-        #return ("", [(purpose,history)])
-
-
-################################################
-
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-agents = [
-    "WEB_DEV",
-    "AI_SYSTEM_PROMPT",
-    "PYTHON_CODE_DEV"
-]
-def generate(
-    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
-):
-    seed = random.randint(1, 1111111111111111)
-
-    agent = prompts.WEB_DEV
-    if agent_name == "WEB_DEV":
-        agent = prompts.WEB_DEV
-    if agent_name == "AI_SYSTEM_PROMPT":
-        agent = prompts.AI_SYSTEM_PROMPT
-    if agent_name == "PYTHON_CODE_DEV":
-        agent = prompts.PYTHON_CODE_DEV
-    system_prompt = agent
-    temperature = float(temperature)
-    if temperature < 1e-2:
-        temperature = 1e-2
-    top_p = float(top_p)
 
     generate_kwargs = dict(
-        temperature=temperature,
         max_new_tokens=max_new_tokens,
         top_p=top_p,
         repetition_penalty=repetition_penalty,
@@ -285,119 +110,122 @@ def generate(
         seed=seed,
     )
 
-    formatted_prompt = format_prompt(
-
-    output =
-
-
-        output += response.token.text
-        yield output
-    return output
-
-
-additional_inputs = [
-    gr.Dropdown(
-        label="Agents",
-        choices=[s for s in agents],
-        value=agents[0],
-        interactive=True,
-    ),
-    gr.Textbox(
-        label="System Prompt",
-        max_lines=1,
-        interactive=True,
-    ),
-    gr.Slider(
-        label="Temperature",
-        value=0.9,
-        minimum=0.0,
-        maximum=1.0,
-        step=0.05,
-        interactive=True,
-        info="Higher values produce more diverse outputs",
-    ),
-
-    gr.Slider(
-        label="Max new tokens",
-        value=1048*10,
-        minimum=0,
-        maximum=1048*10,
-        step=64,
-        interactive=True,
-        info="The maximum numbers of new tokens",
-    ),
-    gr.Slider(
-        label="Top-p (nucleus sampling)",
-        value=0.90,
-        minimum=0.0,
-        maximum=1,
-        step=0.05,
-        interactive=True,
-        info="Higher values sample more low-probability tokens",
-    ),
-    gr.Slider(
-        label="Repetition penalty",
-        value=1.2,
-        minimum=1.0,
-        maximum=2.0,
-        step=0.05,
-        interactive=True,
-        info="Penalize repeated tokens",
-    ),
 
 
 ]
 
-examples = [
-    ["Create a simple web application using Flask",
-    ["Generate a Python script to perform a linear regression analysis",
-    ["Create a Dockerfile for a Node.js application",
-
-    ["Generate a SQL query to retrieve the top 10 most popular products by sales", agents[4], None, None, None, None, ],
-    ["Write a Python script to generate a random password with a given length and complexity", agents[2], None, None, None, None, ],
-    ["Create a simple game in Unity using C#", agents[0], None, None, None, None, ],
-    ["Generate a Java program to implement a binary search algorithm", agents[2], None, None, None, None, ],
-    ["Write a shell script to monitor the CPU usage of a server", agents[1], None, None, None, None, ],
-    ["Create a simple web application using React and Node.js", agents[0], None, None, None, None, ],
-    ["Generate a Python script to perform a sentiment analysis on a given text", agents[2], None, None, None, None, ],
-    ["Write a shell script to automate the backup of a MySQL database", agents[1], None, None, None, None, ],
-    ["Create a simple game in Unreal Engine using C++", agents[3], None, None, None, None, ],
-    ["Generate a Java program to implement a bubble sort algorithm", agents[2], None, None, None, None, ],
-    ["Write a shell script to monitor the memory usage of a server", agents[1], None, None, None, None, ],
-    ["Create a simple web application using Angular and Node.js", agents[0], None, None, None, None, ],
-    ["Generate a Python script to perform a text classification on a given dataset", agents[2], None, None, None, None, ],
-    ["Write a shell script to automate the installation of a software package on a server", agents[1], None, None, None, None, ],
-    ["Create a simple game in Godot using GDScript", agents[3], None, None, None, None, ],
-    ["Generate a Java program to implement a merge sort algorithm", agents[2], None, None, None, None, ],
-    ["Write a shell script to automate the cleanup of temporary files on a server", agents[1], None, None, None, None, ],
 ]
 
-'''
-gr.ChatInterface(
-    fn=run,
-    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
-    examples=examples,
-    concurrency_limit=20,
-with gr.Blocks() as ifacea:
-    gr.HTML("""TEST""")
-ifacea.launch()
-).launch()
-with gr.Blocks() as iface:
-    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    chatbot = gr.Chatbot()
-    msg = gr.Textbox()
-    with gr.Row():
-        submit_b = gr.Button()
-        clear = gr.ClearButton([msg, chatbot])
-    submit_b.click(run, [msg, chatbot], [msg, chatbot])
-    msg.submit(run, [msg, chatbot], [msg, chatbot])
-iface.launch()
-'''
 gr.ChatInterface(
-    fn=
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-
     examples=examples,
     concurrency_limit=20,
-).launch(show_api=
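Reviewer note: the block removed above implemented a ReAct-style control loop. run_gpt streamed completions from the InferenceClient, the model emitted thought:/action: lines, parse_action split each action into a name and an input, and NAME_TO_FUNC dispatched to call_main, call_search, call_set_task, or end_fn until the task reached "END". The driver that consumed NAME_TO_FUNC is not fully visible in the hunk; a minimal sketch of how run_action (called in the removed loop) presumably tied the table together, offered as an assumption rather than recovered code:

```python
# Hypothetical reconstruction; NAME_TO_FUNC and the call_* handlers
# are the removed definitions shown above.
def run_action(purpose, task, history, directory, action_name, action_input):
    # Unknown action names fall back to the MAIN handler.
    fn = NAME_TO_FUNC.get(action_name, call_main)
    return fn(purpose, task, history, directory, action_input)
```

The old examples list also indexed agents[3] and agents[4] into a three-element agents list, one of several latent bugs this rewrite clears out. The replacement file, shown with + markers below, drops the loop entirely in favor of a Streamlit UI plus a local transformers model.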
 import os
 from huggingface_hub import InferenceClient
 import gradio as gr
+import random
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+import subprocess
+import threading
+import time
+import json
+import streamlit as st
+
+# Initialize the session state
+if 'current_state' not in st.session_state:
+    st.session_state.current_state = None
+# Initialize the InferenceClient for Mixtral-8x7B-Instruct-v0.1
+client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+
+# Load the model and tokenizer from a different repository
+model_name = "bigscience/bloom-1b7"
+model = AutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+# Define the agents
+agents = {
+    "WEB_DEV": {
+        "description": "Expert in web development technologies and frameworks.",
+        "skills": ["HTML", "CSS", "JavaScript", "React", "Vue.js", "Flask", "Django", "Node.js", "Express.js"],
+        "system_prompt": "You are a web development expert. Your goal is to assist the user in building and deploying web applications. Provide code snippets, explanations, and guidance on best practices.",
+    },
+    "AI_SYSTEM_PROMPT": {
+        "description": "Expert in designing and implementing AI systems.",
+        "skills": ["Machine Learning", "Deep Learning", "Natural Language Processing", "Computer Vision", "Reinforcement Learning"],
+        "system_prompt": "You are an AI system expert. Your goal is to assist the user in designing and implementing AI systems. Provide code snippets, explanations, and guidance on best practices.",
+    },
+    "PYTHON_CODE_DEV": {
+        "description": "Expert in Python programming and development.",
+        "skills": ["Python", "Data Structures", "Algorithms", "Object-Oriented Programming", "Functional Programming"],
+        "system_prompt": "You are a Python code development expert. Your goal is to assist the user in writing and debugging Python code. Provide code snippets, explanations, and guidance on best practices.",
+    },
+    "CODE_REVIEW_ASSISTANT": {
+        "description": "Expert in code review and quality assurance.",
+        "skills": ["Code Style", "Best Practices", "Security", "Performance", "Maintainability"],
+        "system_prompt": "You are a code review assistant. Your goal is to assist the user in reviewing code for quality and efficiency. Provide feedback on code style, best practices, security, performance, and maintainability.",
+    },
+    "CONTENT_WRITER_EDITOR": {
+        "description": "Expert in content writing and editing.",
+        "skills": ["Grammar", "Style", "Clarity", "Conciseness", "SEO"],
+        "system_prompt": "You are a content writer and editor. Your goal is to assist the user in creating high-quality content. Provide suggestions on grammar, style, clarity, conciseness, and SEO.",
+    },
+    "QUESTION_GENERATOR": {
+        "description": "Expert in generating questions for learning and assessment.",
+        "skills": ["Question Types", "Cognitive Levels", "Assessment Design"],
+        "system_prompt": "You are a question generator. Your goal is to assist the user in generating questions for learning and assessment. Provide questions that are relevant to the topic and aligned with the cognitive levels.",
+    },
+    "HUGGINGFACE_FILE_DEV": {
+        "description": "Expert in developing Hugging Face files for machine learning models.",
+        "skills": ["Transformers", "Datasets", "Model Training", "Model Deployment"],
+        "system_prompt": "You are a Hugging Face file development expert. Your goal is to assist the user in creating and deploying Hugging Face files for machine learning models. Provide code snippets, explanations, and guidance on best practices.",
+    },
 }
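Reviewer note: this new header wires up two generation backends, the remote InferenceClient pointed at mistralai/Mixtral-8x7B-Instruct-v0.1 and a local bigscience/bloom-1b7 model. Only the local model is used by generate() further down, so client is currently dead code (and pipeline, threading, time, and json are imported but never used). If the remote endpoint was the intent, a sketch of routing through it, reusing the same text_generation call the removed code relied on; remote_generate is a name introduced here for illustration:

```python
# Sketch only: stream a completion from the remote Mixtral endpoint
# instead of running bloom-1b7 locally.
def remote_generate(prompt: str, max_new_tokens: int = 256) -> str:
    stream = client.text_generation(
        prompt,
        max_new_tokens=max_new_tokens,
        stream=True,
        details=True,
        return_full_text=False,
    )
    return "".join(chunk.token.text for chunk in stream)
```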
 
+class AIAgent:
+    def __init__(self, name, description, skills, system_prompt):
+        self.name = name
+        self.description = description
+        self.skills = skills
+        self.system_prompt = system_prompt
+        self.active = False
+
+    def activate(self):
+        self.active = True
+
+    def deactivate(self):
+        self.active = False
+
+    def create_agent_prompt(self):
+        skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
+        agent_prompt = f"""
+As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
+{skills_str}
+I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
+"""
+        return agent_prompt
+
+    def autonomous_build(self, chat_history, workspace_projects):
+        summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
+        summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
+        next_step = "Based on the current state, the next logical step is to implement the main application logic."
+        return summary, next_step
+
+def format_prompt(message, history, agent_prompt):
+    prompt = "<s>"
+    for user_prompt, bot_response in history:
+        prompt += f"[INST] {user_prompt} [/INST]"
+        prompt += f" {bot_response}</s> "
+    prompt += f"[INST] {agent_prompt}, {message} [/INST]"
+    return prompt
+
+def generate(prompt, history, agent_name, temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
+    seed = random.randint(1, 1111111111111111)
+    agent = agents[agent_name]
+    system_prompt = agent["system_prompt"]
 
     generate_kwargs = dict(
+        temperature=float(temperature),
         max_new_tokens=max_new_tokens,
         top_p=top_p,
         repetition_penalty=repetition_penalty,
         seed=seed,
     )
 
+    formatted_prompt = format_prompt(prompt, history, system_prompt)
+    input_ids = tokenizer.encode(formatted_prompt, return_tensors="pt")
+    output = model.generate(input_ids, **generate_kwargs)
+    response = tokenizer.decode(output[0], skip_special_tokens=True)
+    return response
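Reviewer note: generate() will fail at runtime as written. transformers' model.generate() does not accept a seed keyword (unrecognized kwargs raise a validation error), and temperature/top_p are ignored unless do_sample=True is set; the decode step also returns the prompt plus the completion, since output[0] contains the input tokens. A corrected sketch under the same bloom-1b7 setup, using transformers' set_seed for reproducibility (generate_fixed is a name introduced here, not the author's):

```python
from transformers import set_seed

def generate_fixed(prompt, history, agent_name, temperature=0.9,
                   max_new_tokens=256, top_p=0.95, repetition_penalty=1.0):
    set_seed(random.randint(1, 2**31 - 1))   # generate() has no seed kwarg
    system_prompt = agents[agent_name]["system_prompt"]
    formatted_prompt = format_prompt(prompt, history, system_prompt)
    input_ids = tokenizer.encode(formatted_prompt, return_tensors="pt")
    output = model.generate(
        input_ids,
        do_sample=True,                       # required for temperature/top_p
        temperature=float(temperature),
        top_p=float(top_p),
        repetition_penalty=repetition_penalty,
        max_new_tokens=max_new_tokens,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Slice off the prompt so only the new completion is returned.
    return tokenizer.decode(output[0][input_ids.shape[-1]:],
                            skip_special_tokens=True)
```

Note too that format_prompt emits Mistral-style [INST] markup, which bloom-1b7 was never trained on; that template belongs with the Mixtral endpoint, not the local model.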
 
+def chat_interface(chat_input, agent_name):
+    if agents[agent_name].active:
+        response = generate(chat_input, st.session_state.chat_history, agent_name)
+        return response
+    else:
+        return "Agent is not active. Please activate the agent."
 
+def terminal_interface(command, project_name):
+    try:
+        result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_name)
+        return result.stdout if result.returncode == 0 else result.stderr
+    except Exception as e:
+        return str(e)
+
+def add_code_to_workspace(project_name, code, file_name):
+    project_path = os.path.join(os.getcwd(), project_name)
+    if not os.path.exists(project_path):
+        os.makedirs(project_path)
+    file_path = os.path.join(project_path, file_name)
+    with open(file_path, 'w') as file:
+        file.write(code)
+    if project_name not in st.session_state.workspace_projects:
+        st.session_state.workspace_projects[project_name] = {'files': []}
+    st.session_state.workspace_projects[project_name]['files'].append(file_name)
+    return f"Added {file_name} to {project_name}"
+
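Reviewer note: only current_state is initialized at the top of the file, yet chat_interface reads st.session_state.chat_history and add_code_to_workspace reads st.session_state.workspace_projects, so the first button click would raise an AttributeError on Streamlit's session state. A minimal guard, mirroring the existing current_state initialization:

```python
# Initialize the remaining session-state keys before any callback touches them.
if 'chat_history' not in st.session_state:
    st.session_state.chat_history = []        # list of (user, agent) message pairs
if 'workspace_projects' not in st.session_state:
    st.session_state.workspace_projects = {}  # project name -> {'files': [...]}
```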
+# Streamlit UI
+st.title("DevToolKit: AI-Powered Development Environment")
+
+# Project Management
+st.header("Project Management")
+project_name = st.text_input("Enter project name:")
+if st.button("Create Project"):
+    if project_name not in st.session_state.workspace_projects:
+        st.session_state.workspace_projects[project_name] = {'files': []}
+        st.success(f"Created project: {project_name}")
+    else:
+        st.warning(f"Project {project_name} already exists")
+
+# Code Addition
+st.subheader("Add Code to Workspace")
+code_to_add = st.text_area("Enter code to add to workspace:")
+file_name = st.text_input("Enter file name (e.g. 'app.py'):")
+if st.button("Add Code"):
+    add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
+    st.success(add_code_status)
+
+# Terminal Interface
+st.subheader("Terminal (Workspace Context)")
+terminal_input = st.text_input("Enter a command within the workspace:")
+if st.button("Run Command"):
+    terminal_output = terminal_interface(terminal_input, project_name)
+    st.code(terminal_output, language="bash")
+
+# Chat Interface
+st.subheader("Chat with AI Agents")
+selected_agent = st.selectbox("Select an AI agent", list(agents.keys()))
+agent_chat_input = st.text_area("Enter your message for the agent:")
+if st.button("Send to Agent"):
+    agent_chat_response = chat_interface(agent_chat_input, selected_agent)
+    st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
+    st.write(f"{selected_agent}: {agent_chat_response}")
+
+# Agent Control
+st.subheader("Agent Control")
+for agent_name in agents:
+    agent = agents[agent_name]
+    with st.expander(f"{agent_name} ({agent['description']})"):
+        if st.button(f"Activate {agent_name}"):
+            agent.activate()
+            st.success(f"{agent_name} activated.")
+        if st.button(f"Deactivate {agent_name}"):
+            agent.deactivate()
+            st.success(f"{agent_name} deactivated.")
+
+# Automate Build Process
+st.subheader("Automate Build Process")
+if st.button("Automate"):
+    # Select the appropriate agent based on the current context
+    # ...
+    # Implement the autonomous build process
+    # ...
+    pass
+
+# Display current state for debugging
+st.sidebar.subheader("Current State")
+st.sidebar.json(st.session_state.current_state)
+
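Reviewer note: the Agent Control loop calls agent.activate() and agent.deactivate(), and chat_interface above tests agents[agent_name].active, but agents maps names to plain dicts, so every one of those attribute accesses raises AttributeError and the AIAgent class is never actually used. One way to reconcile the two, assuming the dicts are meant as configuration for the class (agent_registry is a name introduced here):

```python
# Wrap each config dict in an AIAgent so .active / .activate() / .deactivate() work.
agent_registry = {
    name: AIAgent(name, cfg["description"], cfg["skills"], cfg["system_prompt"])
    for name, cfg in agents.items()
}
# e.g. in chat_interface:  if agent_registry[agent_name].active: ...
```

Even then, the active flag would reset on every interaction, because Streamlit re-executes the script top to bottom on each rerun; persisting it would mean keeping the flag in st.session_state as well.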
+# Gradio Interface
+additional_inputs = [
+    gr.Dropdown(label="Agents", choices=[s for s in agents.keys()], value=list(agents.keys())[0], interactive=True),
+    gr.Textbox(label="System Prompt", max_lines=1, interactive=True),
+    gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs"),
+    gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1000*10, step=64, interactive=True, info="The maximum numbers of new tokens"),
+    gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens"),
+    gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens"),
 ]
 
+examples = [
+    ["Create a simple web application using Flask", "WEB_DEV"],
+    ["Generate a Python script to perform a linear regression analysis", "PYTHON_CODE_DEV"],
+    ["Create a Dockerfile for a Node.js application", "AI_SYSTEM_PROMPT"],
+    # Add more examples as needed
 ]
 
 gr.ChatInterface(
+    fn=chat_interface,
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+    additional_inputs=additional_inputs,
+    title="DevToolKit AI Assistant",
     examples=examples,
     concurrency_limit=20,
+).launch(show_api=True)
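Reviewer note on this final block: gr.ChatInterface invokes its fn as fn(message, history, *additional_inputs), while chat_interface takes (chat_input, agent_name), so the chat history would land where the agent name is expected and the six extra control values would raise a TypeError. The "Max new tokens" slider is also created with value=1048*10, above its maximum of 1000*10. A hedged adapter for the fn signature, leaving the rest of the wiring unchanged (chat_fn is a name introduced here):

```python
# Adapter matching Gradio's fn(message, history, *additional_inputs) contract;
# the parameter order mirrors the additional_inputs list above.
def chat_fn(message, history, agent_name, system_prompt,
            temperature, max_new_tokens, top_p, repetition_penalty):
    # system_prompt from the Textbox is accepted but unused,
    # matching the original behavior.
    return generate(message, history or [], agent_name,
                    temperature, max_new_tokens, top_p, repetition_penalty)
```

Separately, this file now defines a full Streamlit page and then blocks on gr.ChatInterface(...).launch() in the same process; a Hugging Face Space runs one front end or the other, so in practice either the Streamlit UI or the Gradio interface should be removed.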