Spaces: acecalisto3/DevToolKit (Build error)

acecalisto3 committed
Commit 83c7399 · Parent(s): a9a0e14
Update app.py

app.py CHANGED
@@ -1,452 +1,310 @@
-from huggingface_hub import InferenceClient, hf_hub_url
-import gradio as gr
-import random
 import os
 import subprocess
-import
-import
-import
-
-import
-from
-from
-…
-#
-…
-"""
-…
-#
-…
-    return {agent: agent_roles[agent]["active"] for agent in agent_roles}
-
-# Function to execute code
-def run_code(code: str) -> str:
-    """Executes the provided code and returns the output."""
     try:
-…
-    prompt = f"""
-    You are an expert agent cluster, consisting of {', '.join(agent_roles)}.
-    Respond with complete program coding to client requests.
-    Using available tools, please explain the researched information.
-    Please don't answer based solely on what you already know. Always perform a search before providing a response.
-    In special cases, such as when the user specifies a page to read, there's no need to search.
-    Please read the provided page and answer the user's question accordingly.
-    If you find that there's not much information just by looking at the search results page, consider these two options and try them out:
-    - Try clicking on the links of the search results to access and read the content of each page.
-    - Change your search query and perform a new search.
-    Users are extremely busy and not as free as you are.
-    Therefore, to save the user's effort, please provide direct answers.
-    BAD ANSWER EXAMPLE
-    - Please refer to these pages.
-    - You can write code referring these pages.
-    - Following page will be helpful.
-    GOOD ANSWER EXAMPLE
-    - This is the complete code: -- complete code here --
-    - The answer of you question is -- answer here --
-    Please make sure to list the URLs of the pages you referenced at the end of your answer. (This will allow users to verify your response.)
-    Please make sure to answer in the language used by the user. If the user asks in Japanese, please answer in Japanese. If the user asks in Spanish, please answer in Spanish.
-    But, you can go ahead and search in English, especially for programming-related questions. PLEASE MAKE SURE TO ALWAYS SEARCH IN ENGLISH FOR THOSE.
-    """
-
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-
-# Function to generate a response
-def generate(prompt: str, history: list[Tuple[str, str]], agent_roles: list[str], temperature: float = DEFAULT_TEMPERATURE, max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS, top_p: float = DEFAULT_TOP_P, repetition_penalty: float = DEFAULT_REPETITION_PENALTY) -> str:
-    """Generates a response using the selected agent roles and parameters."""
-    temperature = float(temperature)
-    if temperature < 1e-2:
-        temperature = 1e-2
-    top_p = float(top_p)
-
-    generate_kwargs = dict(
-        temperature=temperature,
-        max_new_tokens=max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        do_sample=True,
-        seed=random.randint(0, 10**7),
-    )
-
-    formatted_prompt = format_prompt(prompt, history, agent_roles)
-
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""
-
-    for response in stream:
-        output += response.token.text
-        yield output
-    return output
-
-# Function to handle user input and generate responses
-def chat_interface(message: str, history: list[Tuple[str, str]], agent_cluster: Dict[str, bool], temperature: float, max_new_tokens: int, top_p: float, repetition_penalty: float) -> Tuple[str, str]:
-    """Handles user input and generates responses."""
-    rprint(f"[bold blue]User:[/bold blue] {message})  # Log user message
-    if message.startswith("python"):
-        # User entered code, execute it
-        code = message[9:-3]
-        output = run_code(code)
-        rprint(f"[bold green]Code Output:[/bold green] {output}")  # Log code output
-        return (message, output)
     else:
-…
-        return "
-…
     try:
-…
-    )
-…
-    )
-…
-    )
-…
-            step=0.05,
-            interactive=True,
-            info="Higher values sample more low-probability tokens",
-        )
-        repetition_penalty_slider = gr.Slider(
-            label="Repetition Penalty",
-            value=DEFAULT_REPETITION_PENALTY,
-            minimum=1.0,
-            maximum=2.0,
-            step=0.05,
-            interactive=True,
-            info="Penalize repeated tokens",
-        )
-
-        # Submit Button
-        submit_button = gr.Button("Submit")
-
-        # Chat Interface Logic
-        submit_button.click(
-            chat_interface,
-            inputs=[
-                chat_interface_input,
-                chatbot,
-                get_agent_cluster,
-                temperature_slider,
-                max_new_tokens_slider,
-                top_p_slider,
-                repetition_penalty_slider,
-            ],
-            outputs=[
-                chatbot,
-                chat_interface_output,
-            ],
-        )
-
-        # --- Web App Creation ---
-        with gr.Row():
-            app_name_input = gr.Textbox(label="App Name", placeholder="Enter your app name")
-            code_output = gr.Textbox(label="Code", interactive=False)
-            create_web_app_button = gr.Button("Create Web App")
-            deploy_button = gr.Button("Deploy")
-            local_host_button = gr.Button("Local Host")
-            ship_button = gr.Button("Ship")
-            hf_token_input = gr.Textbox(label="Hugging Face Token", placeholder="Enter your Hugging Face token")
-
-        # Web App Creation Logic
-        create_web_app_button.click(
-            create_web_app_button_click,
-            inputs=[code_output],
-            outputs=[gr.Textbox(label="Status", interactive=False)],
-        )
-
-        # Deploy the web app
-        deploy_button.click(
-            deploy_button_click,
-            inputs=[app_name_input, code_output, hf_token_input],
-            outputs=[gr.Textbox(label="Status", interactive=False)],
-        )
-
-        # Local host the web app
-        local_host_button.click(
-            local_host_button_click,
-            inputs=[app_name_input, code_output],
-            outputs=[gr.Textbox(label="Status", interactive=False)],
-        )
-
-        # Ship the web app
-        ship_button.click(
-            ship_button_click,
-            inputs=[app_name_input, code_output],
-            outputs=[gr.Textbox(label="Status", interactive=False)],
-        )
-
-        # --- Connect Chat Output to Code Output ---
-        chat_interface_output.change(
-            lambda x: x,
-            inputs=[chat_interface_output],
-            outputs=[code_output],
-        )
-
-# --- Initialize Hugging Face Client ---
-client = InferenceClient(repo_id=MODEL_NAME, token=os.environ.get("HF_TOKEN"))
-
-# --- Launch Gradio ---
-demo.queue().launch(debug=True)
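Note on the removed Gradio version above: its code-execution branch tests `message.startswith("python")` but then slices as if the prefix were nine characters long (`message[9:-3]`), which only lines up if the message actually begins with a fenced ```python block. A minimal sketch of a guard whose test and slice agree (function name hypothetical):

    # Sketch only: pull code out of a ```python fenced message before running it.
    # The removed code checked startswith("python") but sliced off 9 characters,
    # the length of the "```python" fence, so the check and the slice disagreed.
    def extract_python_block(message: str):
        fence = "```python"
        body = message.strip()
        if body.startswith(fence) and body.endswith("```"):
            return body[len(fence):-3].strip()
        return None  # not a fenced Python block; treat as plain chat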
 import os
 import subprocess
+import logging
+import streamlit as st
+from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoModel, RagRetriever, AutoModelForSeq2SeqLM
+import torch
+from datetime import datetime
+from huggingface_hub import hf_hub_url, cached_download, HfApi
+from dotenv import load_dotenv
+
+# Constants
+HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
+PROJECT_ROOT = "projects"
+AGENT_DIRECTORY = "agents"
+AVAILABLE_CODE_GENERATIVE_MODELS = [
+    "bigcode/starcoder",  # Popular and powerful
+    "Salesforce/codegen-350M-mono",  # Smaller, good for quick tasks
+    "microsoft/CodeGPT-small",  # Smaller, good for quick tasks
+    "google/flan-t5-xl",  # Powerful, good for complex tasks
+    "facebook/bart-large-cnn",  # Good for text-to-code tasks
+]
+
+# Load environment variables
+load_dotenv()
+HF_TOKEN = os.getenv("HUGGING_FACE_API_KEY")
+
+# Initialize logger
+logging.basicConfig(level=logging.INFO)
+
+# Global state to manage communication between Tool Box and Workspace Chat App
+if 'chat_history' not in st.session_state:
+    st.session_state.chat_history = []
+if 'terminal_history' not in st.session_state:
+    st.session_state.terminal_history = []
+if 'workspace_projects' not in st.session_state:
+    st.session_state.workspace_projects = {}
+if 'available_agents' not in st.session_state:
+    st.session_state.available_agents = []
+if 'current_state' not in st.session_state:
+    st.session_state.current_state = {
+        'toolbox': {},
+        'workspace_chat': {}
+    }
+
+# Load pre-trained RAG retriever
+rag_retriever = RagRetriever.from_pretrained("facebook/rag-token-base")  # Use a Hugging Face RAG model
+
+# Load pre-trained chat model
+chat_model = AutoModelForSeq2SeqLM.from_pretrained("microsoft/DialoGPT-medium")  # Use a Hugging Face chat model
+
+# Load tokenizer
+tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
+
+def process_input(user_input):
+    # Input pipeline: Tokenize and preprocess user input
+    input_ids = tokenizer(user_input, return_tensors="pt").input_ids
+    attention_mask = tokenizer(user_input, return_tensors="pt").attention_mask
+
+    # RAG model: Generate response
+    output = rag_retriever(input_ids, attention_mask=attention_mask)
+    response = output.generator_outputs[0].sequences[0]
+
+    # Chat model: Refine response
+    chat_input = tokenizer(response, return_tensors="pt")
+    chat_input["input_ids"] = chat_input["input_ids"].unsqueeze(0)
+    chat_input["attention_mask"] = chat_input["attention_mask"].unsqueeze(0)
+    output = chat_model(**chat_input)
+    refined_response = output.sequences[0]
+
+    # Output pipeline: Return final response
+    return refined_response
+
+def workspace_interface(project_name):
+    project_path = os.path.join(PROJECT_ROOT, project_name)
+    if os.path.exists(project_path):
+        return f"Project '{project_name}' already exists."
+    else:
+        os.makedirs(project_path)
+        st.session_state.workspace_projects[project_name] = {'files': []}
+        return f"Project '{project_name}' created successfully."
+
+def add_code_to_workspace(project_name, code, file_name):
+    project_path = os.path.join(PROJECT_ROOT, project_name)
+    if not os.path.exists(project_path):
+        return f"Project '{project_name}' does not exist."
+
+    file_path = os.path.join(project_path, file_name)
     try:
+        with open(file_path, "w") as file:
+            file.write(code)
+        st.session_state.workspace_projects[project_name]['files'].append(file_name)
+        return f"Code added to '{file_name}' in project '{project_name}'."
+    except Exception as e:
+        logging.error(f"Error adding code: {file_name}: {e}")
+        return f"Error adding code: {file_name}"
+
+def run_code(command, project_name=None):
+    if project_name:
+        project_path = os.path.join(PROJECT_ROOT, project_name)
+        result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_path)
     else:
+        result = subprocess.run(command, shell=True, capture_output=True, text=True)
+    return result.stdout
+
+def display_chat_history(history):
+    chat_history = ""
+    for user_input, response in history:
+        chat_history += f"User: {user_input}\nAgent: {response}\n\n"
+    return chat_history
+
+def display_workspace_projects(projects):
+    workspace_projects = ""
+    for project, details in projects.items():
+        workspace_projects += f"Project: {project}\nFiles:\n"
+        for file in details['files']:
+            workspace_projects += f"  - {file}\n"
+    return workspace_projects
+
+def download_models():
+    for model in AVAILABLE_CODE_GENERATIVE_MODELS:
+        try:
+            cached_model = cached_download(model)
+            logging.info(f"Downloaded model '{model}' successfully.")
+        except Exception as e:
+            logging.error(f"Error downloading model '{model}': {e}")
+
+def deploy_space_to_hf(project_name, hf_token):
+    repository_name = f"my-awesome-space_{datetime.now().timestamp()}"
+    files = get_built_space_files()
+    commit_response = deploy_to_git(project_name, repository_name, files)
+    if commit_response:
+        publish_space(repository_name, hf_token)
+        return f"Space '{repository_name}' deployed successfully."
+    else:
+        return "Failed to commit changes to Space."
+
+def get_built_space_files():
+    projects = st.session_state.workspace_projects
+    files = []
+    for project in projects.values():
+        for file in project['files']:
+            file_path = os.path.join(PROJECT_ROOT, project['project_name'], file)
+            with open(file_path, "rb") as file:
+                files.append(file.read())
+    return files
+
+def deploy_to_git(project_name, repository_name, files):
+    project_path = os.path.join(PROJECT_ROOT, project_name)
+    git_repo_url = hf_hub_url(repository_name)
+    git = subprocess.Popen(["git", "init"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=project_path)
+    git.communicate()
+
+    git = subprocess.Popen(["git", "add", "-A"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=project_path)
+    git.communicate()
+
+    for file in files:
+        filename = "temp.txt"
+        with open("temp.txt", "wb") as temp_file:
+            temp_file.write(file)
+        git = subprocess.Popen(["git", "add", filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=project_path)
+        git.communicate()
+        os.remove("temp.txt")
+
+    git = subprocess.Popen(["git", "commit", "-m", "Initial commit"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=project_path)
+    git.communicate()
+
+    return git.returncode == 0
+
+def publish_space(repository_name, hf_token):
+    api = HfApi(token=hf_token)
+    api.create_model(repository_name, files=[], push_to_hub=True)
+
+def handle_autonomous_build():
+    if not st.session_state.workspace_projects or not st.session_state.available_agents:
+        st.error("No projects or agents available to build.")
+        return
+
+    project_name = st.session_state.workspace_projects.keys()[0]
+    selected_agent = st.session_state.available_agents[0]
+    code_idea = st.session_state.current_state["workspace_chat"]["user_input"]
+    code_generative_model = next((model for model in AVAILABLE_CODE_GENERATIVE_MODELS if model in st.session_state.current_state["toolbox"]["selected_models"]), None)
+
+    if not code_generative_model:
+        st.error("No code-generative model selected.")
+        return
+
+    logging.info(f"Building project '{project_name}' with agent '{selected_agent}' and model '{code_generative_model}'.")
+
     try:
+        # TODO: Add code to run the build process here
+        # This could include generating code, running it, and updating the workspace projects
+        # The build process should also update the UI with the build summary and next steps
+        summary, next_step = build_project(project_name, selected_agent, code_idea, code_generative_model)
+        st.write(f"Build summary: {summary}")
+        st.write(f"Next step: {next_step}")
+
+        if next_step == "Deploy to Hugging Face Hub":
+            deploy_response = deploy_space_to_hf(project_name, HF_TOKEN)
+            st.write(deploy_response)
+    except Exception as e:
+        logging.error(f"Error during build process: {e}")
+        st.error("Error during build process.")
+
+def build_project(project_name, agent, code_idea, code_generative_model):
+    # TODO: Add code to build the project here
+    # This could include generating code, running it, and updating the workspace projects
+    # The build process should also return a summary and next step
+    summary = "Project built successfully."
+    next_step = ""
+    return summary, next_step
+
+def main():
+    # Initialize the app
+    st.title("AI Agent Creator")
+
+    # Sidebar navigation
+    st.sidebar.title("Navigation")
+    app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
+
+    if app_mode == "AI Agent Creator":
+        # AI Agent Creator
+        st.header("Create an AI Agent from Text")
+
+        st.subheader("From Text")
+        agent_name = st.text_input("Enter agent name:")
+        text_input = st.text_area("Enter skills (one per line):")
+        if st.button("Create Agent"):
+            skills = text_input.split('\n')
+            try:
+                agent = AIAgent(agent_name, "AI agent created from text input", skills)
+                st.session_state.available_agents.append(agent_name)
+                st.success(f"Agent '{agent_name}' created and saved successfully.")
+            except Exception as e:
+                st.error(f"Error creating agent: {e}")
+
+    elif app_mode == "Tool Box":
+        # Tool Box
+        st.header("AI-Powered Tools")
+
+        # Chat Interface
+        st.subheader("Chat with CodeCraft")
+        chat_input = st.text_area("Enter your message:")
+        if st.button("Send"):
+            response = process_input(chat_input)
+            st.session_state.chat_history.append((chat_input, response))
+            st.write(f"CodeCraft: {response}")
+
+        # Terminal Interface
+        st.subheader("Terminal")
+        terminal_input = st.text_input("Enter a command:")
+        if st.button("Run"):
+            output = run_code(terminal_input)
+            st.session_state.terminal_history.append((terminal_input, output))
+            st.code(output, language="bash")
+
+        # Project Management
+        st.subheader("Project Management")
+        project_name_input = st.text_input("Enter Project Name:")
+        if st.button("Create Project"):
+            status = workspace_interface(project_name_input)
+            st.write(status)
+
+        code_to_add = st.text_area("Enter Code to Add to Workspace:", height=150)
+        file_name_input = st.text_input("Enter File Name (e.g., 'app.py'):")
+        if st.button("Add Code"):
+            status = add_code_to_workspace(project_name_input, code_to_add, file_name_input)
+            st.write(status)
+
+        # Display Chat History
+        st.subheader("Chat History")
+        chat_history = display_chat_history(st.session_state.chat_history)
+        st.text_area("Chat History", value=chat_history, height=200)
+
+        # Display Workspace Projects
+        st.subheader("Workspace Projects")
+        workspace_projects = display_workspace_projects(st.session_state.workspace_projects)
+        st.text_area("Workspace Projects", value=workspace_projects, height=200)
+
+        # Download and deploy models
+        if st.button("Download and Deploy Models"):
+            download_models()
+            st.info("Models downloaded and deployed.")
+
+    elif app_mode == "Workspace Chat App":
+        # Workspace Chat App
+        st.header("Workspace Chat App")
+
+        # Chat Interface with AI Agents
+        st.subheader("Chat with AI Agents")
+        selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
+        agent_chat_input = st.text_area("Enter your message for the agent:")
+        if st.button("Send to Agent"):
+            response = process_input(agent_chat_input)
+            st.session_state.chat_history.append((agent_chat_input, response))
+            st.write(f"{selected_agent}: {response}")
+
+        # Code Generation
+        st.subheader("Code Generation")
+        code_idea = st.text_input("Enter your code idea:")
+        selected_model = st.selectbox("Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
+        if st.button("Generate Code"):
+            generated_code = run_code(code_idea)
+            st.code(generated_code, language="python")
+
+        # Autonomous build process
+        if st.button("Automate Build Process"):
+            handle_autonomous_build()
+
+if __name__ == "__main__":
+    main()
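The Space header shows "Build error", and the most likely startup failure in the new file is the import `from huggingface_hub import hf_hub_url, cached_download, HfApi`: `cached_download` is deprecated and removed in recent huggingface_hub releases, so on an up-to-date image the import itself raises before Streamlit ever runs (and even where it exists, it expects a URL rather than a bare model id, so `cached_download(model)` in `download_models` would fail too). A sketch of the same pre-fetch using `snapshot_download`, the current API for caching a whole repo (the model list here is shortened for illustration):

    # Sketch, not the committed code: prefetch model repos with snapshot_download.
    # cached_download is deprecated/removed, and it took a URL, not a repo id.
    import logging
    from huggingface_hub import snapshot_download

    logging.basicConfig(level=logging.INFO)
    MODELS = ["Salesforce/codegen-350M-mono", "microsoft/CodeGPT-small"]

    def download_models() -> None:
        for model in MODELS:
            try:
                path = snapshot_download(model)  # downloads and caches the full repo
                logging.info(f"Downloaded model '{model}' to {path}.")
            except Exception as e:
                logging.error(f"Error downloading model '{model}': {e}")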
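Two of the model loads also cannot work as written: `AutoModelForSeq2SeqLM.from_pretrained("microsoft/DialoGPT-medium")` raises, because DialoGPT is a GPT-2-style causal LM with no seq2seq head, and `RagRetriever.from_pretrained("facebook/rag-token-base")` tries to pull a faiss index over the wiki_dpr dataset, which a stock Space image will not have. A sketch of `process_input` as a plain causal-LM chat turn, dropping the retriever (an assumption about the intended behavior):

    # Sketch: DialoGPT via the causal-LM auto class and generate(); the commit's
    # seq2seq class and direct retriever call do not match these model types.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
    chat_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")

    def process_input(user_input: str) -> str:
        inputs = tokenizer(user_input + tokenizer.eos_token, return_tensors="pt")
        with torch.no_grad():
            output_ids = chat_model.generate(
                **inputs,
                max_new_tokens=128,
                pad_token_id=tokenizer.eos_token_id,
            )
        # Decode only the newly generated tokens, not the echoed prompt.
        new_tokens = output_ids[0, inputs["input_ids"].shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True)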
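`handle_autonomous_build` and `get_built_space_files` each carry a runtime bug: `st.session_state.workspace_projects.keys()[0]` raises TypeError because dict views are not subscriptable, and `project['project_name']` reads a key that `workspace_interface` never stores (project entries are just `{'files': [...]}`; the name is the dict key). A sketch of both fixes against the commit's own data layout:

    # Sketch against the commit's layout: {project_name: {'files': [file_name, ...]}}.
    import os

    PROJECT_ROOT = "projects"

    def first_project_name(workspace_projects: dict) -> str:
        # dict.keys() cannot be indexed; take the first key by iteration.
        return next(iter(workspace_projects))

    def get_built_space_files(workspace_projects: dict) -> list:
        files = []
        for project_name, details in workspace_projects.items():
            for file_name in details["files"]:
                # The project name is the dict key; no 'project_name' field exists.
                file_path = os.path.join(PROJECT_ROOT, project_name, file_name)
                with open(file_path, "rb") as fh:
                    files.append(fh.read())
        return files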
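Finally, `publish_space` calls `api.create_model(...)`, which is not an `HfApi` method, so deployment would end in AttributeError even if everything upstream succeeded. Creating a Space amounts to a repo creation plus file uploads; a sketch with the documented `create_repo` and `upload_file` calls (the Streamlit SDK choice and the file mapping are assumptions):

    # Sketch: create a Space repo and push files with documented HfApi methods.
    from huggingface_hub import HfApi

    def publish_space(repository_name: str, hf_token: str, files: dict) -> None:
        # files maps a path inside the repo to raw bytes.
        api = HfApi(token=hf_token)
        repo = api.create_repo(
            repo_id=repository_name,
            repo_type="space",
            space_sdk="streamlit",  # assumption: a Streamlit Space, like this app
            exist_ok=True,
        )
        for path_in_repo, content in files.items():
            api.upload_file(
                path_or_fileobj=content,
                path_in_repo=path_in_repo,
                repo_id=repo.repo_id,
                repo_type="space",
            )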