Update app.py
app.py
CHANGED
@@ -5,12 +5,9 @@ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
 import black
 from pylint import lint
 from io import StringIO
-import
+import together
 import sys
 
-# Set your OpenAI API key here
-openai.api_key = "YOUR_OPENAI_API_KEY"
-
 HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
 PROJECT_ROOT = "projects"
 AGENT_DIRECTORY = "agents"
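Note: this hunk drops the hardcoded `openai.api_key = "YOUR_OPENAI_API_KEY"` placeholder and replaces the dangling `import` with `import together`, but the commit does not show how `together` gets its credentials. A minimal sketch, assuming the usual pattern of reading the key from the environment (the variable name TOGETHER_API_KEY is an assumption, not taken from this diff):

import os

# Assumption: the key lives in the Space's secrets/environment, not in app.py.
TOGETHER_API_KEY = os.environ.get("TOGETHER_API_KEY")
if TOGETHER_API_KEY is None:
    raise RuntimeError("Set TOGETHER_API_KEY in the Space settings before starting the app.")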
@@ -38,20 +35,25 @@ class AIAgent:
 
     def create_agent_prompt(self):
         skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
-        agent_prompt =
+        agent_prompt = (
+            f"""
 As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
 {skills_str}
-
 I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
 """
+        )
         return agent_prompt
 
     def autonomous_build(self, chat_history, workspace_projects):
         """
         Autonomous build logic that continues based on the state of chat history and workspace projects.
         """
-        summary =
-
+        summary = (
+            "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
+        )
+        summary += (
+            "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
+        )
 
         # Analyze chat history and workspace projects to suggest actions
         # Example:
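Note: the rewritten `summary` assumes `chat_history` is a list of (user, agent) string pairs and `workspace_projects` is a dict. A quick standalone sketch with made-up sample data (illustrative only, not taken from the app):

chat_history = [("Build me a landing page", "Sure, starting with index.html")]
workspace_projects = {"landing_page": {"files": ["index.html"]}}

summary = (
    "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
)
summary += (
    "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
)
print(summary)
# Chat History:
# User: Build me a landing page
# Agent: Sure, starting with index.html
#
# Workspace Projects:
# landing_page: {'files': ['index.html']}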
@@ -64,7 +66,9 @@ I am confident that I can leverage my expertise to assist you in developing and
         # - Check if the user has requested to analyze sentiment
 
         # Generate a response based on the analysis
-        next_step =
+        next_step = (
+            "Based on the current state, the next logical step is to implement the main application logic."
+        )
 
         return summary, next_step
 
@@ -106,8 +110,10 @@ def chat_interface_with_agent(input_text, agent_name):
     except EnvironmentError as e:
         return f"Error loading model: {e}"
 
-    combined_input =
-
+    combined_input = (
+        f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
+    )
+
     input_ids = tokenizer.encode(combined_input, return_tensors="pt")
     max_input_length = 900
     if input_ids.shape[1] > max_input_length:
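Note: `combined_input` is encoded and, per the context lines, checked against `max_input_length = 900`; the hunk ends at the `if`, so the overflow handling is not visible here. A standalone sketch under the assumption that overflow is handled by keeping the most recent tokens (the "gpt2" checkpoint below is only a placeholder, not the model the agent actually loads):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder checkpoint
combined_input = "You are an elite expert developer...\n\nUser: hello\nAgent:"

input_ids = tokenizer.encode(combined_input, return_tensors="pt")
max_input_length = 900
if input_ids.shape[1] > max_input_length:
    # assumption: truncate from the left so the most recent turns stay in context
    input_ids = input_ids[:, -max_input_length:]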
@@ -152,32 +158,28 @@ def code_editor_interface(code):
 
 # Text summarization tool
 def summarize_text(text):
-    summarizer = pipeline("summarization")
+    summarizer = pipeline("summarization", model="t5-base")
     summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
     return summary[0]['summary_text']
 
 # Sentiment analysis tool
 def sentiment_analysis(text):
-    analyzer = pipeline("sentiment-analysis")
+    analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
     result = analyzer(text)
     return result[0]['label']
 
 # Text translation tool (code translation)
 def translate_code(code, source_language, target_language):
-    # Use a Hugging Face translation model instead of OpenAI
     translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")  # Example: English to Spanish
     translated_code = translator(code, target_lang=target_language)[0]['translation_text']
     return translated_code
 
 def generate_code(code_idea):
-    # Use a Hugging Face code generation model instead of OpenAI
     generator = pipeline('text-generation', model='bigcode/starcoder')
     generated_code = generator(code_idea, max_length=1000, num_return_sequences=1)[0]['generated_text']
     return generated_code
 
 def chat_interface(input_text):
-    """Handles general chat interactions with the user."""
-    # Use a Hugging Face chatbot model or your own logic
     chatbot = pipeline("text-generation", model="microsoft/DialoGPT-medium")
     response = chatbot(input_text, max_length=50, num_return_sequences=1)[0]['generated_text']
     return response
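Note: pinning explicit checkpoints (t5-base, distilbert-base-uncased-finetuned-sst-2-english) makes the summarization and sentiment tools reproducible instead of relying on the pipeline defaults. A short usage sketch of the two helpers as defined above (outputs are indicative; exact wording depends on the checkpoints):

from transformers import pipeline

summarizer = pipeline("summarization", model="t5-base")
analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")

text = "Streamlit lets you build data apps in pure Python. " * 10
print(summarizer(text, max_length=130, min_length=30, do_sample=False)[0]['summary_text'])
print(analyzer("I love this toolkit")[0]['label'])  # expected: 'POSITIVE'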
@@ -197,7 +199,7 @@ def add_code_to_workspace(project_name, code, file_name):
     project_path = os.path.join(PROJECT_ROOT, project_name)
     if not os.path.exists(project_path):
         return f"Project '{project_name}' does not exist."
-
+
     file_path = os.path.join(project_path, file_name)
     with open(file_path, "w") as file:
         file.write(code)
@@ -233,7 +235,7 @@ elif app_mode == "Tool Box":
     if st.button("Send"):
         chat_response = chat_interface(chat_input)
         st.session_state.chat_history.append((chat_input, chat_response))
-        st.write(f"CodeCraft: {chat_response}")
+        st.write((f"CodeCraft: {chat_response}"))
 
     # Terminal Interface
     st.subheader("Terminal")
@@ -256,14 +258,14 @@ elif app_mode == "Tool Box":
     text_to_summarize = st.text_area("Enter text to summarize:")
     if st.button("Summarize"):
         summary = summarize_text(text_to_summarize)
-        st.write(f"Summary: {summary}")
+        st.write((f"Summary: {summary}"))
 
     # Sentiment Analysis Tool
     st.subheader("Sentiment Analysis")
     sentiment_text = st.text_area("Enter text for sentiment analysis:")
     if st.button("Analyze Sentiment"):
         sentiment = sentiment_analysis(sentiment_text)
-        st.write(f"Sentiment: {sentiment}")
+        st.write((f"Sentiment: {sentiment}"))
 
     # Text Translation Tool (Code Translation)
     st.subheader("Translate Code")
@@ -313,26 +315,26 @@ elif app_mode == "Workspace Chat App":
     if st.button("Get Guidance"):
         chat_response = chat_interface(chat_input)
         st.session_state.chat_history.append((chat_input, chat_response))
-        st.write(f"CodeCraft: {chat_response}")
+        st.write((f"CodeCraft: {chat_response}"))
 
     # Display Chat History
     st.subheader("Chat History")
     for user_input, response in st.session_state.chat_history:
-        st.write(f"User: {user_input}")
-        st.write(f"CodeCraft: {response}")
+        st.write((f"User: {user_input}"))
+        st.write((f"CodeCraft: {response}"))
 
     # Display Terminal History
     st.subheader("Terminal History")
     for command, output in st.session_state.terminal_history:
-        st.write(f"Command: {command}")
+        st.write((f"Command: {command}"))
         st.code(output, language="bash")
 
     # Display Projects and Files
     st.subheader("Workspace Projects")
     for project, details in st.session_state.workspace_projects.items():
-        st.write(f"Project: {project}")
+        st.write((f"Project: {project}"))
         for file in details['files']:
-            st.write(f" - {file}")
+            st.write((f" - {file}"))
 
     # Chat with AI Agents
     st.subheader("Chat with AI Agents")
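Note: the display loops above read st.session_state.chat_history, st.session_state.terminal_history, and st.session_state.workspace_projects, so those keys must be initialized earlier in app.py (not visible in this diff). A typical guard, shown only as an assumption about what the rest of the file does:

import streamlit as st

# assumed initialization somewhere before these loops run
for key, default in (("chat_history", []), ("terminal_history", []), ("workspace_projects", {})):
    if key not in st.session_state:
        st.session_state[key] = default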
@@ -341,7 +343,7 @@ elif app_mode == "Workspace Chat App":
     if st.button("Send to Agent"):
         agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
         st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
-        st.write(f"{selected_agent}: {agent_chat_response}")
+        st.write((f"{selected_agent}: {agent_chat_response}"))
 
     # Automate Build Process
     st.subheader("Automate Build Process")
@@ -351,4 +353,4 @@ elif app_mode == "Workspace Chat App":
         st.write("Autonomous Build Summary:")
         st.write(summary)
         st.write("Next Step:")
-        st.write(next_step)
+        st.write(next_step)