acecalisto3 committed · Commit 58ddfa5 · verified · 1 Parent(s): e59044b

Update app.py

Files changed (1)
  1. app.py +610 -73
app.py CHANGED
--- a/app.py
@@ -1,349 +1,886 @@
  import os
  import subprocess
  import streamlit as st
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer, AutoConfig, AutoModel
- HF_TOKEN = os.getenv("HF_TOKEN")
  HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
  PROJECT_ROOT = "projects"
  AGENT_DIRECTORY = "agents"

  # Global state to manage communication between Tool Box and Workspace Chat App
  if 'chat_history' not in st.session_state:
      st.session_state.chat_history = []
  if 'terminal_history' not in st.session_state:
      st.session_state.terminal_history = []
  if 'workspace_projects' not in st.session_state:
      st.session_state.workspace_projects = {}
  if 'available_agents' not in st.session_state:
      st.session_state.available_agents = []
  if 'current_state' not in st.session_state:
      st.session_state.current_state = {
          'toolbox': {},
          'workspace_chat': {}
      }

  class AIAgent:
      def __init__(self, name, description, skills):
          self.name = name
          self.description = description
          self.skills = skills

      def create_agent_prompt(self):
          skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
          agent_prompt = f"""
  As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
  {skills_str}

  I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
  """
          return agent_prompt

      def autonomous_build(self, chat_history, workspace_projects):
          """
          Autonomous build logic that continues based on the state of chat history and workspace projects.
          """
          summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
          summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])

-         # Analyze chat history and workspace projects to suggest actions
-         # Example:
-         # - Check if the user has requested to create a new file
-         # - Check if the user has requested to install a package
-         # - Check if the user has requested to run a command
-         # - Check if the user has requested to generate code
-         # - Check if the user has requested to translate code
-         # - Check if the user has requested to summarize text
-         # - Check if the user has requested to analyze sentiment
-
-         # Generate a response based on the analysis
          next_step = "Based on the current state, the next logical step is to implement the main application logic."

          return summary, next_step

- def load_hf_token():
-     return 'YOUR_HF_TOKEN'

  def save_agent_to_file(agent):
-     """Saves the agent's prompt to a file."""
      if not os.path.exists(AGENT_DIRECTORY):
          os.makedirs(AGENT_DIRECTORY)
      file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
      with open(file_path, "w") as file:
          file.write(agent.create_agent_prompt())
      st.session_state.available_agents.append(agent.name)

  def load_agent_prompt(agent_name):
      """Loads an agent prompt from a file."""
      file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
      if os.path.exists(file_path):
          with open(file_path, "r") as file:
              agent_prompt = file.read()
          return agent_prompt
      else:
          return None

  def create_agent_from_text(name, text):
      skills = text.split('\n')
      agent = AIAgent(name, "AI agent created from text input.", skills)
      save_agent_to_file(agent)
      return agent.create_agent_prompt()

  def chat_interface_with_agent(input_text, agent_name):
      agent_prompt = load_agent_prompt(agent_name)
      if agent_prompt is None:
          return f"Agent {agent_name} not found."

-     model_name = "Bin12345/AutoCoder_S_6.7B"
      try:
-         model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=load_hf_token())
-         tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=load_hf_token())
          generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
      except EnvironmentError as e:
          return f"Error loading model: {e}"

      combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"

-     input_ids = tokenizer.encode(combined_input, return_tensors="pt")
      max_input_length = 900
      if input_ids.shape[1] > max_input_length:
          input_ids = input_ids[:, :max_input_length]

      outputs = model.generate(
-         input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True,
-         pad_token_id=tokenizer.eos_token_id  # Set pad_token_id to eos_token_id
      )
      response = tokenizer.decode(outputs[0], skip_special_tokens=True)
      return response

- # Terminal interface
  def terminal_interface(command, project_name=None):
      if project_name:
          project_path = os.path.join(PROJECT_ROOT, project_name)
          if not os.path.exists(project_path):
              return f"Project {project_name} does not exist."
-         result = subprocess.run(command, shell=True, capture_output=True, text=True, cwd=project_path)
      else:
          result = subprocess.run(command, shell=True, capture_output=True, text=True)
-     return result.stdout

- # Code editor interface
  def code_editor_interface(code):
      try:
          formatted_code = black.format_str(code, mode=black.FileMode())
      except black.NothingChanged:
          formatted_code = code

      result = StringIO()
      sys.stdout = result
      sys.stderr = result

      (pylint_stdout, pylint_stderr) = lint.py_run(code, return_std=True)
      sys.stdout = sys.__stdout__
      sys.stderr = sys.__stderr__

      lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()

      return formatted_code, lint_message

- # Text summarization tool
  def summarize_text(text):
-     summarizer = pipeline("summarization", model="t5-base", use_auth_token=load_hf_token())
-     summary = summarizer(text, max_length=130, min_length=30, do_sample=False)
      return summary[0]['summary_text']

- # Sentiment analysis tool
  def sentiment_analysis(text):
-     analyzer = pipeline("sentiment-analysis", model="cardiffnlp/twitter-roberta-base-sentiment", use_auth_token=load_hf_token())
-     result = analyzer(text)
-     return result[0]['label']
-
- # Text translation tool (code translation)
- def translate_code(code, source_language, target_language):
-     # Use a Hugging Face translation model instead of OpenAI
-     translator = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es", use_auth_token=load_hf_token())  # Example: English to Spanish
-     translated_code = translator(code, target_lang=target_language)[0]['translation_text']
      return translated_code

  def generate_code(code_idea):
-     # Use a Hugging Face code generation model instead of OpenAI
-     generator = pipeline('text-generation', model='bigcode/starcoder', use_auth_token=load_hf_token())
-     generated_code = generator(code_idea, max_length=1000, num_return_sequences=1)[0]['generated_text']
      return generated_code

- def chat_interface(input_text):
-     """Handles general chat interactions with the user."""
-     # Use a Hugging Face chatbot model or your own logic
-     chatbot = pipeline("text-generation", model="microsoft/DialoGPT-medium", use_auth_token=load_hf_token())
-     response = chatbot(input_text, max_length=50, num_return_sequences=1)[0]['generated_text']
-     return response

- # Workspace interface
- def workspace_interface(project_name):
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if not os.path.exists(project_path):
-         os.makedirs(project_path)
-         st.session_state.workspace_projects[project_name] = {'files': []}
-         return f"Project '{project_name}' created successfully."
-     else:
-         return f"Project '{project_name}' already exists."

- # Add code to workspace
- def add_code_to_workspace(project_name, code, file_name):
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if not os.path.exists(project_path):
-         return f"Project '{project_name}' does not exist."
-
-     file_path = os.path.join(project_path, file_name)
-     with open(file_path, "w") as file:
-         file.write(code)
-     st.session_state.workspace_projects[project_name]['files'].append(file_name)
-     return f"Code added to '{file_name}' in project '{project_name}'."

  # Streamlit App
  st.title("AI Agent Creator")

  # Sidebar navigation
  st.sidebar.title("Navigation")
  app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])

  if app_mode == "AI Agent Creator":
      # AI Agent Creator
      st.header("Create an AI Agent from Text")

      st.subheader("From Text")
      agent_name = st.text_input("Enter agent name:")
      text_input = st.text_area("Enter skills (one per line):")
      if st.button("Create Agent"):
          agent_prompt = create_agent_from_text(agent_name, text_input)
          st.success(f"Agent '{agent_name}' created and saved successfully.")
          st.session_state.available_agents.append(agent_name)

  elif app_mode == "Tool Box":
      # Tool Box
      st.header("AI-Powered Tools")

      # Chat Interface
      st.subheader("Chat with CodeCraft")
      chat_input = st.text_area("Enter your message:")
      if st.button("Send"):
-         chat_response = chat_interface(chat_input)
          st.session_state.chat_history.append((chat_input, chat_response))
          st.write(f"CodeCraft: {chat_response}")

      # Terminal Interface
      st.subheader("Terminal")
      terminal_input = st.text_input("Enter a command:")
      if st.button("Run"):
          terminal_output = terminal_interface(terminal_input)
          st.session_state.terminal_history.append((terminal_input, terminal_output))
          st.code(terminal_output, language="bash")

      # Code Editor Interface
      st.subheader("Code Editor")
      code_editor = st.text_area("Write your code:", height=300)
      if st.button("Format & Lint"):
          formatted_code, lint_message = code_editor_interface(code_editor)
          st.code(formatted_code, language="python")
          st.info(lint_message)

      # Text Summarization Tool
      st.subheader("Summarize Text")
      text_to_summarize = st.text_area("Enter text to summarize:")
      if st.button("Summarize"):
          summary = summarize_text(text_to_summarize)
          st.write(f"Summary: {summary}")

      # Sentiment Analysis Tool
      st.subheader("Sentiment Analysis")
      sentiment_text = st.text_area("Enter text for sentiment analysis:")
      if st.button("Analyze Sentiment"):
          sentiment = sentiment_analysis(sentiment_text)
          st.write(f"Sentiment: {sentiment}")

      # Text Translation Tool (Code Translation)
      st.subheader("Translate Code")
      code_to_translate = st.text_area("Enter code to translate:")
-     source_language = st.text_input("Enter source language (e.g., 'Python'):")
-     target_language = st.text_input("Enter target language (e.g., 'JavaScript'):")
      if st.button("Translate Code"):
          translated_code = translate_code(code_to_translate, source_language, target_language)
          st.code(translated_code, language=target_language.lower())

      # Code Generation
      st.subheader("Code Generation")
      code_idea = st.text_input("Enter your code idea:")
      if st.button("Generate Code"):
          generated_code = generate_code(code_idea)
          st.code(generated_code, language="python")

  elif app_mode == "Workspace Chat App":
      # Workspace Chat App
      st.header("Workspace Chat App")

      # Project Workspace Creation
      st.subheader("Create a New Project")
      project_name = st.text_input("Enter project name:")
      if st.button("Create Project"):
          workspace_status = workspace_interface(project_name)
          st.success(workspace_status)

      # Add Code to Workspace
      st.subheader("Add Code to Workspace")
      code_to_add = st.text_area("Enter code to add to workspace:")
-     file_name = st.text_input("Enter file name (e.g., 'app.py'):")
      if st.button("Add Code"):
          add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
          st.success(add_code_status)

      # Terminal Interface with Project Context
      st.subheader("Terminal (Workspace Context)")
      terminal_input = st.text_input("Enter a command within the workspace:")
      if st.button("Run Command"):
          terminal_output = terminal_interface(terminal_input, project_name)
          st.code(terminal_output, language="bash")

      # Chat Interface for Guidance
      st.subheader("Chat with CodeCraft for Guidance")
      chat_input = st.text_area("Enter your message for guidance:")
      if st.button("Get Guidance"):
          chat_response = chat_interface(chat_input)
          st.session_state.chat_history.append((chat_input, chat_response))
          st.write(f"CodeCraft: {chat_response}")

      # Display Chat History
      st.subheader("Chat History")
      for user_input, response in st.session_state.chat_history:
          st.write(f"User: {user_input}")
          st.write(f"CodeCraft: {response}")

      # Display Terminal History
      st.subheader("Terminal History")
      for command, output in st.session_state.terminal_history:
          st.write(f"Command: {command}")
          st.code(output, language="bash")

      # Display Projects and Files
      st.subheader("Workspace Projects")
      for project, details in st.session_state.workspace_projects.items():
          st.write(f"Project: {project}")
          for file in details['files']:
              st.write(f" - {file}")

      # Chat with AI Agents
      st.subheader("Chat with AI Agents")
      selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
      agent_chat_input = st.text_area("Enter your message for the agent:")
      if st.button("Send to Agent"):
          agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
          st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
          st.write(f"{selected_agent}: {agent_chat_response}")

      # Automate Build Process
      st.subheader("Automate Build Process")
      if st.button("Automate"):
          agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
          summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
          st.write("Autonomous Build Summary:")
          st.write(summary)
          st.write("Next Step:")
-         st.write(next_step)

+++ b/app.py
  import os
+ import sys
  import subprocess
  import streamlit as st
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
+ import black
+ from pylint import lint
+ from io import StringIO
+ import openai

+ # Set your OpenAI API key here
+ openai.api_key = "YOUR_OPENAI_API_KEY"
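The key above is a literal placeholder string; in a deployed Space it would more typically be read from a secret exposed as an environment variable. A minimal sketch, assuming an OPENAI_API_KEY secret is configured (not part of this commit):

openai.api_key = os.getenv("OPENAI_API_KEY")  # assumed Space secret, not a hard-coded value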

  HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
  PROJECT_ROOT = "projects"
  AGENT_DIRECTORY = "agents"

  # Global state to manage communication between Tool Box and Workspace Chat App
  if 'chat_history' not in st.session_state:
      st.session_state.chat_history = []
  if 'terminal_history' not in st.session_state:
      st.session_state.terminal_history = []
  if 'workspace_projects' not in st.session_state:
      st.session_state.workspace_projects = {}
  if 'available_agents' not in st.session_state:
      st.session_state.available_agents = []
  if 'current_state' not in st.session_state:
      st.session_state.current_state = {
          'toolbox': {},
          'workspace_chat': {}
      }

  class AIAgent:
      def __init__(self, name, description, skills):
          self.name = name
          self.description = description
          self.skills = skills

      def create_agent_prompt(self):
          skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
          agent_prompt = f"""
  As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
  {skills_str}

  I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
  """
          return agent_prompt

      def autonomous_build(self, chat_history, workspace_projects):
          """
          Autonomous build logic that continues based on the state of chat history and workspace projects.
          """
          summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
          summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])

          next_step = "Based on the current state, the next logical step is to implement the main application logic."

          return summary, next_step

  def save_agent_to_file(agent):
+     """Saves the agent's prompt to a file locally and then commits to the Hugging Face repository."""
      if not os.path.exists(AGENT_DIRECTORY):
          os.makedirs(AGENT_DIRECTORY)
      file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
+     config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
      with open(file_path, "w") as file:
          file.write(agent.create_agent_prompt())
+     with open(config_path, "w") as file:
+         file.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
      st.session_state.available_agents.append(agent.name)
+     commit_and_push_changes(f"Add agent {agent.name}")

  def load_agent_prompt(agent_name):
      """Loads an agent prompt from a file."""
      file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
      if os.path.exists(file_path):
          with open(file_path, "r") as file:
              agent_prompt = file.read()
          return agent_prompt
      else:
          return None

  def create_agent_from_text(name, text):
      skills = text.split('\n')
      agent = AIAgent(name, "AI agent created from text input.", skills)
      save_agent_to_file(agent)
      return agent.create_agent_prompt()

+ # Chat interface using a selected agent
  def chat_interface_with_agent(input_text, agent_name):
      agent_prompt = load_agent_prompt(agent_name)
      if agent_prompt is None:
          return f"Agent {agent_name} not found."

+     # Load the GPT-2 model which is compatible with AutoModelForCausalLM
+     model_name = "gpt2"
      try:
+         model = AutoModelForCausalLM.from_pretrained(model_name)
+         tokenizer = AutoTokenizer.from_pretrained(model_name)
          generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
      except EnvironmentError as e:
          return f"Error loading model: {e}"

+     # Combine the agent prompt with user input
      combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"

+     # Truncate input text to avoid exceeding the model's maximum length
      max_input_length = 900
+     input_ids = tokenizer.encode(combined_input, return_tensors="pt")
      if input_ids.shape[1] > max_input_length:
          input_ids = input_ids[:, :max_input_length]

+     # Generate chatbot response
      outputs = model.generate(
+         input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True, pad_token_id=tokenizer.eos_token_id  # Set pad_token_id to eos_token_id
      )
      response = tokenizer.decode(outputs[0], skip_special_tokens=True)
      return response
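A hypothetical call for reference, assuming an agent file agents/WebDev.txt was created earlier through the creator tab (illustrative only, not from the commit):

reply = chat_interface_with_agent("How do I add a sidebar to a Streamlit page?", "WebDev")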

+ def workspace_interface(project_name):
+     project_path = os.path.join(PROJECT_ROOT, project_name)
+     if not os.path.exists(PROJECT_ROOT):
+         os.makedirs(PROJECT_ROOT)
+     if not os.path.exists(project_path):
+         os.makedirs(project_path)
+         st.session_state.workspace_projects[project_name] = {"files": []}
+         st.session_state.current_state['workspace_chat']['project_name'] = project_name
+         commit_and_push_changes(f"Create project {project_name}")
+         return f"Project {project_name} created successfully."
+     else:
+         return f"Project {project_name} already exists."

+ def add_code_to_workspace(project_name, code, file_name):
+     project_path = os.path.join(PROJECT_ROOT, project_name)
+     if os.path.exists(project_path):
+         file_path = os.path.join(project_path, file_name)
+         with open(file_path, "w") as file:
+             file.write(code)
+         st.session_state.workspace_projects[project_name]["files"].append(file_name)
+         st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
+         commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
+         return f"Code added to {file_name} in project {project_name} successfully."
+     else:
+         return f"Project {project_name} does not exist."
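A hypothetical usage sequence for the two helpers above (the project name and file are illustrative, not from the commit):

print(workspace_interface("demo"))                                 # creates projects/demo and commits
print(add_code_to_workspace("demo", "print('hello')", "main.py"))  # writes projects/demo/main.py and commits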

  def terminal_interface(command, project_name=None):
      if project_name:
          project_path = os.path.join(PROJECT_ROOT, project_name)
          if not os.path.exists(project_path):
              return f"Project {project_name} does not exist."
+         result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
      else:
          result = subprocess.run(command, shell=True, capture_output=True, text=True)
+     if result.returncode == 0:
+         st.session_state.current_state['toolbox']['terminal_output'] = result.stdout
+         return result.stdout
+     else:
+         st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
+         return result.stderr
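For example (hypothetical command and project, not from the commit):

output = terminal_interface("ls -la", project_name="demo")  # runs the command inside projects/demo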

  def code_editor_interface(code):
      try:
          formatted_code = black.format_str(code, mode=black.FileMode())
      except black.NothingChanged:
          formatted_code = code

      result = StringIO()
      sys.stdout = result
      sys.stderr = result

      (pylint_stdout, pylint_stderr) = lint.py_run(code, return_std=True)
      sys.stdout = sys.__stdout__
      sys.stderr = sys.__stderr__

      lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()

+     st.session_state.current_state['toolbox']['formatted_code'] = formatted_code
+     st.session_state.current_state['toolbox']['lint_message'] = lint_message
      return formatted_code, lint_message
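A note on the lint step above: py_run is provided by pylint.epylint (available in pylint < 3.0) and expects a file path or command string rather than raw source code, so passing the code string directly will not lint it. A minimal sketch of the usual wiring, assuming pylint < 3.0 (the helper name is hypothetical and not part of this commit):

import os
import tempfile
from pylint import epylint

def lint_code_via_epylint(code):
    # Write the source to a temporary file because epylint.py_run expects a path.
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(code)
        path = tmp.name
    pylint_stdout, pylint_stderr = epylint.py_run(path, return_std=True)
    os.unlink(path)
    return pylint_stdout.getvalue() + pylint_stderr.getvalue()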

  def summarize_text(text):
+     summarizer = pipeline("summarization")
+     summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
+     st.session_state.current_state['toolbox']['summary'] = summary[0]['summary_text']
      return summary[0]['summary_text']

  def sentiment_analysis(text):
+     analyzer = pipeline("sentiment-analysis")
+     sentiment = analyzer(text)
+     st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
+     return sentiment[0]

+ def translate_code(code, input_language, output_language):
+     # Define a dictionary to map programming languages to their corresponding file extensions
+     language_extensions = {
+         # ignore the specific languages right now, and continue to EOF
+     }

+     # Add code to handle edge cases such as invalid input and unsupported programming languages
+     if input_language not in language_extensions:
+         raise ValueError(f"Invalid input language: {input_language}")
+     if output_language not in language_extensions:
+         raise ValueError(f"Invalid output language: {output_language}")

+     # Use the dictionary to map the input and output languages to their corresponding file extensions
+     input_extension = language_extensions[input_language]
+     output_extension = language_extensions[output_language]

+     # Translate the code using the OpenAI API
+     prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
+     response = openai.ChatCompletion.create(
+         model="gpt-4",
+         messages=[
+             {"role": "system", "content": "You are an expert software developer."},
+             {"role": "user", "content": prompt}
+         ]
+     )
+     translated_code = response.choices[0].message['content'].strip()

+     # Return the translated code
+     translated_code = response.choices[0].message['content'].strip()
+     st.session_state.current_state['toolbox']['translated_code'] = translated_code
      return translated_code
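The language_extensions mapping above is committed empty (with the placeholder comment left in place), so both ValueError checks will fire for any input. A minimal sketch of what a populated mapping might look like, with illustrative values that are not part of the commit:

# Hypothetical contents; the commit leaves this dictionary empty.
language_extensions = {
    "Python": ".py",
    "JavaScript": ".js",
    "Java": ".java",
}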

  def generate_code(code_idea):
+     response = openai.ChatCompletion.create(
+         model="gpt-4",
+         messages=[
+             {"role": "system", "content": "You are an expert software developer."},
+             {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
+         ]
+     )
+     generated_code = response.choices[0].message['content'].strip()
+     st.session_state.current_state['toolbox']['generated_code'] = generated_code
      return generated_code
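openai.ChatCompletion.create and response.choices[0].message['content'] belong to the pre-1.0 openai-python client; with openai >= 1.0 the equivalent call looks roughly like this (a sketch, not part of the commit):

from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment
response = client.chat.completions.create(
    model="gpt-4",
    messages=[
        {"role": "system", "content": "You are an expert software developer."},
        {"role": "user", "content": "Generate a Python code snippet for the following idea:\n\nreverse a string"},
    ],
)
generated_code = response.choices[0].message.content.strip()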

+ def commit_and_push_changes(commit_message):
+     """Commits and pushes changes to the Hugging Face repository."""
+     commands = [
+         "git add .",
+         f"git commit -m '{commit_message}'",
+         "git push"
+     ]
+     for command in commands:
+         result = subprocess.run(command, shell=True, capture_output=True, text=True)
+         if result.returncode != 0:
+             st.error(f"Error executing command '{command}': {result.stderr}")
+             break
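A plain "git push" from inside a running Space only succeeds if the clone's remote already carries credentials. A sketch of an alternative that uploads through huggingface_hub using an HF_TOKEN secret (an assumption, not part of this commit; the repo_id is taken from HUGGING_FACE_REPO_URL above):

import os
from huggingface_hub import HfApi

def push_file_to_space(local_path, path_in_repo):
    # Hypothetical helper; assumes an HF_TOKEN secret with write access is configured.
    api = HfApi(token=os.getenv("HF_TOKEN"))
    api.upload_file(
        path_or_fileobj=local_path,
        path_in_repo=path_in_repo,
        repo_id="acecalisto3/DevToolKit",
        repo_type="space",
    )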

  # Streamlit App
  st.title("AI Agent Creator")

  # Sidebar navigation
  st.sidebar.title("Navigation")
  app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])

  if app_mode == "AI Agent Creator":
      # AI Agent Creator
      st.header("Create an AI Agent from Text")

      st.subheader("From Text")
      agent_name = st.text_input("Enter agent name:")
      text_input = st.text_area("Enter skills (one per line):")
      if st.button("Create Agent"):
          agent_prompt = create_agent_from_text(agent_name, text_input)
          st.success(f"Agent '{agent_name}' created and saved successfully.")
          st.session_state.available_agents.append(agent_name)

  elif app_mode == "Tool Box":
      # Tool Box
      st.header("AI-Powered Tools")

      # Chat Interface
      st.subheader("Chat with CodeCraft")
      chat_input = st.text_area("Enter your message:")
      if st.button("Send"):
+         if chat_input.startswith("@"):
+             agent_name = chat_input.split(" ")[0][1:]  # Extract agent_name from @agent_name
+             chat_input = " ".join(chat_input.split(" ")[1:])  # Remove agent_name from input
+             chat_response = chat_interface_with_agent(chat_input, agent_name)
+         else:
+             chat_response = chat_interface(chat_input)
          st.session_state.chat_history.append((chat_input, chat_response))
          st.write(f"CodeCraft: {chat_response}")

      # Terminal Interface
      st.subheader("Terminal")
      terminal_input = st.text_input("Enter a command:")
      if st.button("Run"):
          terminal_output = terminal_interface(terminal_input)
          st.session_state.terminal_history.append((terminal_input, terminal_output))
          st.code(terminal_output, language="bash")

      # Code Editor Interface
      st.subheader("Code Editor")
      code_editor = st.text_area("Write your code:", height=300)
      if st.button("Format & Lint"):
          formatted_code, lint_message = code_editor_interface(code_editor)
          st.code(formatted_code, language="python")
          st.info(lint_message)

      # Text Summarization Tool
      st.subheader("Summarize Text")
      text_to_summarize = st.text_area("Enter text to summarize:")
      if st.button("Summarize"):
          summary = summarize_text(text_to_summarize)
          st.write(f"Summary: {summary}")

      # Sentiment Analysis Tool
      st.subheader("Sentiment Analysis")
      sentiment_text = st.text_area("Enter text for sentiment analysis:")
      if st.button("Analyze Sentiment"):
          sentiment = sentiment_analysis(sentiment_text)
          st.write(f"Sentiment: {sentiment}")

      # Text Translation Tool (Code Translation)
      st.subheader("Translate Code")
      code_to_translate = st.text_area("Enter code to translate:")
+     source_language = st.text_input("Enter source language (e.g. 'Python'):")
+     target_language = st.text_input("Enter target language (e.g. 'JavaScript'):")
      if st.button("Translate Code"):
          translated_code = translate_code(code_to_translate, source_language, target_language)
          st.code(translated_code, language=target_language.lower())

      # Code Generation
      st.subheader("Code Generation")
      code_idea = st.text_input("Enter your code idea:")
      if st.button("Generate Code"):
          generated_code = generate_code(code_idea)
          st.code(generated_code, language="python")

+     # Display Preset Commands
+     st.subheader("Preset Commands")
+     preset_commands = {
+         "Create a new project": "create_project('project_name')",
+         "Add code to workspace": "add_code_to_workspace('project_name', 'code', 'file_name')",
+         "Run terminal command": "terminal_interface('command', 'project_name')",
+         "Generate code": "generate_code('code_idea')",
+         "Summarize text": "summarize_text('text')",
+         "Analyze sentiment": "sentiment_analysis('text')",
+         "Translate code": "translate_code('code', 'source_language', 'target_language')",
+     }
+     for command_name, command in preset_commands.items():
+         st.write(f"{command_name}: `{command}`")

  elif app_mode == "Workspace Chat App":
      # Workspace Chat App
      st.header("Workspace Chat App")

      # Project Workspace Creation
      st.subheader("Create a New Project")
      project_name = st.text_input("Enter project name:")
      if st.button("Create Project"):
          workspace_status = workspace_interface(project_name)
          st.success(workspace_status)

      # Add Code to Workspace
      st.subheader("Add Code to Workspace")
      code_to_add = st.text_area("Enter code to add to workspace:")
+     file_name = st.text_input("Enter file name (e.g. 'app.py'):")
      if st.button("Add Code"):
          add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
          st.success(add_code_status)

      # Terminal Interface with Project Context
      st.subheader("Terminal (Workspace Context)")
      terminal_input = st.text_input("Enter a command within the workspace:")
      if st.button("Run Command"):
          terminal_output = terminal_interface(terminal_input, project_name)
          st.code(terminal_output, language="bash")

      # Chat Interface for Guidance
      st.subheader("Chat with CodeCraft for Guidance")
      chat_input = st.text_area("Enter your message for guidance:")
      if st.button("Get Guidance"):
          chat_response = chat_interface(chat_input)
          st.session_state.chat_history.append((chat_input, chat_response))
          st.write(f"CodeCraft: {chat_response}")

      # Display Chat History
      st.subheader("Chat History")
      for user_input, response in st.session_state.chat_history:
          st.write(f"User: {user_input}")
          st.write(f"CodeCraft: {response}")

      # Display Terminal History
      st.subheader("Terminal History")
      for command, output in st.session_state.terminal_history:
          st.write(f"Command: {command}")
          st.code(output, language="bash")

      # Display Projects and Files
      st.subheader("Workspace Projects")
      for project, details in st.session_state.workspace_projects.items():
          st.write(f"Project: {project}")
          for file in details['files']:
              st.write(f" - {file}")

      # Chat with AI Agents
      st.subheader("Chat with AI Agents")
      selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
      agent_chat_input = st.text_area("Enter your message for the agent:")
      if st.button("Send to Agent"):
          agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
          st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
          st.write(f"{selected_agent}: {agent_chat_response}")

      # Automate Build Process
      st.subheader("Automate Build Process")
      if st.button("Automate"):
          agent = AIAgent(selected_agent, "", [])  # Load the agent without skills for now
          summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
          st.write("Autonomous Build Summary:")
          st.write(summary)
          st.write("Next Step:")
+         st.write(next_step)

+ # Display current state for debugging
+ st.sidebar.subheader("Current State")
+ st.sidebar.json(st.session_state.current_state)
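Note that the updated file still calls chat_interface() for plain (non-@agent) chat messages, but this revision removes its definition. A minimal sketch of such a helper, modeled on the DialoGPT-based function deleted above (an assumption here, not part of this commit):

def chat_interface(input_text):
    """Handles general chat interactions with the user (sketch based on the removed helper)."""
    chatbot = pipeline("text-generation", model="microsoft/DialoGPT-medium")
    response = chatbot(input_text, max_length=50, num_return_sequences=1)[0]['generated_text']
    return response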