acecalisto3 committed
Commit 4b47449 · verified · 1 Parent(s): cd7398e

Update app.py

Files changed (1):
  1. app.py +88 -29
app.py CHANGED
@@ -8,6 +8,8 @@ from io import StringIO
 import sys
 import torch
 from huggingface_hub import hf_hub_url, cached_download, HfApi
+import re
+from typing import List, Dict
 
 # Access Hugging Face API key from secrets
 hf_token = st.secrets["hf_token"]
@@ -29,27 +31,17 @@ if 'workspace_projects' not in st.session_state:
 if 'available_agents' not in st.session_state:
     st.session_state.available_agents = []
 
-# Initialize the session state variable as an empty string
-st.session_state.user_input = ""
-
-# Create the text input widget
-user_input = st.text_input("Enter your text:", "Initial text")
-
-# Use the get() method to get the current value of the widget and update it
-if st.button("Update"):
-    st.session_state.user_input = user_input
-
 # AI Guide Toggle
 ai_guide_level = st.sidebar.radio("AI Guide Level", ["Full Assistance", "Partial Assistance", "No Assistance"])
 
 class AIAgent:
-    def __init__(self, name, description, skills):
+    def __init__(self, name: str, description: str, skills: List[str]):
         self.name = name
         self.description = description
         self.skills = skills
         self._hf_api = HfApi() # Initialize HfApi here
 
-    def create_agent_prompt(self):
+    def create_agent_prompt(self) -> str:
         skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
         agent_prompt = f"""
 As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
@@ -59,13 +51,14 @@ I am confident that I can leverage my expertise to assist you in developing and
 """
         return agent_prompt
 
-    def autonomous_build(self, chat_history, workspace_projects, project_name, selected_model, hf_token):
+    def autonomous_build(self, chat_history: List[tuple[str, str]], workspace_projects: Dict[str, Dict],
+                         project_name: str, selected_model: str, hf_token: str) -> tuple[str, str]:
         summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
         summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
         next_step = "Based on the current state, the next logical step is to implement the main application logic."
         return summary, next_step
 
-    def deploy_built_space_to_hf(self):
+    def deploy_built_space_to_hf(self, project_name: str) -> str:
         # Assuming you have a function that generates the space content
         space_content = generate_space_content(project_name)
         repository = self._hf_api.create_repo(
@@ -82,24 +75,24 @@ I am confident that I can leverage my expertise to assist you in developing and
             repo_type="space",
             token=hf_token
         )
-        return repository
+        return repository.name
 
-    def has_valid_hf_token(self):
+    def has_valid_hf_token(self) -> bool:
         return self._hf_api.whoami(token=hf_token) is not None
 
-def process_input(input_text):
+def process_input(input_text: str) -> str:
     chatbot = pipeline("text-generation", model="microsoft/DialoGPT-medium", tokenizer="microsoft/DialoGPT-medium")
     response = chatbot(input_text, max_length=50, num_return_sequences=1)[0]['generated_text']
     return response
 
-def run_code(code):
+def run_code(code: str) -> str:
     try:
         result = subprocess.run(code, shell=True, capture_output=True, text=True)
         return result.stdout
     except Exception as e:
         return str(e)
 
-def workspace_interface(project_name):
+def workspace_interface(project_name: str) -> str:
     project_path = os.path.join(PROJECT_ROOT, project_name)
     if not os.path.exists(project_path):
         os.makedirs(project_path)
@@ -108,7 +101,7 @@ def workspace_interface(project_name):
     else:
         return f"Project '{project_name}' already exists."
 
-def add_code_to_workspace(project_name, code, file_name):
+def add_code_to_workspace(project_name: str, code: str, file_name: str) -> str:
     project_path = os.path.join(PROJECT_ROOT, project_name)
     if not os.path.exists(project_path):
         return f"Project '{project_name}' does not exist."
@@ -119,25 +112,67 @@ def add_code_to_workspace(project_name, code, file_name):
     st.session_state.workspace_projects[project_name]['files'].append(file_name)
     return f"Code added to '{file_name}' in project '{project_name}'."
 
-def display_chat_history(chat_history):
+def display_chat_history(chat_history: List[tuple[str, str]]) -> str:
     return "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
 
-def display_workspace_projects(workspace_projects):
+def display_workspace_projects(workspace_projects: Dict[str, Dict]) -> str:
     return "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
 
-def generate_space_content(project_name):
+def generate_space_content(project_name: str) -> str:
     # Logic to generate the Streamlit app content based on project_name
     # ... (This is where you'll need to implement the actual code generation)
     return "import streamlit as st\nst.title('My Streamlit App')\nst.write('Hello, world!')"
 
 # Function to display the AI Guide chat
-def display_ai_guide_chat(chat_history):
+def display_ai_guide_chat(chat_history: List[tuple[str, str]]):
     st.markdown("<div class='chat-history'>", unsafe_allow_html=True)
     for user_message, agent_message in chat_history:
         st.markdown(f"<div class='chat-message user'>{user_message}</div>", unsafe_allow_html=True)
         st.markdown(f"<div class='chat-message agent'>{agent_message}</div>", unsafe_allow_html=True)
     st.markdown("</div>", unsafe_allow_html=True)
 
+# Load the CodeGPT model for code completion
+code_generator = pipeline("text-generation", model="microsoft/CodeGPT-small-py", tokenizer="microsoft/CodeGPT-small-py")
+
+def analyze_code(code: str) -> List[str]:
+    hints = []
+
+    # Example pointer: Suggest using list comprehensions
+    if re.search(r'for .* in .*:\n\s+.*\.append\(', code):
+        hints.append("Consider using a list comprehension instead of a loop for appending to a list.")
+
+    # Example pointer: Recommend using f-strings for string formatting
+    if re.search(r'\".*\%s\"|\'.*\%s\'', code) or re.search(r'\".*\%d\"|\'.*\%d\'', code):
+        hints.append("Consider using f-strings for cleaner and more efficient string formatting.")
+
+    # Example pointer: Avoid using global variables
+    if re.search(r'\bglobal\b', code):
+        hints.append("Avoid using global variables. Consider passing parameters or using classes.")
+
+    # Example pointer: Recommend using `with` statement for file operations
+    if re.search(r'open\(.+\)', code) and not re.search(r'with open\(.+\)', code):
+        hints.append("Consider using the `with` statement when opening files to ensure proper resource management.")
+
+    return hints
+
+def get_code_completion(prompt: str) -> str:
+    # Generate code completion based on the current code input
+    completions = code_generator(prompt, max_length=50, num_return_sequences=1)
+    return completions[0]['generated_text']
+
+def lint_code(code: str) -> List[str]:
+    # Capture pylint output
+    pylint_output = StringIO()
+    sys.stdout = pylint_output
+
+    # Run pylint on the provided code
+    pylint.lint.Run(['--from-stdin'], do_exit=False, argv=[], stdin=StringIO(code))
+
+    # Reset stdout and fetch lint results
+    sys.stdout = sys.__stdout__
+    lint_results = pylint_output.getvalue().splitlines()
+    return lint_results
+
 if __name__ == "__main__":
     st.sidebar.title("Navigation")
     app_mode = st.sidebar.selectbox("Choose the app mode", ["Home", "Terminal", "Explorer", "Code Editor", "Build & Deploy"])
@@ -155,10 +190,10 @@ if __name__ == "__main__":
             st.session_state.terminal_history.append((terminal_input, output))
             st.code(output, language="bash")
             if ai_guide_level != "No Assistance":
-                st.write("Run commands here to add packages to your project. For example: `pip install <package-name>`.")
+                st.write("Run commands here to add packages to your project. For example: pip install <package-name>.")
                 if terminal_input and "install" in terminal_input:
                     package_name = terminal_input.split("install")[-1].strip()
-                    st.write(f"Package `{package_name}` will be added to your project.")
+                    st.write(f"Package {package_name} will be added to your project.")
 
     elif app_mode == "Explorer":
         st.header("Explorer")
@@ -189,7 +224,31 @@ if __name__ == "__main__":
            # Logic to save code
            pass
        if ai_guide_level != "No Assistance":
-            st.write("The function `foo()` requires the `bar` package. Add it to `requirements.txt`.")
+            st.write("The function foo() requires the bar package. Add it to requirements.txt.")
+
+        # Analyze code and provide real-time hints
+        hints = analyze_code(code_editor)
+        if hints:
+            st.write("**Helpful Hints:**")
+            for hint in hints:
+                st.write(f"- {hint}")
+
+        if st.button("Get Code Suggestion"):
+            # Provide a predictive code completion
+            completion = get_code_completion(code_editor)
+            st.write("**Suggested Code Completion:**")
+            st.code(completion, language="python")
+
+        if st.button("Check Code"):
+            # Analyze the code for errors and warnings
+            lint_results = lint_code(code_editor)
+
+            if lint_results:
+                st.write("**Errors and Warnings:**")
+                for result in lint_results:
+                    st.write(result)
+            else:
+                st.write("No issues found! Your code is clean.")
 
     elif app_mode == "Build & Deploy":
         st.header("Build & Deploy")
@@ -204,9 +263,9 @@ if __name__ == "__main__":
            st.write("Next Step:")
            st.write(next_step)
            if agent._hf_api and agent.has_valid_hf_token():
-                repository = agent.deploy_built_space_to_hf()
+                repository_name = agent.deploy_built_space_to_hf(project_name_input)
                 st.markdown("## Congratulations! Successfully deployed Space 🚀 ##")
-                st.markdown("[Check out your new Space here](hf.co/" + repository.name + ")")
+                st.markdown(f"[Check out your new Space here](hf.co/{repository_name})")
 
     # AI Guide Chat
     if ai_guide_level != "No Assistance":
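
Editor's note on the new annotations: the diff mixes typing.List/typing.Dict with built-in generics such as tuple[str, str]. Built-in generic subscription is only valid at runtime on Python 3.9+, so on an older interpreter these signatures would raise a TypeError at import time; if the Space runs Python 3.9 or newer this is a non-issue. A minimal sketch of a backwards-compatible spelling (the function name here is hypothetical, only illustrating the annotation style used by autonomous_build):

from typing import Dict, List, Tuple  # Tuple[...] also works on Python < 3.9

def summarize(chat_history: List[Tuple[str, str]],
              workspace_projects: Dict[str, dict]) -> Tuple[str, str]:
    # Same shape of data as autonomous_build: (user, agent) pairs plus a project mapping
    summary = "\n".join(f"User: {u}\nAgent: {a}" for u, a in chat_history)
    summary += "\n" + "\n".join(f"{p}: {d}" for p, d in workspace_projects.items())
    return summary, "next step"

Alternatively, adding "from __future__ import annotations" at the top of app.py defers annotation evaluation and makes the committed spellings safe on 3.7+.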
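
To see the kind of feedback the new analyze_code() helper produces, here is a small self-contained sketch that applies two of its regex checks to a made-up sample snippet (the sample text and variable names are invented for illustration):

import re

sample = (
    "results = []\n"
    "for item in data:\n"
    "    results.append(item * 2)\n"
    'greeting = "Hello, %s" % name\n'
)

hints = []
# Same pattern the commit uses to flag append-in-a-loop
if re.search(r'for .* in .*:\n\s+.*\.append\(', sample):
    hints.append("Consider using a list comprehension instead of a loop for appending to a list.")
# Same pattern the commit uses to flag %-style formatting
if re.search(r'\".*\%s\"|\'.*\%s\'', sample):
    hints.append("Consider using f-strings for cleaner and more efficient string formatting.")

for hint in hints:
    print("-", hint)
# Both checks fire for this sample: the list-comprehension hint and the f-string hint.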
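
A caution on the new lint_code() helper: in the pylint releases I'm aware of, pylint.lint.Run takes a list of command-line arguments plus reporter/exit keywords and does not accept argv= or stdin= keyword arguments, so the call as committed is likely to raise a TypeError, and redirecting sys.stdout is fragile inside a Streamlit app. A safer sketch, assuming pylint is installed in the Space's environment, is to write the snippet to a temporary file and invoke pylint out of process (lint_code_via_cli is a hypothetical replacement, not part of this commit):

import os
import subprocess
import tempfile
from typing import List

def lint_code_via_cli(code: str) -> List[str]:
    # Write the in-memory snippet to a temp file so pylint can read it from disk
    with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
        tmp.write(code)
        path = tmp.name
    try:
        # Run pylint as a subprocess and capture its text report
        result = subprocess.run(
            ["python", "-m", "pylint", "--output-format=text", path],
            capture_output=True,
            text=True,
        )
    finally:
        os.unlink(path)
    return result.stdout.splitlines()

print("\n".join(lint_code_via_cli("x=1\nprint( x )\n")))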