acecalisto3 committed on
Commit 78e9275 · verified · 1 Parent(s): 3496d63

Update app.py

Files changed (1):
  1. app.py +184 -276

app.py CHANGED
@@ -1,5 +1,6 @@
  from streamlit_ace import st_ace
- from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
  import os
  import subprocess
  import black
@@ -10,12 +11,10 @@ import torch
  from huggingface_hub import hf_hub_url, cached_download, HfApi
  import re
  from typing import List, Dict
- import streamlit as st # This import is sufficient
  from streamlit_jupyter import StreamlitPatcher, tqdm

  StreamlitPatcher().jupyter() # This patches Streamlit to work in Jupyter

-
  # Access Hugging Face API key from secrets
  hf_token = st.secrets["hf_token"]
  if not hf_token:
@@ -39,13 +38,28 @@ if 'available_agents' not in st.session_state:
  # AI Guide Toggle
  ai_guide_level = st.sidebar.radio("AI Guide Level", ["Full Assistance", "Partial Assistance", "No Assistance"])

  class AIAgent:
-     def __init__(self, name: str, description: str, skills: List[str]):
          self.name = name
          self.description = description
          self.skills = skills
          self._hf_api = HfApi() # Initialize HfApi here

      def create_agent_prompt(self) -> str:
          skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
          agent_prompt = f"""
@@ -63,7 +77,6 @@ I am confident that I can leverage my expertise to assist you in developing and
          return summary, next_step

      def deploy_built_space_to_hf(self, project_name: str) -> str:
-         # Assuming you have a function that generates the space content
          space_content = generate_space_content(project_name)
          repository = self._hf_api.create_repo(
              repo_id=project_name,
@@ -173,278 +186,173 @@ def lint_code(code: str) -> List[str]:
      sys.stdout = pylint_output

      # Run pylint on the provided code
-     pylint.lint.Run(['--from-stdin'], do_exit=False, argv=[], stdin=StringIO(code))

-     # Reset stdout and fetch lint results
      sys.stdout = sys.__stdout__
-     lint_results = pylint_output.getvalue().splitlines()
-     return lint_results
-
- # Sidebar for chat interface
- st.sidebar.title("Chat Interface")
- user_input = st.sidebar.text_area("Type your idea, task, or request here:")
-
- # Placeholder function to simulate code generation
- def generate_code(user_input):
-     return f"# Generated code for: {user_input}\nprint('Hello, World!')"
-
- # Main layout
- col1, col2 = st.columns([1, 3])
-
- with col1:
-     st.title("Code Editor")
-     if user_input:
-         code = generate_code(user_input)
-     else:
-         code = ""
-     code = st_ace(value=code, language='python', theme='monokai', height=400)
-
- with col2:
-     st.title("Jupyter IPython Console")
-     st_jupyter()
-     st.title("Read-Only Terminal")
-     st.text_area("Terminal Output", height=200)
-
- # Placeholder for autonomous agent logic
- if user_input:
-     st.sidebar.write("Processing your request...")
-
-     # Example: Generate a simple "Hello, World!" Streamlit app
-     generated_code = code_generator(f"Create a Streamlit app that displays 'Hello, World!'", max_new_tokens=50, num_return_sequences=1)[0]['generated_text']
-     st.sidebar.write("Generated code:")
-     st.sidebar.code(generated_code, language="python")
-
-     # Update the code editor
-     code = generated_code
-     # ... (Additional logic for code analysis, project management, etc.)
-
-     # ... (Update the Jupyter console and terminal output as needed)
-
-     # ... (Interact with the AI guide chatbot)

  if __name__ == "__main__":
-     st.sidebar.title("Navigation")
-     app_mode = st.sidebar.selectbox("Choose the app mode", ["Home", "Terminal", "Explorer", "Code Editor", "Build & Deploy"])
-
-     if app_mode == "Home":
-         st.title("Welcome to AI-Guided Development")
-         st.write("This application helps you build and deploy applications with the assistance of an AI Guide.")
-         st.write("Toggle the AI Guide from the sidebar to choose the level of assistance you need.")
-
-     elif app_mode == "Terminal":
-         st.header("Terminal")
-         terminal_input = st.text_input("Enter a command:")
-         if st.button("Run"):
-             output = run_code(terminal_input)
-             st.session_state.terminal_history.append((terminal_input, output))
-             st.code(output, language="bash")
-         if ai_guide_level != "No Assistance":
-             st.write("Run commands here to add packages to your project. For example: pip install <package-name>.")
-             if terminal_input and "install" in terminal_input:
-                 package_name = terminal_input.split("install")[-1].strip()
-                 st.write(f"Package {package_name} will be added to your project.")
-
-     elif app_mode == "Explorer":
-         st.header("Explorer")
-         uploaded_file = st.file_uploader("Upload a file", type=["py"])
-         if uploaded_file:
-             file_details = {"FileName": uploaded_file.name, "FileType": uploaded_file.type}
-             st.write(file_details)
-             save_path = os.path.join(PROJECT_ROOT, uploaded_file.name)
-             with open(save_path, "wb") as f:
-                 f.write(uploaded_file.getbuffer())
-             st.success(f"File {uploaded_file.name} saved successfully!")
-
-         st.write("Drag and drop files into the 'app' folder.")
-         for project, details in st.session_state.workspace_projects.items():
-             st.write(f"Project: {project}")
-             for file in details['files']:
-                 st.write(f" - {file}")
-                 if st.button(f"Move {file} to app folder"):
-                     # Logic to move file to 'app' folder
-                     pass
-         if ai_guide_level != "No Assistance":
-             st.write("You can upload files and move them into the 'app' folder for building your application.")
-
-     elif app_mode == "Code Editor":
-         st.header("Code Editor")
-         code_editor = st.text_area("Write your code:", height=300)
-         if st.button("Save Code"):
-             # Logic to save code
-             pass
-         if ai_guide_level != "No Assistance":
-             st.write("The function foo() requires the bar package. Add it to requirements.txt.")
-
-         # Analyze code and provide real-time hints
-         hints = analyze_code(code_editor)
-         if hints:
-             st.write("**Helpful Hints:**")
-             for hint in hints:
-                 st.write(f"- {hint}")
-
-         if st.button("Get Code Suggestion"):
-             # Provide a predictive code completion
-             completion = get_code_completion(code_editor)
-             st.write("**Suggested Code Completion:**")
-             st.code(completion, language="python")
-
-         if st.button("Check Code"):
-             # Analyze the code for errors and warnings
-             lint_results = lint_code(code_editor)
-
-             if lint_results:
-                 st.write("**Errors and Warnings:**")
-                 for result in lint_results:
-                     st.write(result)
-             else:
-                 st.write("No issues found! Your code is clean.")
-
-     elif app_mode == "Build & Deploy":
-         st.header("Build & Deploy")
-         project_name_input = st.text_input("Enter Project Name for Automation:")
-         if st.button("Automate"):
-             selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
-             selected_model = st.selectbox("Select a code-generative model", AVAILABLE_CODE_GENERATIVE_MODELS)
-             agent = AIAgent(selected_agent, "", []) # Load the agent without skills for now
-             summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects, project_name_input, selected_model, hf_token)
-             st.write("Autonomous Build Summary:")
-             st.write(summary)
-             st.write("Next Step:")
-             st.write(next_step)
-             if agent._hf_api and agent.has_valid_hf_token():
-                 repository_name = agent.deploy_built_space_to_hf(project_name_input)
-                 st.markdown("## Congratulations! Successfully deployed Space 🚀 ##")
-                 st.markdown(f"[Check out your new Space here](hf.co/{repository_name})")
-
-     # AI Guide Chat
-     if ai_guide_level != "No Assistance":
-         display_ai_guide_chat(st.session_state.chat_history)
-         # Add a text input for user to interact with the AI Guide
-         user_input = st.text_input("Ask the AI Guide a question:", key="user_input")
-         if st.button("Send"):
-             if user_input:
-                 # Process the user's input and get a response from the AI Guide
-                 agent_response = process_input(user_input)
-                 st.session_state.chat_history.append((user_input, agent_response))
-                 # Clear the user input field
-                 st.session_state.user_input = ""
-
-     # CSS for styling
-     st.markdown("""
-     <style>
-     /* Advanced and Accommodating CSS */
-     body {
-         font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
-         background-color: #f4f4f9;
-         color: #333;
-         margin: 0;
-         padding: 0;
-     }
-     h1, h2, h3, h4, h5, h6 {
-         color: #333;
-     }
-     .container {
-         width: 90%;
-         margin: 0 auto;
-         padding: 20px;
-     }
-     /* Navigation Sidebar */
-     .sidebar {
-         background-color: #2c3e50;
-         color: #ecf0f1;
-         padding: 20px;
-         height: 100vh;
-         position: fixed;
-         top: 0;
-         left: 0;
-         width: 250px;
-         overflow-y: auto;
-     }
-     .sidebar a {
-         color: #ecf0f1;
-         text-decoration: none;
-         display: block;
-         padding: 10px 0;
-     }
-     .sidebar a:hover {
-         background-color: #34495e;
-         border-radius: 5px;
-     }
-     /* Main Content */
-     .main-content {
-         margin-left: 270px;
-         padding: 20px;
-     }
-     /* Buttons */
-     button {
-         background-color: #3498db;
-         color: #fff;
-         border: none;
-         padding: 10px 20px;
-         border-radius: 5px;
-         cursor: pointer;
-         font-size: 16px;
-     }
-     button:hover {
-         background-color: #2980b9;
-     }
-     /* Text Areas and Inputs */
-     textarea, input[type="text"] {
-         width: 100%;
-         padding: 10px;
-         margin: 10px 0;
-         border: 1px solid #ddd;
-         border-radius: 5px;
-         box-sizing: border-box;
-     }
-     textarea:focus, input[type="text"]:focus {
-         border-color: #3498db;
-         outline: none;
-     }
-     /* Terminal Output */
-     .code-output {
-         background-color: #1e1e1e;
-         color: #dcdcdc;
-         padding: 20px;
-         border-radius: 5px;
-         font-family: 'Courier New', Courier, monospace;
-     }
-     /* Chat History */
-     .chat-history {
-         background-color: #ecf0f1;
-         padding: 20px;
-         border-radius: 5px;
-         max-height: 300px;
-         overflow-y: auto;
-     }
-     .chat-message {
-         margin-bottom: 10px;
-     }
-     .chat-message.user {
-         text-align: right;
-         color: #3498db;
-     }
-     .chat-message.agent {
-         text-align: left;
-         color: #e74c3c;
-     }
-     /* Project Management */
-     .project-list {
-         background-color: #ecf0f1;
-         padding: 20px;
-         border-radius: 5px;
-         max-height: 300px;
-         overflow-y: auto;
-     }
-     .project-item {
-         margin-bottom: 10px;
-     }
-     .project-item a {
-         color: #3498db;
-         text-decoration: none;
-     }
-     .project-item a:hover {
-         text-decoration: underline;
-     }
-     </style>
-     """, unsafe_allow_html=True)
 
+ import streamlit as st
  from streamlit_ace import st_ace
+ from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
  import os
  import subprocess
  import black

  from huggingface_hub import hf_hub_url, cached_download, HfApi
  import re
  from typing import List, Dict
  from streamlit_jupyter import StreamlitPatcher, tqdm

  StreamlitPatcher().jupyter() # This patches Streamlit to work in Jupyter

  # Access Hugging Face API key from secrets
  hf_token = st.secrets["hf_token"]
  if not hf_token:

  # AI Guide Toggle
  ai_guide_level = st.sidebar.radio("AI Guide Level", ["Full Assistance", "Partial Assistance", "No Assistance"])

+ class TextGenerationTool:
+     def __init__(self, llm: str):
+         self.llm = llm
+         self.tokenizer = AutoTokenizer.from_pretrained(llm)
+         self.model = AutoModelForCausalLM.from_pretrained(llm)
+
+     def generate_text(self, prompt: str, max_length: int = 50) -> str:
+         inputs = self.tokenizer(prompt, return_tensors="pt")
+         outputs = self.model.generate(**inputs, max_length=max_length)
+         return self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+
  class AIAgent:
+     def __init__(self, name: str, description: str, skills: List[str], llm: str):
          self.name = name
          self.description = description
          self.skills = skills
+         self.text_gen_tool = TextGenerationTool(llm)
          self._hf_api = HfApi() # Initialize HfApi here

+     def generate_agent_response(self, prompt: str) -> str:
+         return self.text_gen_tool.generate_text(prompt)
+
      def create_agent_prompt(self) -> str:
          skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
          agent_prompt = f"""

          return summary, next_step

      def deploy_built_space_to_hf(self, project_name: str) -> str:
          space_content = generate_space_content(project_name)
          repository = self._hf_api.create_repo(
              repo_id=project_name,
 
      sys.stdout = pylint_output

      # Run pylint on the provided code
+     pylint.lint.Run(['--from-stdin'], do_exit=False, input=code)

+     # Reset stdout
      sys.stdout = sys.__stdout__
+
+     # Extract pylint messages
+     messages = pylint_output.getvalue().splitlines()
+
+     return messages
+
+ def format_code(code: str) -> str:
+     # Format code using Black
+     formatted_code = black.format_str(code, mode=black.FileMode())
+     return formatted_code
+
+ def main():
+     st.title("Streamlit Workspace")
+
+     # Load agents from the agent directory
+     agent_files = [f for f in os.listdir(AGENT_DIRECTORY) if f.endswith(".py")]
+     for agent_file in agent_files:
+         agent_module = __import__(f"{AGENT_DIRECTORY}.{os.path.splitext(agent_file)[0]}")
+         agent_class = getattr(agent_module, os.path.splitext(agent_file)[0])
+         agent_instance = agent_class()
+         st.session_state.available_agents.append(agent_instance)
+
+     # Display the available agents
+     st.subheader("Available Agents")
+     for agent in st.session_state.available_agents:
+         st.write(f"**{agent.name}**: {agent.description}")
+
+     # Select an agent
+     selected_agent = st.selectbox("Select an Agent", [agent.name for agent in st.session_state.available_agents])
+     current_agent = next((agent for agent in st.session_state.available_agents if agent.name == selected_agent), None)
+
+     # Display the agent's prompt
+     if current_agent:
+         st.subheader(f"{current_agent.name} Prompt")
+         st.write(current_agent.create_agent_prompt())
+
+     # Workspace Tab
+     st.subheader("Workspace")
+     workspace_tabs = st.tabs(["Chat", "Tool Box", "Projects"])
+
+     with workspace_tabs[0]:
+         # Chat Tab
+         st.subheader("Chat with your Agent")
+         user_input = st.text_input("Enter your message:")
+
+         if user_input:
+             st.session_state.chat_history.append((user_input, current_agent.generate_agent_response(user_input)))
+             user_input = "" # Clear the input field
+
+         # Display chat history
+         st.markdown(display_chat_history(st.session_state.chat_history))
+
+         # AI Guide
+         if ai_guide_level != "No Assistance":
+             st.subheader("AI Guide")
+             guide_chat_history = []
+             if ai_guide_level == "Full Assistance":
+                 guide_chat_history.append((
+                     "I'm building a Streamlit app to display data from a CSV file.",
+                     "Great! Let's start by creating a new project in the workspace."
+                 ))
+                 guide_chat_history.append((
+                     "Create a new project called 'data_app'.",
+                     "Okay, I've created the project 'data_app'. What would you like to name the main file?"
+                 ))
+                 guide_chat_history.append((
+                     "Name it 'app.py'.",
+                     "Alright, I've added an empty 'app.py' file to the 'data_app' project. Now, let's add some code to read the CSV file."
+                 ))
+                 guide_chat_history.append((
+                     "Add the following code to 'app.py':\n```python\nimport pandas as pd\nimport streamlit as st\n\ndata = pd.read_csv('data.csv')\nst.write(data)\n```",
+                     "Excellent! Now you can run this code to see the data from your CSV file in the Streamlit app."
+                 ))
+             elif ai_guide_level == "Partial Assistance":
+                 guide_chat_history.append((
+                     "How can I read data from a CSV file in Streamlit?",
+                     "You can use the `pandas` library to read the CSV file and then use `streamlit.write()` to display it."
+                 ))
+             display_ai_guide_chat(guide_chat_history)
+
+     with workspace_tabs[1]:
+         # Tool Box Tab
+         st.subheader("Tool Box")
+         tool_tabs = st.tabs(["Code Editor", "Terminal", "Code Analysis"])
+
+         with tool_tabs[0]:
+             # Code Editor Tab
+             st.subheader("Code Editor")
+             code_editor = st_ace(
+                 placeholder="Write your code here...",
+                 height=300,
+                 theme="monokai",
+                 key="code_editor",
+                 language="python",
+                 auto_update=True
+             )
+
+             st.button("Run Code", on_click=lambda: st.write(run_code(code_editor)))
+
+             # Code Completion
+             st.subheader("Code Completion")
+             completion_prompt = st.text_area("Enter code for completion:")
+             if completion_prompt:
+                 completed_code = get_code_completion(completion_prompt)
+                 st.write(f"**Completion:** {completed_code}")
+
+         with tool_tabs[1]:
+             # Terminal Tab
+             st.subheader("Terminal")
+             terminal_input = st.text_input("Enter a command:")
+
+             if terminal_input:
+                 st.session_state.terminal_history.append(terminal_input)
+                 st.write(run_code(terminal_input))
+                 terminal_input = "" # Clear the input field
+
+             # Display terminal history
+             st.markdown("\n".join(st.session_state.terminal_history))
+
+         with tool_tabs[2]:
+             # Code Analysis Tab
+             st.subheader("Code Analysis")
+             code_to_analyze = st.text_area("Enter code to analyze:")
+             if code_to_analyze:
+                 # Analyze code
+                 analysis_results = analyze_code(code_to_analyze)
+                 if analysis_results:
+                     st.write("**Code Analysis Results:**")
+                     for hint in analysis_results:
+                         st.write(f"- {hint}")
+                 else:
+                     st.write("No code analysis suggestions found.")
+
+                 # Lint code
+                 lint_results = lint_code(code_to_analyze)
+                 if lint_results:
+                     st.write("**Linting Results:**")
+                     for message in lint_results:
+                         st.write(f"- {message}")
+                 else:
+                     st.write("No linting issues found.")
+
+                 # Format code
+                 formatted_code = format_code(code_to_analyze)
+                 st.write("**Formatted Code:**")
+                 st.code(formatted_code, language="python")
+
+     with workspace_tabs[2]:
+         # Projects Tab
+         st.subheader("Projects")
+         project_name = st.text_input("Enter project name:")
+         if st.button("Create Project"):
+             st.write(workspace_interface(project_name))
+
+         # Display existing projects
+         st.markdown(display_workspace_projects(st.session_state.workspace_projects))
+
+         # Add code to a project
+         selected_project = st.selectbox("Select a project", list(st.session_state.workspace_projects.keys()))
+         code_to_add = st.text_area("Enter code to add:")
+         file_name = st.text_input("Enter file name:")
+         if st.button("Add Code"):
+             st.write(add_code_to_workspace(selected_project, code_to_add, file_name))

  if __name__ == "__main__":
+     main()
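
For readers skimming the diff, the sketch below shows how the TextGenerationTool-backed AIAgent API introduced by this commit might be exercised. It is a minimal, hypothetical example: the `gpt2` checkpoint and the agent name, description, and skills are illustrative assumptions, not values taken from the commit.

```python
# Hypothetical usage sketch (not part of the commit). Assumes AIAgent and
# TextGenerationTool from app.py are importable in the current environment;
# note that importing app.py also executes its module-level Streamlit code.
from app import AIAgent

# The new constructor takes an `llm` checkpoint and wraps it in a TextGenerationTool.
agent = AIAgent(
    name="StreamlitHelper",                       # placeholder agent name
    description="Helps scaffold Streamlit apps",  # placeholder description
    skills=["python", "streamlit"],               # placeholder skills
    llm="gpt2",                                   # assumption: any causal-LM checkpoint
)

print(agent.create_agent_prompt())             # prompt assembled from the skills list
print(agent.generate_agent_response("Hi!"))    # text generated via TextGenerationTool.generate_text
```

Because generate_agent_response calls generate_text with its default max_length=50, longer completions would require passing a larger max_length through TextGenerationTool.generate_text directly.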