acecalisto3 committed on
Commit 3dcb660
1 Parent(s): a380c4b

Update app.py

Files changed (1)
  1. app.py +130 -166
app.py CHANGED
@@ -1,173 +1,137 @@
- import streamlit as st
- from streamlit_ace import st_ace
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
- from huggingface_hub import HfApi
  import os
  import subprocess
- import black
- from pylint import lint
- from io import StringIO
- import sys
- import re
- from typing import List, Dict
- from streamlit_jupyter import StreamlitPatcher, tqdm
-
- # This line should be at the top of your script
- StreamlitPatcher().jupyter() # This patches Streamlit to work in Jupyter
-
- # Access Hugging Face API key from secrets
- hf_token = st.secrets["hf_token"]
- if not hf_token:
-     st.error("Hugging Face API key not found. Please make sure it is set in the secrets.")
-
- HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/0shotTest"
- PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), 'user_projects'))
- AVAILABLE_CODE_GENERATIVE_MODELS = ["bigcode/starcoder", "Salesforce/codegen-350M-mono", "microsoft/CodeGPT-small"]
-
- # Create PROJECT_ROOT if it doesn't exist
- if not os.path.exists(PROJECT_ROOT):
-     os.makedirs(PROJECT_ROOT)
-
- # Global state management
- if 'chat_history' not in st.session_state:
-     st.session_state.chat_history = []
- if 'terminal_history' not in st.session_state:
-     st.session_state.terminal_history = []
- if 'workspace_projects' not in st.session_state:
-     st.session_state.workspace_projects = {}
- if 'current_project' not in st.session_state:
-     st.session_state.current_project = None
-
- # AI Guide Toggle
- ai_guide_level = st.sidebar.radio("AI Guide Level", ["Full Assistance", "Partial Assistance", "No Assistance"])
-
- # Load the CodeGPT tokenizer and model
- code_generator_tokenizer = AutoTokenizer.from_pretrained("microsoft/CodeGPT-small-py", clean_up_tokenization_spaces=True)
- code_generator = pipeline("text-generation", model="microsoft/CodeGPT-small-py", tokenizer=code_generator_tokenizer)
-
- class AIAgent:
-     def __init__(self, name: str, description: str, skills: List[str]):
-         self.name = name
-         self.description = description
-         self.skills = skills
-
-     def generate_agent_response(self, prompt: str, project_name: str) -> str:
-         # Simple logic to demonstrate agent capabilities
-         if "create file" in prompt.lower():
-             file_name = re.search(r"create file (\w+\.?\w*)", prompt.lower())
-             if file_name:
-                 return self.create_file(project_name, file_name.group(1), "# New file created")
-             else:
-                 return "I'm sorry, I couldn't understand the file name. Please specify the file name clearly."
-
-         elif "add code" in prompt.lower():
-             file_name = re.search(r"add code to (\w+\.?\w*)", prompt.lower())
-             if file_name:
-                 code_to_add = prompt.split("add code to")[1].split(":", 1)[-1].strip()
-                 return self.add_code_to_file(project_name, file_name.group(1), code_to_add)
-             else:
-                 return "I'm sorry, I couldn't understand the file name. Please specify the file name clearly."
-
-         elif "create project" in prompt.lower():
-             new_project_name = re.search(r"create project (\w+)", prompt.lower())
-             if new_project_name:
-                 return self.create_project(new_project_name.group(1))
-             else:
-                 return "I'm sorry, I couldn't understand the project name. Please specify the project name clearly."
-
          else:
-             return f"I understand you're asking about '{prompt}'. How can I assist you with this in the context of your project?"
-
-     def create_project(self, project_name: str) -> str:
-         project_path = os.path.join(PROJECT_ROOT, project_name)
-         if not os.path.exists(project_path):
-             os.makedirs(project_path)
-             st.session_state.workspace_projects[project_name] = {'files': []}
-             return f"Project '{project_name}' created successfully."
          else:
-             return f"Project '{project_name}' already exists."
-
-     def create_file(self, project_name: str, file_name: str, content: str) -> str:
-         project_path = os.path.join(PROJECT_ROOT, project_name)
-         if not os.path.exists(project_path):
-             return f"Project '{project_name}' does not exist."
-
-         file_path = os.path.join(project_path, file_name)
-         with open(file_path, "w") as file:
-             file.write(content)
-         st.session_state.workspace_projects[project_name]['files'].append(file_name)
-         return f"File '{file_name}' created in project '{project_name}'."
-
-     def add_code_to_file(self, project_name: str, file_name: str, code: str) -> str:
-         project_path = os.path.join(PROJECT_ROOT, project_name)
-         file_path = os.path.join(project_path, file_name)
-         if not os.path.exists(file_path):
-             return f"File '{file_name}' does not exist in project '{project_name}'."
-
-         with open(file_path, "a") as file:
-             file.write("\n" + code)
-         return f"Code added to '{file_name}' in project '{project_name}'."
-
- # Initialize example agents
- example_agents = [
-     AIAgent(
-         name="CodeAssistant",
-         description="An AI agent that helps with coding tasks",
-         skills=["create_project", "create_file", "add_code_to_file"]
-     )
- ]
-
- def main():
-     st.title("AI-Guided Workspace")
-
-     # Sidebar for project selection and creation
-     st.sidebar.header("Project Management")
-     project_name = st.sidebar.text_input("Enter project name:")
-     if st.sidebar.button("Create Project"):
-         result = example_agents[0].create_project(project_name)
-         st.sidebar.write(result)
-
-     projects = list(st.session_state.workspace_projects.keys())
-     selected_project = st.sidebar.selectbox("Select a project", projects, index=0 if projects else None)
-     if selected_project:
-         st.session_state.current_project = selected_project
-
-     # Main area
-     if st.session_state.current_project:
-         st.header(f"Current Project: {st.session_state.current_project}")
-
-         # Chat with AI Agent
-         st.subheader("Chat with AI Agent")
-         user_input = st.text_input("Enter your message:")
-         if st.button("Send"):
-             agent_response = example_agents[0].generate_agent_response(user_input, st.session_state.current_project)
-             st.session_state.chat_history.append((user_input, agent_response))
-
-         # Display chat history
-         for user, agent in st.session_state.chat_history:
-             st.text(f"You: {user}")
-             st.text(f"Agent: {agent}")
-
-         # Code Editor
-         st.subheader("Code Editor")
-         file_name = st.text_input("File name:")
-         code = st_ace(language="python", theme="monokai", key="code_editor")
-         if st.button("Save File"):
-             result = example_agents[0].create_file(st.session_state.current_project, file_name, code)
-             st.write(result)
-
-         # Display project files
-         st.subheader("Project Files")
-         if st.session_state.current_project in st.session_state.workspace_projects:
-             for file in st.session_state.workspace_projects[st.session_state.current_project]['files']:
-                 st.text(file)
-                 if st.button(f"View {file}"):
-                     file_path = os.path.join(PROJECT_ROOT, st.session_state.current_project, file)
-                     with open(file_path, "r") as f:
-                         st.code(f.read())

-     else:
-         st.write("Please create or select a project to get started.")

  if __name__ == "__main__":
-     main()
  import os
  import subprocess
+ import random
+ import json
+ from datetime import datetime
+ import gradio as gr # Corrected import for gradio
+
+ class App(gr.Blocks): # Corrected class inheritance
+     def __init__(self):
+         super().__init__()
+         self.app_state = {"components": []}
+         self.terminal_history = ""
+         self.components_registry = {
+             "Button": {
+                 "properties": {
+                     "label": "Click Me",
+                     "onclick": ""
+                 },
+                 "description": "A clickable button",
+                 "code_snippet": "gr.Button(value='{{label}}', variant='primary')"
+             },
+             # ... Other component definitions
+         }
+         self.nlp_model_names = [
+             "google/flan-t5-small",
+             # ... Other NLP model names
+         ]
+         self.nlp_models = []
+         # self.initialize_nlp_models() # Moved to run() for Gradio
+         self.exited = False # Add the missing attribute
+
+     def initialize_nlp_models(self):
+         for nlp_model_name in self.nlp_model_names:
+             try:
+                 # Assuming the use of transformers library for NLP models
+                 from transformers import pipeline
+                 model = pipeline('text-generation', model=nlp_model_name)
+                 self.nlp_models.append(model)
+             except Exception as e:
+                 print(f"Failed to load model {nlp_model_name}: {e}")
+                 self.nlp_models.append(None)
+
+     def get_nlp_response(self, input_text, model_index):
+         if self.nlp_models[model_index]:
+             response = self.nlp_models[model_index](input_text)
+             return response[0]['generated_text']
          else:
+             return "NLP model not available."
+
+     class Component:
+         def __init__(self, type, properties=None, id=None):
+             self.type = type
+             self.properties = properties or {}
+             self.id = id or f"{type}_{random.randint(1000, 9999)}"
+
+         def to_dict(self):
+             return {
+                 "type": self.type,
+                 "properties": self.properties,
+                 "id": self.id
+             }
+
+         def render(self):
+             code_snippet = self.properties.get("code_snippet", "")
+             for key, value in self.properties.items():
+                 code_snippet = code_snippet.replace(f"{{{{{key}}}}}", str(value))
+             return code_snippet
+
+     def update_app_canvas(self):
+         code = ""
+         for component in self.app_state["components"]:
+             code += component.render() + "\n"
+         return code
+
+     def add_component(self, component_type):
+         if component_type in self.components_registry:
+             component = self.Component(
+                 type=component_type,
+                 properties=self.components_registry[component_type]["properties"]
+             )
+             self.app_state["components"].append(component)
+             # self.update_app_canvas() # Updated to return code
          else:
+             print(f"Component type {component_type} not found in registry.")
+
+     def run_terminal_command(self, command, history):
+         try:
+             result = subprocess.run(command, shell=True, capture_output=True, text=True)
+             history += result.stdout + result.stderr
+             return history
+         except Exception as e:
+             return str(e)
+
+     def compress_history(self, history):
+         lines = history.split('\\n')
+         compressed_lines = [line for line in lines if not line.strip().startswith('#')]
+         return '\\n'.join(compressed_lines)
+
+     def understand_test_results(self, test_results):
+         # Placeholder for understanding test results
+         return "Test results understood."
+
+     def get_help_message(self):
+         return "Available commands: add_component, run_terminal_command, compress_history, understand_test_results, get_help_message"
+
+     def process_input(self, input_text):
+         if input_text.startswith("add_component"):
+             _, component_type = input_text.split()
+             self.add_component(component_type)
+             return self.update_app_canvas() # Return updated code
+         elif input_text.startswith("run_terminal_command"):
+             _, command = input_text.split(maxsplit=1)
+             self.terminal_history = self.run_terminal_command(command, self.terminal_history)
+             return self.terminal_history
+         elif input_text.startswith("compress_history"):
+             self.terminal_history = self.compress_history(self.terminal_history)
+             return self.terminal_history
+         elif input_text.startswith("understand_test_results"):
+             _, test_results = input_text.split(maxsplit=1)
+             return self.understand_test_results(test_results)
+         elif input_text == "get_help_message":
+             return self.get_help_message()
+         else:
+             return "Unknown command. Type 'get_help_message' for available commands."

+     def run(self):
+         self.initialize_nlp_models() # Initialize NLP models here
+         with gr.Blocks() as demo:
+             input_text = gr.Textbox(label="Enter your command:")
+             output_text = gr.Textbox(label="Output:")
+             btn = gr.Button("Run")
+             btn.click(self.process_input, inputs=[input_text], outputs=[output_text])
+         demo.launch()

  if __name__ == "__main__":
+     app = App()
+     app.run()
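
For reference, the new Component.render method fills {{key}} placeholders in a registered code_snippet with the component's property values. Below is a minimal standalone sketch of that substitution pattern; the render_snippet helper and the hard-coded button_template are illustrative only and not part of the commit.

    def render_snippet(template: str, properties: dict) -> str:
        # Replace each {{key}} placeholder with the matching property value,
        # mirroring the loop inside Component.render above.
        for key, value in properties.items():
            template = template.replace(f"{{{{{key}}}}}", str(value))
        return template

    # Illustrative template mirroring the "Button" entry in components_registry.
    button_template = "gr.Button(value='{{label}}', variant='primary')"
    print(render_snippet(button_template, {"label": "Click Me"}))
    # -> gr.Button(value='Click Me', variant='primary')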