Spaces: Build error
acecalisto3 committed · commit 3dcb660 · Parent(s): a380c4b
Update app.py

app.py CHANGED
@@ -1,173 +1,137 @@
- import streamlit as st
- from streamlit_ace import st_ace
- from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
- from huggingface_hub import HfApi
  import os
  import subprocess
- import
-
- from
- import
- (old lines 11-50: blank lines removed)
-         self.description = description
-         self.skills = skills
-
-     def generate_agent_response(self, prompt: str, project_name: str) -> str:
-         # Simple logic to demonstrate agent capabilities
-         if "create file" in prompt.lower():
-             file_name = re.search(r"create file (\w+\.?\w*)", prompt.lower())
-             if file_name:
-                 return self.create_file(project_name, file_name.group(1), "# New file created")
-             else:
-                 return "I'm sorry, I couldn't understand the file name. Please specify the file name clearly."
-
-         elif "add code" in prompt.lower():
-             file_name = re.search(r"add code to (\w+\.?\w*)", prompt.lower())
-             if file_name:
-                 code_to_add = prompt.split("add code to")[1].split(":", 1)[-1].strip()
-                 return self.add_code_to_file(project_name, file_name.group(1), code_to_add)
-             else:
-                 return "I'm sorry, I couldn't understand the file name. Please specify the file name clearly."
-
-         elif "create project" in prompt.lower():
-             new_project_name = re.search(r"create project (\w+)", prompt.lower())
-             if new_project_name:
-                 return self.create_project(new_project_name.group(1))
-             else:
-                 return "I'm sorry, I couldn't understand the project name. Please specify the project name clearly."
-
          else:
-             return
- (old lines 80-86: blank lines removed)
          else:
-
-
-     def
- (old lines 91-129: blank lines removed)
-     projects = list(st.session_state.workspace_projects.keys())
-     selected_project = st.sidebar.selectbox("Select a project", projects, index=0 if projects else None)
-     if selected_project:
-         st.session_state.current_project = selected_project
-
-     # Main area
-     if st.session_state.current_project:
-         st.header(f"Current Project: {st.session_state.current_project}")
-
-         # Chat with AI Agent
-         st.subheader("Chat with AI Agent")
-         user_input = st.text_input("Enter your message:")
-         if st.button("Send"):
-             agent_response = example_agents[0].generate_agent_response(user_input, st.session_state.current_project)
-             st.session_state.chat_history.append((user_input, agent_response))
-
-         # Display chat history
-         for user, agent in st.session_state.chat_history:
-             st.text(f"You: {user}")
-             st.text(f"Agent: {agent}")
-
-         # Code Editor
-         st.subheader("Code Editor")
-         file_name = st.text_input("File name:")
-         code = st_ace(language="python", theme="monokai", key="code_editor")
-         if st.button("Save File"):
-             result = example_agents[0].create_file(st.session_state.current_project, file_name, code)
-             st.write(result)
-
-         # Display project files
-         st.subheader("Project Files")
-         if st.session_state.current_project in st.session_state.workspace_projects:
-             for file in st.session_state.workspace_projects[st.session_state.current_project]['files']:
-                 st.text(file)
-                 if st.button(f"View {file}"):
-                     file_path = os.path.join(PROJECT_ROOT, st.session_state.current_project, file)
-                     with open(file_path, "r") as f:
-                         st.code(f.read())

-
-

  if __name__ == "__main__":
-
  import os
  import subprocess
+ import random
+ import json
+ from datetime import datetime
+ import gradio as gr  # Corrected import for gradio
+
+ class App(gr.Blocks):  # Corrected class inheritance
+     def __init__(self):
+         super().__init__()
+         self.app_state = {"components": []}
+         self.terminal_history = ""
+         self.components_registry = {
+             "Button": {
+                 "properties": {
+                     "label": "Click Me",
+                     "onclick": ""
+                 },
+                 "description": "A clickable button",
+                 "code_snippet": "gr.Button(value='{{label}}', variant='primary')"
+             },
+             # ... Other component definitions
+         }
+         self.nlp_model_names = [
+             "google/flan-t5-small",
+             # ... Other NLP model names
+         ]
+         self.nlp_models = []
+         # self.initialize_nlp_models()  # Moved to run() for Gradio
+         self.exited = False  # Add the missing attribute
+
+     def initialize_nlp_models(self):
+         for nlp_model_name in self.nlp_model_names:
+             try:
+                 # Assuming the use of transformers library for NLP models
+                 from transformers import pipeline
+                 model = pipeline('text-generation', model=nlp_model_name)
+                 self.nlp_models.append(model)
+             except Exception as e:
+                 print(f"Failed to load model {nlp_model_name}: {e}")
+                 self.nlp_models.append(None)
+
+     def get_nlp_response(self, input_text, model_index):
+         if self.nlp_models[model_index]:
+             response = self.nlp_models[model_index](input_text)
+             return response[0]['generated_text']
          else:
+             return "NLP model not available."
+
+     class Component:  # nested helper; instantiated via self.Component in add_component
+         def __init__(self, type, properties=None, id=None):
+             self.type = type
+             self.properties = properties or {}
+             self.id = id or f"{type}_{random.randint(1000, 9999)}"
+
+         def to_dict(self):
+             return {
+                 "type": self.type,
+                 "properties": self.properties,
+                 "id": self.id
+             }
+
+         def render(self):
+             code_snippet = self.properties.get("code_snippet", "")
+             for key, value in self.properties.items():
+                 code_snippet = code_snippet.replace(f"{{{{{key}}}}}", str(value))
+             return code_snippet
+
+     def update_app_canvas(self):
+         code = ""
+         for component in self.app_state["components"]:
+             code += component.render() + "\n"
+         return code
+
+     def add_component(self, component_type):
+         if component_type in self.components_registry:
+             component = self.Component(
+                 type=component_type,
+                 properties=self.components_registry[component_type]["properties"]
+             )
+             self.app_state["components"].append(component)
+             # self.update_app_canvas()  # Updated to return code
          else:
+             print(f"Component type {component_type} not found in registry.")
+
+     def run_terminal_command(self, command, history):
+         try:
+             result = subprocess.run(command, shell=True, capture_output=True, text=True)
+             history += result.stdout + result.stderr
+             return history
+         except Exception as e:
+             return str(e)
+
+     def compress_history(self, history):
+         lines = history.split('\n')
+         compressed_lines = [line for line in lines if not line.strip().startswith('#')]
+         return '\n'.join(compressed_lines)
+
+     def understand_test_results(self, test_results):
+         # Placeholder for understanding test results
+         return "Test results understood."
+
+     def get_help_message(self):
+         return "Available commands: add_component, run_terminal_command, compress_history, understand_test_results, get_help_message"
+
+     def process_input(self, input_text):
+         if input_text.startswith("add_component"):
+             _, component_type = input_text.split()
+             self.add_component(component_type)
+             return self.update_app_canvas()  # Return updated code
+         elif input_text.startswith("run_terminal_command"):
+             _, command = input_text.split(maxsplit=1)
+             self.terminal_history = self.run_terminal_command(command, self.terminal_history)
+             return self.terminal_history
+         elif input_text.startswith("compress_history"):
+             self.terminal_history = self.compress_history(self.terminal_history)
+             return self.terminal_history
+         elif input_text.startswith("understand_test_results"):
+             _, test_results = input_text.split(maxsplit=1)
+             return self.understand_test_results(test_results)
+         elif input_text == "get_help_message":
+             return self.get_help_message()
+         else:
+             return "Unknown command. Type 'get_help_message' for available commands."

+     def run(self):
+         self.initialize_nlp_models()  # Initialize NLP models here
+         with gr.Blocks() as demo:
+             input_text = gr.Textbox(label="Enter your command:")
+             output_text = gr.Textbox(label="Output:")
+             btn = gr.Button("Run")
+             btn.click(self.process_input, inputs=[input_text], outputs=[output_text])
+         demo.launch()

  if __name__ == "__main__":
+     app = App()
+     app.run()
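
Usage note: the sketch below shows how the committed command interface can be exercised directly, bypassing the Gradio UI. It is illustrative only; the "from app import App" import path and the sample commands are assumptions rather than part of this commit, and it presumes gradio and transformers are installed.

# Minimal sketch: drive App.process_input directly instead of launching the UI.
from app import App  # assumed import path for the committed app.py

app = App()  # NLP models are only loaded in run(), so construction stays cheap

# List the commands the parser understands.
print(app.process_input("get_help_message"))

# Register a Button from the components registry. add_component copies only the
# registry "properties" (not "code_snippet"), so the canvas code returned by
# update_app_canvas() is just a blank line here.
print(repr(app.process_input("add_component Button")))

# Run a shell command; stdout and stderr accumulate in terminal_history.
print(app.process_input("run_terminal_command echo hello"))

Launching the full interface remains running app.py itself, which constructs App() and calls run() to start the Gradio Blocks demo.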