acecalisto3 committed
Commit db3ccfe · verified · 1 parent: d1ba91c

Update app.py

Files changed (1):
  1. app.py  +417 -298
app.py CHANGED
@@ -1,303 +1,422 @@
- import os
- import json
- import time
- from typing import Dict, List, Tuple

- import gradio as gr
- import streamlit as st
- from huggingface_hub import InferenceClient, hf_hub_url, cached_download
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
- from rich import print as rprint
- from rich.panel import Panel
- from rich.progress import track
- from rich.table import Table
  import subprocess
- import threading
- import git
- from langchain.llms import HuggingFaceHub
- from langchain.chains import ConversationChain
- from langchain.memory import ConversationBufferMemory
- from langchain.chains.question_answering import load_qa_chain
- from langchain.text_splitter import CharacterTextSplitter
- from langchain_community.document_loaders import TextLoader
- from streamlit_ace import st_ace
- from streamlit_chat import message
-
- # --- Constants ---
- MODEL_NAME = "google/flan-t5-xl" # Consider using a more powerful model
- MAX_NEW_TOKENS = 2048 # Increased for better code generation
- TEMPERATURE = 0.7
- TOP_P = 0.95
- REPETITION_PENALTY = 1.2
-
- # --- Model & Tokenizer ---
- @st.cache_resource
- def load_model_and_tokenizer():
-     model = AutoModelForCausalLM.from_pretrained(MODEL_NAME, device_map="auto") # Use 'auto' for optimal device selection
-     tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
-     return model, tokenizer
-
- model, tokenizer = load_model_and_tokenizer()
-
- # --- Agents ---
- agents = {
-     "WEB_DEV": {
-         "description": "Expert in web development technologies and frameworks.",
-         "skills": ["HTML", "CSS", "JavaScript", "React", "Vue.js", "Flask", "Django", "Node.js", "Express.js"],
-         "system_prompt": "You are a web development expert. Your goal is to assist the user in building and deploying web applications. Provide code snippets, explanations, and guidance on best practices.",
-     },
-     "AI_SYSTEM_PROMPT": {
-         "description": "Expert in designing and implementing AI systems.",
-         "skills": ["Machine Learning", "Deep Learning", "Natural Language Processing", "Computer Vision", "Reinforcement Learning"],
-         "system_prompt": "You are an AI system expert. Your goal is to assist the user in designing and implementing AI systems. Provide code snippets, explanations, and guidance on best practices.",
-     },
-     "PYTHON_CODE_DEV": {
-         "description": "Expert in Python programming and development.",
-         "skills": ["Python", "Data Structures", "Algorithms", "Object-Oriented Programming", "Functional Programming"],
-         "system_prompt": "You are a Python code development expert. Your goal is to assist the user in writing and debugging Python code. Provide code snippets, explanations, and guidance on best practices.",
-     },
-     "CODE_REVIEW_ASSISTANT": {
-         "description": "Expert in code review and quality assurance.",
-         "skills": ["Code Style", "Best Practices", "Security", "Performance", "Maintainability"],
-         "system_prompt": "You are a code review expert. Your goal is to assist the user in reviewing and improving their code. Provide feedback on code quality, style, and best practices.",
-     },
- }
-
- # --- Session State ---
- if "workspace_projects" not in st.session_state:
-     st.session_state.workspace_projects = {}
- if "chat_history" not in st.session_state:
-     st.session_state.chat_history = []
- if "active_agent" not in st.session_state:
-     st.session_state.active_agent = None
- if "selected_agents" not in st.session_state:
-     st.session_state.selected_agents = []
- if "current_project" not in st.session_state:
-     st.session_state.current_project = None
- if "messages" not in st.session_state:
-     st.session_state.messages = []
-
- # --- Helper Functions ---
- def add_code_to_workspace(project_name: str, code: str, file_name: str):
-     if project_name in st.session_state.workspace_projects:
-         st.session_state.workspace_projects[project_name]['files'].append({'file_name': file_name, 'code': code})
-         return f"Added code to {file_name} in project {project_name}"
-     else:
-         return f"Project {project_name} does not exist"
-
- def terminal_interface(command: str, project_name: str):
-     if project_name in st.session_state.workspace_projects:
-         result = subprocess.run(command, cwd=project_name, shell=True, capture_output=True, text=True)
-         return result.stdout + result.stderr
-     else:
-         return f"Project {project_name} does not exist"
-
- def get_agent_response(message: str, system_prompt: str):
-     llm = HuggingFaceHub(repo_id=MODEL_NAME, model_kwargs={"temperature": TEMPERATURE, "top_p": TOP_P, "repetition_penalty": REPETITION_PENALTY, "max_length": MAX_NEW_TOKENS})
-     memory = ConversationBufferMemory()
-     conversation = ConversationChain(llm=llm, memory=memory)
-     response = conversation.run(system_prompt + "\n" + message)
-     return response
-
- def display_agent_info(agent_name: str):
-     agent = agents[agent_name]
-     st.sidebar.subheader(f"Active Agent: {agent_name}")
-     st.sidebar.write(f"Description: {agent['description']}")
-     st.sidebar.write(f"Skills: {', '.join(agent['skills'])}")
-
- def display_workspace_projects():
-     st.subheader("Workspace Projects")
-     for project_name, project_data in st.session_state.workspace_projects.items():
-         with st.expander(project_name):
-             for file in project_data['files']:
-                 st.text(file['file_name'])
-                 st.code(file['code'], language="python")
-
- def display_chat_history():
-     st.subheader("Chat History")
-     for message in st.session_state.chat_history:
-         st.text(message)
-
- def run_autonomous_build(selected_agents: List[str], project_name: str):
-     st.info("Starting autonomous build process...")
-     for agent in selected_agents:
-         st.write(f"Agent {agent} is working on the project...")
-         code = get_agent_response(f"Generate code for a simple web application in project {project_name}", agents[agent]['system_prompt'])
-         add_code_to_workspace(project_name, code, f"{agent.lower()}_app.py")
-         st.write(f"Agent {agent} has completed its task.")
-     st.success("Autonomous build process completed!")
-
- def collaborative_agent_example(selected_agents: List[str], project_name: str, task: str):
-     st.info(f"Starting collaborative task: {task}")
-     responses = {}
-     for agent in selected_agents:
-         st.write(f"Agent {agent} is working on the task...")
-         response = get_agent_response(task, agents[agent]['system_prompt'])
-         responses[agent] = response
-
-     combined_response = combine_and_process_responses(responses, task)
-     st.success("Collaborative task completed!")
-     st.write(combined_response)
-
- def combine_and_process_responses(responses: Dict[str, str], task: str) -> str:
-     # This is a placeholder function. In a real-world scenario, you would implement
-     # more sophisticated logic to combine and process the responses.
-     combined = "\n\n".join([f"{agent}: {response}" for agent, response in responses.items()])
-     return f"Combined response for task '{task}':\n\n{combined}"
-
- # --- Chat Interface ---
- st.subheader("Chat with AI Agents")
-
- # Display chat messages from history on app rerun
- for message in st.session_state.messages:
-     with st.chat_message(message["role"]):
-         st.markdown(message["content"])
-
- # React to user input
- if prompt := st.chat_input("What is up?"):
-     # Display user message in chat message container
-     st.chat_message("user").markdown(prompt)
-     # Add user message to chat history
-     st.session_state.messages.append({"role": "user", "content": prompt})
-
-     # Process the message with selected agents
-     if st.session_state.selected_agents:
-         responses = {}
-         for agent in st.session_state.selected_agents:
-             response = get_agent_response(prompt, agents[agent]['system_prompt'])
-             responses[agent] = response
-
-         # Combine responses (you may want to implement a more sophisticated combination method)
-         combined_response = "\n".join([f"{agent}: {resp}" for agent, resp in responses.items()])
-
-         # Display assistant response in chat message container
-         with st.chat_message("assistant"):
-             st.markdown(combined_response)
-         # Add assistant response to chat history
-         st.session_state.messages.append({"role": "assistant", "content": combined_response})
-     else:
-         st.warning("Please select at least one agent to chat with.")
-
- # Agent selection
- st.sidebar.subheader("Select AI Agents")
- st.session_state.selected_agents = st.sidebar.multiselect("Select AI agents", list(agents.keys()), key="agent_select")
-
-
- # --- Project Management ---
- st.header("Project Management")
- project_name = st.text_input("Enter project name:")
- if st.button("Create Project"):
-     if project_name and project_name not in st.session_state.workspace_projects:
-         st.session_state.workspace_projects[project_name] = {'files': []}
-         st.success(f"Created project: {project_name}")
-     elif project_name in st.session_state.workspace_projects:
-         st.warning(f"Project {project_name} already exists")
-     else:
-         st.warning("Please enter a project name")
-
- # --- Code Editor ---
- st.subheader("Code Editor")
- if st.session_state.workspace_projects:
-     selected_project = st.selectbox("Select project", list(st.session_state.workspace_projects.keys()))
-     if selected_project:
-         files = [file['file_name'] for file in st.session_state.workspace_projects[selected_project]['files']]
-         selected_file = st.selectbox("Select file to edit", files) if files else None
-         if selected_file:
-             file_content = next((file['code'] for file in st.session_state.workspace_projects[selected_project]['files'] if file['file_name'] == selected_file), "")
-             edited_code = st_ace(value=file_content, language="python", theme="monokai", key="code_editor")
-             if st.button("Save Changes"):
-                 for file in st.session_state.workspace_projects[selected_project]['files']:
-                     if file['file_name'] == selected_file:
-                         file['code'] = edited_code
-                         st.success("Changes saved successfully!")
-                         break
          else:
-             st.info("No files in the project. Use the chat interface to generate code.")
- else:
-     st.info("No projects created yet. Create a project to start coding.")
-
- # --- Terminal Interface ---
- st.subheader("Terminal (Workspace Context)")
- if st.session_state.workspace_projects:
-     selected_project = st.selectbox("Select project for terminal", list(st.session_state.workspace_projects.keys()))
-     terminal_input = st.text_input("Enter a command within the workspace:")
-     if st.button("Run Command"):
-         terminal_output = terminal_interface(terminal_input, selected_project)
-         st.code(terminal_output, language="bash")
- else:
-     st.info("No projects created yet. Create a project to use the terminal.")
-
- # --- Chat Interface ---
- st.subheader("Chat with ")
- selected_agents = st.multiselect("Select AI agents", list(agents.keys()), key="agent_select")
- st.session_state.selected_agents = selected_agents
- agent_chat_input = st.text_area("Enter your message for the agents:", key="agent_input")
- if st.button("Send to Agents", key="agent_send"):
-     if selected_agents and agent_chat_input:
-         responses = {}
-         for agent in selected_agents:
-             response = get_agent_response(agent_chat_input, agents[agent]['system_prompt'])
-             responses[agent] = response
-         st.session_state.chat_history.append(f"User: {agent_chat_input}")
-         for agent, response in responses.items():
-             st.session_state.chat_history.append(f"{agent}: {response}")
-         st_chat(st.session_state.chat_history) # Display chat history using st_chat
-     else:
-         st.warning("Please select at least one agent and enter a message.")
-
- # --- Agent Control ---
- st.subheader("Agent Control")
- for agent_name in agents:
-     agent = agents[agent_name]
-     with st.expander(f"{agent_name} ({agent['description']})"):
-         if st.button(f"Activate {agent_name}", key=f"activate_{agent_name}"):
-             st.session_state.active_agent = agent_name
-             st.success(f"{agent_name} activated.")
-         if st.button(f"Deactivate {agent_name}", key=f"deactivate_{agent_name}"):
-             st.session_state.active_agent = None
-             st.success(f"{agent_name} deactivated.")
-
- # --- Automate Build Process ---
- st.subheader("Automate Build Process")
- if st.button("Automate"):
-     if st.session_state.selected_agents and project_name:
-         run_autonomous_build(st.session_state.selected_agents, project_name)
-     else:
-         st.warning("Please select at least one agent and create a project.")
-
- # --- Version Control ---
- st.subheader("Version Control")
- repo_url = st.text_input("Enter repository URL:")
- if st.button("Clone Repository"):
-     if repo_url and project_name:
-         try:
-             git.Repo.clone_from(repo_url, project_name)
-             st.success(f"Repository cloned successfully to {project_name}")
-         except git.GitCommandError as e:
-             st.error(f"Error cloning repository: {e}")
-     else:
-         st.warning("Please enter a repository URL and create a project.")
-
- # --- Collaborative Agent Example ---
- st.subheader("Collaborative Agent Example")
- collab_agents = st.multiselect("Select AI agents for collaboration", list(agents.keys()), key="collab_agent_select")
- collab_project = st.text_input("Enter project name for collaboration:")
- collab_task = st.text_input("Enter collaborative task:")
- if st.button("Run Collaborative Task"):
-     if collab_agents and collab_project and collab_task:
-         collaborative_agent_example(collab_agents, collab_project, collab_task)
-     else:
-         st.warning("Please select agents, enter a project name, and specify a task.")
-
- # --- Display Information ---
- st.sidebar.subheader("Current State")
- st.sidebar.json(st.session_state)
- if st.session_state.active_agent:
-     display_agent_info(st.session_state.active_agent)
- display_workspace_projects()
- display_chat_history()

  if __name__ == "__main__":
-     st.sidebar.title("DevToolKit")
-     st.sidebar.info("This is an AI-powered development environment.")
-     st.run()

+
+ import os
  import subprocess
+ from huggingface_hub import InferenceClient
+ import gradio as gr
+ import random
+ import time
+ from typing import List, Dict
+ from flask import Flask, request, jsonify
+
+ # Constants
+ AGENT_TYPES = [
+     "Task Executor",
+     "Information Retriever",
+     "Decision Maker",
+     "Data Analyzer",
+ ]
+ TOOL_TYPES = [
+     "Web Scraper",
+     "Database Connector",
+     "API Caller",
+     "File Handler",
+     "Text Processor",
+ ]
+ VERBOSE = False
+ MAX_HISTORY = 100
+ MODEL = "mistralai/Mixtral-8x7B-Instruct-v0.1"
+
+ # Initialize Hugging Face client
+ client = InferenceClient(MODEL)
+
+ # Import necessary prompts and functions from the existing code
+ from .prompts import (
+     ACTION_PROMPT,
+     ADD_PROMPT,
+     COMPRESS_HISTORY_PROMPT,
+     LOG_PROMPT,
+     LOG_RESPONSE,
+     MODIFY_PROMPT,
+     PREFIX,
+     READ_PROMPT,
+     TASK_PROMPT,
+     UNDERSTAND_TEST_RESULTS_PROMPT,
+ )
+ from .utils import parse_action, parse_file_content, read_python_module_structure
+
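+ # --- Data model ---
+ # Agent holds a name, an agent type, a complexity score, and its attached tools;
+ # Tool pairs a name with a tool type.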
+ class Agent:
+     def __init__(self, name: str, agent_type: str, complexity: int):
+         self.name = name
+         self.type = agent_type
+         self.complexity = complexity
+         self.tools = []
+
+     def add_tool(self, tool):
+         self.tools.append(tool)
+
+     def __str__(self):
+         return f"{self.name} ({self.type}) - Complexity: {self.complexity}"
+
+ class Tool:
+     def __init__(self, name: str, tool_type: str):
+         self.name = name
+         self.type = tool_type
+
+     def __str__(self):
+         return f"{self.name} ({self.type})"
+
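+ # Pypelyne orchestrates the registered agents and tools: it keeps a running
+ # thought/action/observation history, queries the hosted model through
+ # run_gpt, and dispatches parsed actions to the call_* handlers below.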
+ class Pypelyne:
+     def __init__(self):
+         self.agents: List[Agent] = []
+         self.tools: List[Tool] = []
+         self.history = ""
+         self.task = None
+         self.purpose = None
+         self.directory = None
+
+     def add_agent(self, agent: Agent):
+         self.agents.append(agent)
+
+     def add_tool(self, tool: Tool):
+         self.tools.append(tool)
+
+     def generate_chat_app(self):
+         time.sleep(2) # Simulate processing time
+         return f"Chat app generated with {len(self.agents)} agents and {len(self.tools)} tools."
+
+     def run_gpt(self, prompt_template, stop_tokens, max_tokens, **prompt_kwargs):
+         content = PREFIX.format(
+             module_summary=read_python_module_structure(self.directory)[0],
+             purpose=self.purpose,
+         ) + prompt_template.format(**prompt_kwargs)
+
+         if VERBOSE:
+             print(LOG_PROMPT.format(content))
+
+         stream = client.text_generation(
+             prompt=content,
+             max_new_tokens=max_tokens,
+             stop_sequences=stop_tokens if stop_tokens else None,
+             do_sample=True,
+             temperature=0.7,
+         )
+
+         resp = "".join(token for token in stream)
+
+         if VERBOSE:
+             print(LOG_RESPONSE.format(resp))
+         return resp
+
+     def compress_history(self):
+         resp = self.run_gpt(
+             COMPRESS_HISTORY_PROMPT,
+             stop_tokens=["observation:", "task:", "action:", "thought:"],
+             max_tokens=512,
+             task=self.task,
+             history=self.history,
+         )
+         self.history = f"observation: {resp}\n"
+
+     def run_action(self, action_name, action_input):
+         if action_name == "COMPLETE":
+             return "Task completed."
+
+         if len(self.history.split("\n")) > MAX_HISTORY:
+             if VERBOSE:
+                 print("COMPRESSING HISTORY")
+             self.compress_history()
+
+         action_funcs = {
+             "MAIN": self.call_main,
+             "UPDATE-TASK": self.call_set_task,
+             "MODIFY-FILE": self.call_modify,
+             "READ-FILE": self.call_read,
+             "ADD-FILE": self.call_add,
+             "TEST": self.call_test,
+         }
+
+         if action_name not in action_funcs:
+             return f"Unknown action: {action_name}"
+
+         print(f"RUN: {action_name} {action_input}")
+         return action_funcs[action_name](action_input)
+
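+     # Each call_* handler below formats one prompt, queries the model via run_gpt,
+     # and records the outcome in the shared history before returning a summary string.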
+     def call_main(self, action_input):
+         resp = self.run_gpt(
+             ACTION_PROMPT,
+             stop_tokens=["observation:", "task:"],
+             max_tokens=256,
+             task=self.task,
+             history=self.history,
+         )
+         lines = resp.strip().strip("\n").split("\n")
+         for line in lines:
+             if line == "":
+                 continue
+             if line.startswith("thought: "):
+                 self.history += f"{line}\n"
+             elif line.startswith("action: "):
+                 action_name, action_input = parse_action(line)
+                 self.history += f"{line}\n"
+                 return self.run_action(action_name, action_input)
+         return "No valid action found."
+
+     def call_set_task(self, action_input):
+         self.task = self.run_gpt(
+             TASK_PROMPT,
+             stop_tokens=[],
+             max_tokens=64,
+             task=self.task,
+             history=self.history,
+         ).strip("\n")
+         self.history += f"observation: task has been updated to: {self.task}\n"
+         return f"Task updated: {self.task}"
+
+     def call_modify(self, action_input):
+         if not os.path.exists(action_input):
+             self.history += "observation: file does not exist\n"
+             return "File does not exist."
+
+         content = read_python_module_structure(self.directory)[1]
+         f_content = (
+             content[action_input] if content[action_input] else "< document is empty >"
+         )
+
+         resp = self.run_gpt(
+             MODIFY_PROMPT,
+             stop_tokens=["action:", "thought:", "observation:"],
+             max_tokens=2048,
+             task=self.task,
+             history=self.history,
+             file_path=action_input,
+             file_contents=f_content,
+         )
+         new_contents, description = parse_file_content(resp)
+         if new_contents is None:
+             self.history += "observation: failed to modify file\n"
+             return "Failed to modify file."
+
+         with open(action_input, "w") as f:
+             f.write(new_contents)
+
+         self.history += f"observation: file successfully modified\n"
+         self.history += f"observation: {description}\n"
+         return f"File modified: {action_input}"
+
+     def call_read(self, action_input):
+         if not os.path.exists(action_input):
+             self.history += "observation: file does not exist\n"
+             return "File does not exist."
+
+         content = read_python_module_structure(self.directory)[1]
+         f_content = (
+             content[action_input] if content[action_input] else "< document is empty >"
+         )
+
+         resp = self.run_gpt(
+             READ_PROMPT,
+             stop_tokens=[],
+             max_tokens=256,
+             task=self.task,
+             history=self.history,
+             file_path=action_input,
+             file_contents=f_content,
+         ).strip("\n")
+         self.history += f"observation: {resp}\n"
+         return f"File read: {action_input}"
+
+     def call_add(self, action_input):
+         d = os.path.dirname(action_input)
+         if not d.startswith(self.directory):
+             self.history += (
+                 f"observation: files must be under directory {self.directory}\n"
+             )
+             return f"Invalid directory: {d}"
+         elif not action_input.endswith(".py"):
+             self.history += "observation: can only write .py files\n"
+             return "Only .py files are allowed."
+         else:
+             if d and not os.path.exists(d):
+                 os.makedirs(d)
+             if not os.path.exists(action_input):
+                 resp = self.run_gpt(
+                     ADD_PROMPT,
+                     stop_tokens=["action:", "thought:", "observation:"],
+                     max_tokens=2048,
+                     task=self.task,
+                     history=self.history,
+                     file_path=action_input,
+                 )
+                 new_contents, description = parse_file_content(resp)
+                 if new_contents is None:
+                     self.history += "observation: failed to write file\n"
+                     return "Failed to write file."
+
+                 with open(action_input, "w") as f:
+                     f.write(new_contents)
+
+                 self.history += "observation: file successfully written\n"
+                 self.history += f"observation: {description}\n"
+                 return f"File added: {action_input}"
+             else:
+                 self.history += "observation: file already exists\n"
+                 return "File already exists."
+
+     def call_test(self, action_input):
+         result = subprocess.run(
+             ["python", "-m", "pytest", "--collect-only", self.directory],
+             capture_output=True,
+             text=True,
+         )
+         if result.returncode != 0:
+             self.history += f"observation: there are no tests! Test should be written in a test folder under {self.directory}\n"
+             return "No tests found."
+         result = subprocess.run(
+             ["python", "-m", "pytest", self.directory], capture_output=True, text=True
+         )
+         if result.returncode == 0:
+             self.history += "observation: tests pass\n"
+             return "All tests passed."
+
+         resp = self.run_gpt(
+             UNDERSTAND_TEST_RESULTS_PROMPT,
+             stop_tokens=[],
+             max_tokens=256,
+             task=self.task,
+             history=self.history,
+             stdout=result.stdout[:5000],
+             stderr=result.stderr[:5000],
+         )
+         self.history += f"observation: tests failed: {resp}\n"
+         return f"Tests failed: {resp}"
+
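+ # Module-level Pypelyne instance and small factory helpers used by the Flask routes below.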
+ pypelyne = Pypelyne()
+
+ def create_agent(name: str, agent_type: str, complexity: int) -> Agent:
+     agent = Agent(name, agent_type, complexity)
+     pypelyne.add_agent(agent)
+     return agent
+
+ def create_tool(name: str, tool_type: str) -> Tool:
+     tool = Tool(name, tool_type)
+     pypelyne.add_tool(tool)
+     return tool
+
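+ # The Flask front end exposes three minimal HTML forms: /chat feeds user input
+ # to pypelyne.run_action("MAIN", ...), while /create_agent and /create_tool
+ # register new agents and tools.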
+ def main():
+     # Create a Flask app
+     app = Flask(__name__)
+
+     # Define a route for the chat interface
+     @app.route("/chat", methods=["GET", "POST"])
+     def chat():
+         if request.method == "POST":
+             # Get the user's input
+             user_input = request.form["input"]
+
+             # Run the input through the Pypelyne
+             response = pypelyne.run_action("MAIN", user_input)
+
+             # Return the response
+             return jsonify({"response": response})
+         else:
+             # Return the chat interface
+             return """
+             <html>
+                 <body>
+                     <h1>Pypelyne Chat Interface</h1>
+                     <form action="/chat" method="post">
+                         <input type="text" name="input" placeholder="Enter your input">
+                         <input type="submit" value="Submit">
+                     </form>
+                     <div id="response"></div>
+                     <script>
+                         // Update the response div with the response from the server
+                         function updateResponse(response) {
+                             document.getElementById("response").innerHTML = response;
+                         }
+                     </script>
+                 </body>
+             </html>
+             """
+
+     # Define a route for the agent creation interface
+     @app.route("/create_agent", methods=["GET", "POST"])
+     def create_agent_interface():
+         if request.method == "POST":
+             # Get the agent's name, type, and complexity
+             name = request.form["name"]
+             agent_type = request.form["type"]
+             complexity = int(request.form["complexity"])
+
+             # Create the agent
+             agent = create_agent(name, agent_type, complexity)
+
+             # Return a success message
+             return jsonify({"message": f"Agent {name} created successfully"})
+         else:
+             # Return the agent creation interface
+             return """
+             <html>
+                 <body>
+                     <h1>Create Agent</h1>
+                     <form action="/create_agent" method="post">
+                         <label for="name">Name:</label>
+                         <input type="text" id="name" name="name"><br><br>
+                         <label for="type">Type:</label>
+                         <select id="type" name="type">
+                             <option value="Task Executor">Task Executor</option>
+                             <option value="Information Retriever">Information Retriever</option>
+                             <option value="Decision Maker">Decision Maker</option>
+                             <option value="Data Analyzer">Data Analyzer</option>
+                         </select><br><br>
+                         <label for="complexity">Complexity:</label>
+                         <input type="number" id="complexity" name="complexity"><br><br>
+                         <input type="submit" value="Create Agent">
+                     </form>
+                 </body>
+             </html>
+             """
+
+     # Define a route for the tool creation interface
+     @app.route("/create_tool", methods=["GET", "POST"])
+     def create_tool_interface():
+         if request.method == "POST":
+             # Get the tool's name and type
+             name = request.form["name"]
+             tool_type = request.form["type"]
+
+             # Create the tool
+             tool = create_tool(name, tool_type)
+
+             # Return a success message
+             return jsonify({"message": f"Tool {name} created successfully"})
          else:
+             # Return the tool creation interface
+             return """
+             <html>
+                 <body>
+                     <h1>Create Tool</h1>
+                     <form action="/create_tool" method="post">
+                         <label for="name">Name:</label>
+                         <input type="text" id="name" name="name"><br><br>
+                         <label for="type">Type:</label>
+                         <select id="type" name="type">
+                             <option value="Web Scraper">Web Scraper</option>
+                             <option value="Database Connector">Database Connector</option>
+                             <option value="API Caller">API Caller</option>
+                             <option value="File Handler">File Handler</option>
+                             <option value="Text Processor">Text Processor</option>
+                         </select><br><br>
+                         <input type="submit" value="Create Tool">
+                     </form>
+                 </body>
+             </html>
+             """
+
+     # Run the app
+     if __name__ == "__main__":
+         app.run(debug=True)

  if __name__ == "__main__":
+     main()