acecalisto3 commited on
Commit
db55fcf
·
verified ·
1 Parent(s): ce1777c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +347 -370
app.py CHANGED
@@ -1,374 +1,351 @@
 
1
  import os
2
  import subprocess
3
- import random
4
- from huggingface_hub import InferenceClient
5
- import gradio as gr
6
- from safe_search import safe_search
7
- from i_search import google
8
- from i_search import i_search as i_s
9
- from agent import (
10
- ACTION_PROMPT,
11
- ADD_PROMPT,
12
- COMPRESS_HISTORY_PROMPT,
13
- LOG_PROMPT,
14
- LOG_RESPONSE,
15
- MODIFY_PROMPT,
16
- PREFIX,
17
- SEARCH_QUERY,
18
- READ_PROMPT,
19
- TASK_PROMPT,
20
- UNDERSTAND_TEST_RESULTS_PROMPT,
21
- )
22
- from utils import parse_action, parse_file_content, read_python_module_structure
23
  from datetime import datetime
24
- now = datetime.now()
25
- date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
26
-
27
- client = InferenceClient(
28
- "mistralai/Mixtral-8x7B-Instruct-v0.1",
29
- )
30
-
31
-
32
- ############################################
33
-
34
-
35
- VERBOSE = True
36
- MAX_HISTORY = 125
37
-
38
-
39
def format_prompt(message, history):
    """Build a Mixtral-style instruction prompt from prior chat turns plus the new message."""
    turns = ["<s>"]
    for user_prompt, bot_response in history:
        turns.append(f"[INST] {user_prompt} [/INST] {bot_response}</s> ")
    turns.append(f"[INST] {message} [/INST]")
    return "".join(turns)
46
-
47
-
48
-
49
def run_gpt(
    prompt_template,
    stop_tokens,
    max_tokens,
    purpose,
    **prompt_kwargs,
):
    """Stream a completion from the Mixtral endpoint for the given prompt template.

    NOTE(review): ``stop_tokens`` and ``max_tokens`` are accepted but never
    forwarded to the model — generation always uses the fixed kwargs below;
    confirm whether that is intentional.
    """
    # Fresh random seed per call so identical prompts can still diverge.
    run_seed = random.randint(1, 1111111111111111)
    print(run_seed)
    sampling_options = {
        "temperature": 1.0,
        "max_new_tokens": 2096,
        "top_p": 0.99,
        "repetition_penalty": 1.7,
        "do_sample": True,
        "seed": run_seed,
    }

    # Prepend the shared PREFIX (timestamp, purpose, safe-search helper) to the task prompt.
    content = PREFIX.format(
        date_time_str=date_time_str,
        purpose=purpose,
        safe_search=safe_search,
    ) + prompt_template.format(**prompt_kwargs)
    if VERBOSE:
        print(LOG_PROMPT.format(content))

    token_stream = client.text_generation(
        content, **sampling_options, stream=True, details=True, return_full_text=False
    )
    pieces = []
    for chunk in token_stream:
        pieces.append(chunk.token.text)
    resp = "".join(pieces)

    if VERBOSE:
        print(LOG_RESPONSE.format(resp))
    return resp
88
-
89
-
90
def compress_history(purpose, task, history, directory):
    """Summarize a long running history into a single observation line via the LLM."""
    summary = run_gpt(
        COMPRESS_HISTORY_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "thought:"],
        max_tokens=5096,
        purpose=purpose,
        task=task,
        history=history,
    )
    # The compressed observation replaces the entire prior history.
    return "observation: {}\n".format(summary)
101
-
102
def call_search(purpose, task, history, directory, action_input):
    """Resolve a SEARCH action: fetch results for a URL-bearing input and append an observation.

    Always transitions back to the MAIN state; failures are recorded in history
    rather than raised.
    """
    print("CALLING SEARCH")
    try:
        if "http" not in action_input:
            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
        else:
            target = action_input
            # Strip angle brackets the model sometimes wraps URLs in.
            if "<" in target:
                target = target.strip("<")
            if ">" in target:
                target = target.strip(">")
            result = i_s(target)
            print(result)
            history += "observation: search result is: {}\n".format(result)
    except Exception as err:
        history += "observation: {}'\n".format(err)
    return "MAIN", None, history, task
121
-
122
def call_main(purpose, task, history, directory, action_input):
    """Ask the model for the next step and act on the first action line it emits.

    Thought lines and unrecognized lines are appended to history; the first
    ``action:`` line is parsed and returned as the next state transition.
    """
    resp = run_gpt(
        ACTION_PROMPT,
        stop_tokens=["observation:", "task:", "action:", "though:"],
        max_tokens=5096,
        purpose=purpose,
        task=task,
        history=history,
    )
    for line in resp.strip().strip("\n").split("\n"):
        if not line:
            continue
        if line.startswith("thought: "):
            history += "{}\n".format(line)
        elif line.startswith("action: "):
            action_name, action_input = parse_action(line)
            print(f'ACTION_NAME :: {action_name}')
            print(f'ACTION_INPUT :: {action_input}')
            history += "{}\n".format(line)
            # An explicit COMPLETE anywhere in the action terminates the task.
            if "COMPLETE" in action_name or "COMPLETE" in action_input:
                task = "END"
            return action_name, action_input, history, task
        else:
            history += "{}\n".format(line)
    # No action line was produced; remain in the MAIN state.
    return "MAIN", None, history, task
156
-
157
-
158
def call_set_task(purpose, task, history, directory, action_input):
    """Have the model propose a fresh task, then record the change in history."""
    new_task = run_gpt(
        TASK_PROMPT,
        stop_tokens=[],
        max_tokens=2048,
        purpose=purpose,
        task=task,
        history=history,
    ).strip("\n")
    history += "observation: task has been updated to: {}\n".format(new_task)
    return "MAIN", None, history, new_task
169
-
170
def end_fn(purpose, task, history, directory, action_input):
    """Terminal state handler: mark the task finished and report completion."""
    return "COMPLETE", "COMPLETE", history, "END"
173
-
174
# Dispatch table mapping action names emitted by the model to their handlers.
NAME_TO_FUNC = {
    "MAIN": call_main,            # decide the next action
    "UPDATE-TASK": call_set_task, # replace the current task
    "SEARCH": call_search,        # web lookup
    "COMPLETE": end_fn,           # finish the run
}
181
-
182
def run_action(purpose, task, history, directory, action_name, action_input):
    """Dispatch one agent action, compressing history first when it grows too long.

    Returns the ``(action_name, action_input, history, task)`` tuple produced
    by the selected handler.  Any exception falls back to the MAIN state with a
    generic observation appended (message preserved from the original), but the
    real error is now logged instead of silently swallowed.
    """
    print(f'action_name::{action_name}')
    try:
        # RESPONSE/COMPLETE both short-circuit straight to the terminal state.
        if "RESPONSE" in action_name or "COMPLETE" in action_name:
            return "COMPLETE", "COMPLETE", history, "END"

        # Compress the history when it is long.
        if len(history.split("\n")) > MAX_HISTORY:
            if VERBOSE:
                print("COMPRESSING HISTORY")
            history = compress_history(purpose, task, history, directory)

        # Unknown or empty action names fall back to MAIN.
        if not action_name or action_name not in NAME_TO_FUNC:
            action_name = "MAIN"

        print("RUN: ", action_name, action_input)
        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
    except Exception as e:
        # Log the actual failure (the original discarded it entirely).
        print(f"run_action error: {e}")
        history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
        return "MAIN", None, history, task
207
-
208
def run(purpose, history):
    """Generator driving the agent loop; yields the growing history after each step.

    Starts in UPDATE-TASK (no task exists yet) and loops through run_action
    until a handler sets the task to "END".
    """
    task = None
    directory = "./"
    # Gradio may hand us a list of turns; flatten it to a plain string.
    history = str(history).strip("[]") if history else ""

    action_name = "UPDATE-TASK" if task is None else "MAIN"
    action_input = None
    while True:
        print("")
        print("")
        print("---")
        print("purpose:", purpose)
        print("task:", task)
        print("---")
        print(history)
        print("---")

        action_name, action_input, history, task = run_action(
            purpose,
            task,
            history,
            directory,
            action_name,
            action_input,
        )
        yield history
        if task == "END":
            return history
244
-
245
-
246
-
247
- ################################################
248
-
249
def format_prompt(message, history):
    """Render chat history plus the new message in the Mixtral [INST] format.

    NOTE(review): this redefines the identical helper declared earlier in the
    file — consider deleting one copy.
    """
    prompt = "<s>"
    for user_turn, bot_turn in history:
        prompt += f"[INST] {user_turn} [/INST] {bot_turn}</s> "
    prompt += f"[INST] {message} [/INST]"
    return prompt
256
# Persona prompts selectable from the UI dropdown.
agents = [
    "WEB_DEV",
    "AI_SYSTEM_PROMPT",
    "PYTHON_CODE_DEV",
]


def generate(
    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
):
    """Stream a persona-conditioned completion for the chat UI.

    NOTE(review): ``prompts`` is never imported anywhere in this file, so every
    call path below would raise NameError — confirm the missing ``import prompts``.
    """
    seed = random.randint(1, 1111111111111111)

    # Map the dropdown selection onto its system prompt (defaults to WEB_DEV).
    agent = prompts.WEB_DEV
    if agent_name == "AI_SYSTEM_PROMPT":
        agent = prompts.AI_SYSTEM_PROMPT
    elif agent_name == "PYTHON_CODE_DEV":
        agent = prompts.PYTHON_CODE_DEV
    system_prompt = agent

    # Clamp the sampling temperature away from zero.
    temperature = max(float(temperature), 1e-2)
    top_p = float(top_p)

    sampling_options = {
        "temperature": temperature,
        "max_new_tokens": max_new_tokens,
        "top_p": top_p,
        "repetition_penalty": repetition_penalty,
        "do_sample": True,
        "seed": seed,
    }

    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
    stream = client.text_generation(
        formatted_prompt, **sampling_options, stream=True, details=True, return_full_text=False
    )
    output = ""
    for response in stream:
        output += response.token.text
        yield output
    return output
296
-
297
-
298
# Extra controls rendered beneath the chat box; order matches the trailing
# None slots in each example row.
additional_inputs = [
    gr.Dropdown(
        label="Agents",
        choices=list(agents),
        value=agents[0],
        interactive=True,
    ),
    gr.Textbox(
        label="System Prompt",
        max_lines=1,
        interactive=True,
    ),
    gr.Slider(
        label="Temperature",
        value=0.9,
        minimum=0.0,
        maximum=1.0,
        step=0.05,
        interactive=True,
        info="Higher values produce more diverse outputs",
    ),
    gr.Slider(
        label="Max new tokens",
        value=1048 * 10,
        minimum=0,
        maximum=1048 * 10,
        step=64,
        interactive=True,
        info="The maximum numbers of new tokens",
    ),
    gr.Slider(
        label="Top-p (nucleus sampling)",
        value=0.90,
        minimum=0.0,
        maximum=1,
        step=0.05,
        interactive=True,
        info="Higher values sample more low-probability tokens",
    ),
    gr.Slider(
        label="Repetition penalty",
        value=1.2,
        minimum=1.0,
        maximum=2.0,
        step=0.05,
        interactive=True,
        info="Penalize repeated tokens",
    ),
]
350
-
351
-
352
- examples=[["Based on previous interactions, generate an interactive preview of the user's requested application.", None, None, None, None, None, ],
353
- ["Utilize the relevant code snippets and components from previous interactions.", None, None, None, None, None, ],
354
- ["Assemble a working demo that showcases the core functionality of the application.", None, None, None, None, None, ],
355
- ["Present the demo in an interactive environment within the Gradio interface.", None, None, None, None, None,],
356
- ["Allow the user to explore and interact with the demo to test its features.", None, None, None, None, None,],
357
- ["Gather feedback from the user about the demo and potential improvements.", None, None, None, None, None,],
358
- ["If the user approves of the app's running state you should provide a bash script that will automate all aspects of a local run and also a docker image for ease-of-launch in addition to the huggingface-ready app.py with all functions and gui and the requirements.txt file comprised of all required libraries and packages the application is dependent on, avoiding openai api at all points as we only use huggingface transformers, models, agents, libraries, api.", None, None, None, None, None,],
359
- ]
360
-
361
-
362
def main():
    """Assemble the chat interface and launch it.

    Fixes: the original built ``gr.ChatInterface(...)`` at import time followed
    by a trailing comma (creating a one-element tuple that was immediately
    discarded), never called ``launch()``, and left ``main()`` as an empty stub
    — so the app never actually started.
    """
    interface = gr.ChatInterface(
        fn=run,
        title="""Fragmixt || Agents With Agents || Surf With a Purpose""",
        examples=examples,
        concurrency_limit=20,
    )
    interface.launch()


if __name__ == "__main__":
    main()
 
1
+
2
  import os
3
  import subprocess
4
+ import streamlit as st
5
+ from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
6
+ import black
7
+ from pylint import lint
8
+ from io import StringIO
9
+ import openai
10
+ import sys
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  from datetime import datetime
12
+ import requests
13
+ from bs4 import BeautifulSoup
14
+ from typing import List, Dict, Optional
15
+
16
# Custom exception hierarchy for precise error handling.  All agent failures
# derive from a single AgentError base so callers can catch the whole family
# with one clause (previously each class subclassed Exception independently).
class AgentError(Exception):
    """Base class for all agent-specific errors."""


class InvalidActionError(AgentError):
    """Raised when an invalid action is provided."""


class InvalidInputError(AgentError):
    """Raised when invalid input is provided for an action."""


class CodeGenerationError(AgentError):
    """Raised when code generation fails."""


class CodeRefinementError(AgentError):
    """Raised when code refinement fails."""


class CodeTestingError(AgentError):
    """Raised when code testing fails."""


class CodeIntegrationError(AgentError):
    """Raised when code integration fails."""


class AppTestingError(AgentError):
    """Raised when app testing fails."""


class WorkspaceExplorerError(AgentError):
    """Raised when workspace exploration fails."""


class PromptManagementError(AgentError):
    """Raised when prompt management fails."""


class SearchError(AgentError):
    """Raised when search fails."""
57
class AIAgent:
    """Command-driven agent exposing code, search, and prompt-management tools.

    Tools are registered in ``self.tools`` keyed by the action name the user
    types.  ``handle_input`` parses one command line, runs the matching tool
    exactly once, records the exchange in ``task_history``, and prints it.
    """

    # Follow-up question returned by action_prompt() for each known action.
    _ACTION_PROMPTS: Dict[str, str] = {
        "SEARCH": "What do you want to search for?",
        "CODEGEN": "Provide a code snippet to generate code from.",
        "REFINE-CODE": "Provide the file path of the code to refine.",
        "TEST-CODE": "Provide the file path of the code to test.",
        "INTEGRATE-CODE": "Provide the file path and code snippet to integrate.",
        "TEST-APP": "Test the application.",
        "GENERATE-REPORT": "Generate a report based on the task history.",
        "WORKSPACE-EXPLORER": "Explore the current workspace.",
        "ADD_PROMPT": "Enter the new prompt to add.",
        "ACTION_PROMPT": "Enter the action to get a prompt for.",
        "COMPRESS_HISTORY_PROMPT": "Compress the task history.",
        "LOG_PROMPT": "Enter the event to log.",
        "LOG_RESPONSE": "Log the specified event.",
        "MODIFY_PROMPT": "Enter the prompt to modify.",
        "PREFIX": "Enter the text to add a prefix to.",
        "SEARCH_QUERY": "Enter the topic to generate a search query for.",
        "READ_PROMPT": "Enter the file path to read.",
        "TASK_PROMPT": "Enter the new task to start.",
        "UNDERSTAND_TEST_RESULTS_PROMPT": "Enter your question about the test results.",
    }

    def __init__(self):
        # Dispatch table: action keyword -> bound tool method.
        self.tools = {
            "SEARCH": self.search,
            "CODEGEN": self.code_generation,
            "REFINE-CODE": self.refine_code,
            "TEST-CODE": self.test_code,
            "INTEGRATE-CODE": self.integrate_code,
            "TEST-APP": self.test_app,
            "GENERATE-REPORT": self.generate_report,
            "WORKSPACE-EXPLORER": self.workspace_explorer,
            "ADD_PROMPT": self.add_prompt,
            "ACTION_PROMPT": self.action_prompt,
            "COMPRESS_HISTORY_PROMPT": self.compress_history_prompt,
            "LOG_PROMPT": self.log_prompt,
            "LOG_RESPONSE": self.log_response,
            "MODIFY_PROMPT": self.modify_prompt,
            "PREFIX": self.prefix,
            "SEARCH_QUERY": self.search_query,
            "READ_PROMPT": self.read_prompt,
            "TASK_PROMPT": self.task_prompt,
            "UNDERSTAND_TEST_RESULTS_PROMPT": self.understand_test_results_prompt,
        }
        self.task_history: List[Dict[str, str]] = []  # one entry per handled command
        self.current_task: Optional[str] = None
        self.search_engine_url: str = "https://www.google.com/search?q="  # Default search engine
        self.prompts: List[str] = []  # Store prompts for future use
        # NOTE(review): downloads the gpt2 weights at construction time — slow
        # on first use; consider lazy initialization.
        self.code_generator = pipeline('text-generation', model='gpt2')  # Initialize code generator

    def search(self, query: str) -> List[str]:
        """Fetch the search page for *query* and return every anchor href found.

        Raises:
            SearchError: if the HTTP request fails.
        """
        search_url = self.search_engine_url + query
        try:
            response = requests.get(search_url)
            response.raise_for_status()  # surface 4xx/5xx as exceptions
            soup = BeautifulSoup(response.content, 'html.parser')
            return [link['href'] for link in soup.find_all('a', href=True)]
        except requests.exceptions.RequestException as e:
            raise SearchError(f"Error during search: {e}")

    def code_generation(self, snippet: str) -> str:
        """Generate code continuing *snippet* with the local gpt2 pipeline.

        Raises:
            CodeGenerationError: if the pipeline call fails.
        """
        try:
            return self.code_generator(snippet, max_length=500, num_return_sequences=1)[0]['generated_text']
        except Exception as e:
            raise CodeGenerationError(f"Error during code generation: {e}")

    def refine_code(self, file_path: str) -> str:
        """Return the contents of *file_path* reformatted with black.

        Raises:
            CodeRefinementError: if the file is unreadable or not valid Python.
        """
        try:
            with open(file_path, 'r') as f:
                code = f.read()
            return black.format_str(code, mode=black.FileMode())
        except black.InvalidInput:
            raise CodeRefinementError("Error: Invalid code input for black formatting.")
        except Exception as e:
            raise CodeRefinementError(f"Error during code refinement: {e}")

    def test_code(self, file_path: str) -> str:
        """Lint *file_path* with pylint and return the report text.

        Fix: the original called ``lint.run(code, output=output)``, which is
        not a pylint API (pylint exposes ``pylint.lint.Run`` taking CLI-style
        argument lists), so every invocation raised.  Running pylint as a
        subprocess on the file path is the supported route and captures the
        report cleanly.

        Raises:
            CodeTestingError: if pylint cannot be executed.
        """
        try:
            result = subprocess.run(
                [sys.executable, '-m', 'pylint', file_path],
                capture_output=True,
                text=True,
                check=False,  # pylint exits non-zero whenever it has findings
            )
            return result.stdout
        except Exception as e:
            raise CodeTestingError(f"Error during code testing: {e}")

    def integrate_code(self, file_path: str, code_snippet: str) -> str:
        """Append *code_snippet* to *file_path*.

        NOTE(review): ``handle_input`` passes a single joined string, so this
        two-argument tool cannot be reached through the dispatcher as written
        — confirm the intended calling convention.

        Raises:
            CodeIntegrationError: if the file cannot be written.
        """
        try:
            with open(file_path, 'a') as f:
                f.write(code_snippet)
            return "Code integrated successfully."
        except Exception as e:
            raise CodeIntegrationError(f"Error during code integration: {e}")

    def test_app(self) -> str:
        """Launch the Streamlit app as a smoke test.

        NOTE(review): ``streamlit run`` blocks until the server is stopped, so
        this only returns after a manual shutdown.

        Raises:
            AppTestingError: if the app process exits with an error.
        """
        try:
            subprocess.run(['streamlit', 'run', 'app.py'], check=True)
            return "App tested successfully."
        except subprocess.CalledProcessError as e:
            raise AppTestingError(f"Error during app testing: {e}")

    def generate_report(self) -> str:
        """Render the accumulated task history as a Markdown report."""
        report = f"## Task Report: {self.current_task}\n\n"
        for task in self.task_history:
            report += f"**Action:** {task['action']}\n"
            report += f"**Input:** {task['input']}\n"
            report += f"**Output:** {task['output']}\n\n"
        return report

    def workspace_explorer(self) -> str:
        """List the directories and files in the current working directory.

        Raises:
            WorkspaceExplorerError: if the directory cannot be listed.
        """
        try:
            current_directory = os.getcwd()
            directories = []
            files = []
            for item in os.listdir(current_directory):
                item_path = os.path.join(current_directory, item)
                if os.path.isdir(item_path):
                    directories.append(item)
                elif os.path.isfile(item_path):
                    files.append(item)
            return f"**Directories:** {directories}\n**Files:** {files}"
        except Exception as e:
            raise WorkspaceExplorerError(f"Error during workspace exploration: {e}")

    def add_prompt(self, prompt: str) -> str:
        """Store *prompt* in the agent's prompt list."""
        try:
            self.prompts.append(prompt)
            return f"Prompt '{prompt}' added successfully."
        except Exception as e:
            raise PromptManagementError(f"Error adding prompt: {e}")

    def action_prompt(self, action: str) -> str:
        """Return the follow-up question for *action*.

        The original 19-branch if/elif chain is replaced with a class-level
        dict lookup (same strings, same unknown-action error).

        Raises:
            InvalidActionError: if *action* is not a known tool name.
        """
        try:
            return self._ACTION_PROMPTS[action]
        except KeyError:
            raise InvalidActionError("Please provide a valid action.")

    def compress_history_prompt(self) -> str:
        """Return the confirmation prompt for compressing the task history."""
        return "Do you want to compress the task history?"

    def log_prompt(self) -> str:
        """Return the prompt asking which event to log."""
        return "What event do you want to log?"

    def log_response(self, event: str) -> str:
        """Print *event* and confirm it was logged."""
        print(f"Event logged: {event}")
        return "Event logged successfully."

    def modify_prompt(self, prompt: str) -> str:
        """Report modification of *prompt*.

        NOTE(review): the original body was a placeholder — no stored prompt is
        actually located or updated.  Behavior kept pending a real spec.
        """
        try:
            return f"Prompt '{prompt}' modified successfully."
        except Exception as e:
            raise PromptManagementError(f"Error modifying prompt: {e}")

    def prefix(self, text: str) -> str:
        """Return *text* with the literal 'PREFIX: ' marker prepended."""
        return f"PREFIX: {text}"

    def search_query(self, query: str) -> str:
        """Echo *query* as a labelled search-query string."""
        return f"Search query: {query}"

    def read_prompt(self, file_path: str) -> str:
        """Return the contents of *file_path*.

        Raises:
            InvalidInputError: if the file cannot be read.
        """
        try:
            with open(file_path, 'r') as f:
                return f.read()
        except Exception as e:
            raise InvalidInputError(f"Error reading file: {e}")

    def task_prompt(self) -> str:
        """Return the prompt asking for a new task."""
        return "What task do you want to start?"

    def understand_test_results_prompt(self) -> str:
        """Return the prompt asking about the test results."""
        return "What do you want to know about the test results?"

    def handle_input(self, input_str: str):
        """Parse one command line, run the matching tool once, and log the result.

        Fixes over the original: the selected tool was invoked TWICE (once for
        the history entry and again inside the final print — duplicating side
        effects such as file appends); no-argument tools were re-called with a
        spurious positional argument in that print (TypeError); and an empty
        command crashed with an uncaught ValueError during unpacking.
        """
        try:
            parts = input_str.split()
            if not parts:
                raise InvalidInputError("Empty command; please enter an action.")
            action, *args = parts
            if action not in self.tools:
                raise InvalidActionError("Invalid action. Please choose a valid action from the list of tools.")
            arg_str = " ".join(args)
            # Run the tool exactly once and reuse the result everywhere.
            output = self.tools[action](arg_str) if args else self.tools[action]()
            self.task_history.append({
                "action": action,
                "input": arg_str if args else None,
                "output": output,
            })
            print(f"Action: {action}\nInput: {arg_str}\nOutput: {output}")
        except (InvalidActionError, InvalidInputError, CodeGenerationError, CodeRefinementError,
                CodeTestingError, CodeIntegrationError, AppTestingError, WorkspaceExplorerError,
                PromptManagementError, SearchError) as e:
            print(f"Error: {e}")

    def run(self):
        """REPL loop: read commands from stdin forever and dispatch them."""
        while True:
            input_str = input("Enter a command for the AI Agent: ")
            self.handle_input(input_str)
344
+
345
if __name__ == '__main__':
    # Streamlit entry point: build the agent and render the single input box.
    agent = AIAgent()
    st.title("AI Agent")
    st.write("Enter a command for the AI Agent:")
    input_str = st.text_input("")
    # Only dispatch once the user has actually typed something; the original
    # passed the initial empty string straight to handle_input.
    if input_str:
        agent.handle_input(input_str)
    # NOTE(review): the original also called agent.run(), whose blocking
    # stdin input() loop cannot work inside a Streamlit script run — removed.