acecalisto3 commited on
Commit
8ffb558
·
verified ·
1 Parent(s): e8fb655

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +384 -408
app.py CHANGED
@@ -1,416 +1,392 @@
1
  import os
2
- import sys
3
  import subprocess
4
- import streamlit as st
5
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
6
- import black
7
- from pylint import lint
8
- from io import StringIO
9
- import openai
10
-
11
- # Set your OpenAI API key here
12
- openai.api_key = "YOUR_OPENAI_API_KEY"
13
-
14
- HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
15
- PROJECT_ROOT = "projects"
16
- AGENT_DIRECTORY = "agents"
17
-
18
- # Global state to manage communication between Tool Box and Workspace Chat App
19
- if 'chat_history' not in st.session_state:
20
- st.session_state.chat_history = []
21
- if 'terminal_history' not in st.session_state:
22
- st.session_state.terminal_history = []
23
- if 'workspace_projects' not in st.session_state:
24
- st.session_state.workspace_projects = {}
25
- if 'available_agents' not in st.session_state:
26
- st.session_state.available_agents = []
27
- if 'current_state' not in st.session_state:
28
- st.session_state.current_state = {
29
- 'toolbox': {},
30
- 'workspace_chat': {}
31
- }
32
-
33
- class AIAgent:
34
- def __init__(self, name, description, skills):
35
  self.name = name
36
  self.description = description
37
- self.skills = skills
38
-
39
- def create_agent_prompt(self):
40
- skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
41
- agent_prompt = f"""
42
- As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
43
- {skills_str}
44
- I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
45
- """
46
- return agent_prompt
47
-
48
- def autonomous_build(self, chat_history, workspace_projects):
49
- """
50
- Autonomous build logic that continues based on the state of chat history and workspace projects.
51
  """
52
- summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
53
- summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
54
-
55
- next_step = "Based on the current state, the next logical step is to implement the main application logic."
56
-
57
- return summary, next_step
58
-
59
- def save_agent_to_file(agent):
60
- """Saves the agent's prompt to a file locally and then commits to the Hugging Face repository."""
61
- if not os.path.exists(AGENT_DIRECTORY):
62
- os.makedirs(AGENT_DIRECTORY)
63
- file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
64
- config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
65
- with open(file_path, "w") as file:
66
- file.write(agent.create_agent_prompt())
67
- with open(config_path, "w") as file:
68
- file.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
69
- st.session_state.available_agents.append(agent.name)
70
-
71
- commit_and_push_changes(f"Add agent {agent.name}")
72
-
73
- def load_agent_prompt(agent_name):
74
- """Loads an agent prompt from a file."""
75
- file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
76
- if os.path.exists(file_path):
77
- with open(file_path, "r") as file:
78
- agent_prompt = file.read()
79
- return agent_prompt
80
- else:
81
- return None
82
-
83
- def create_agent_from_text(name, text):
84
- skills = text.split('\n')
85
- agent = AIAgent(name, "AI agent created from text input.", skills)
86
- save_agent_to_file(agent)
87
- return agent.create_agent_prompt()
88
-
89
- # Chat interface using a selected agent
90
- def chat_interface_with_agent(input_text, agent_name):
91
- agent_prompt = load_agent_prompt(agent_name)
92
- if agent_prompt is None:
93
- return f"Agent {agent_name} not found."
94
-
95
- # Load the GPT-2 model which is compatible with AutoModelForCausalLM
96
- model_name = "gpt2"
97
- try:
98
- model = AutoModelForCausalLM.from_pretrained(model_name)
99
- tokenizer = AutoTokenizer.from_pretrained(model_name)
100
- generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
101
- except EnvironmentError as e:
102
- return f"Error loading model: {e}"
103
-
104
- # Combine the agent prompt with user input
105
- combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
106
-
107
- # Truncate input text to avoid exceeding the model's maximum length
108
- max_input_length = 900
109
- input_ids = tokenizer.encode(combined_input, return_tensors="pt")
110
- if input_ids.shape[1] > max_input_length:
111
- input_ids = input_ids[:, :max_input_length]
112
-
113
- # Generate chatbot response
114
- outputs = model.generate(
115
- input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True, pad_token_id=tokenizer.eos_token_id # Set pad_token_id to eos_token_id
116
- )
117
- response = tokenizer.decode(outputs[0], skip_special_tokens=True)
118
- return response
119
-
120
- def workspace_interface(project_name):
121
- project_path = os.path.join(PROJECT_ROOT, project_name)
122
- if not os.path.exists(PROJECT_ROOT):
123
- os.makedirs(PROJECT_ROOT)
124
- if not os.path.exists(project_path):
125
- os.makedirs(project_path)
126
- st.session_state.workspace_projects[project_name] = {"files": []}
127
- st.session_state.current_state['workspace_chat']['project_name'] = project_name
128
- commit_and_push_changes(f"Create project {project_name}")
129
- return f"Project {project_name} created successfully."
130
- else:
131
- return f"Project {project_name} already exists."
132
-
133
- def add_code_to_workspace(project_name, code, file_name):
134
- project_path = os.path.join(PROJECT_ROOT, project_name)
135
- if os.path.exists(project_path):
136
- file_path = os.path.join(project_path, file_name)
137
- with open(file_path, "w") as file:
138
- file.write(code)
139
- st.session_state.workspace_projects[project_name]["files"].append(file_name)
140
- st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
141
- commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
142
- return f"Code added to {file_name} in project {project_name} successfully."
143
- else:
144
- return f"Project {project_name} does not exist."
145
-
146
- def terminal_interface(command, project_name=None):
147
- if project_name:
148
- project_path = os.path.join(PROJECT_ROOT, project_name)
149
- if not os.path.exists(project_path):
150
- return f"Project {project_name} does not exist."
151
- result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
152
- else:
153
- result = subprocess.run(command, shell=True, capture_output=True, text=True)
154
- if result.returncode == 0:
155
- st.session_state.current_state['toolbox']['terminal_output'] = result.stdout
156
- return result.stdout
157
- else:
158
- st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
159
- return result.stderr
160
 
161
- def code_editor_interface(code):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
162
  try:
163
- formatted_code = black.format_str(code, mode=black.FileMode())
164
- except black.NothingChanged:
165
- formatted_code = code
166
- result = StringIO()
167
- sys.stdout = result
168
- sys.stderr = result
169
- (pylint_stdout, pylint_stderr) = lint.py_run(code, return_std=True)
170
- sys.stdout = sys.__stdout__
171
- sys.stderr = sys.__stderr__
172
- lint_message = pylint_stdout.getvalue() + pylint_stderr.getvalue()
173
- st.session_state.current_state['toolbox']['formatted_code'] = formatted_code
174
- st.session_state.current_state['toolbox']['lint_message'] = lint_message
175
- return formatted_code, lint_message
176
-
177
- def summarize_text(text):
178
- summarizer = pipeline("summarization")
179
- summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
180
- st.session_state.current_state['toolbox']['summary'] = summary[0]['summary_text']
181
- return summary[0]['summary_text']
182
-
183
- def sentiment_analysis(text):
184
- analyzer = pipeline("sentiment-analysis")
185
- sentiment = analyzer(text)
186
- st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
187
- return sentiment[0]
188
-
189
- def translate_code(code, input_language, output_language):
190
- # Define a dictionary to map programming languages to their corresponding file extensions
191
- language_extensions = {
192
- # ignore the specific languages right now, and continue to EOF
193
- }
194
-
195
- # Add code to handle edge cases such as invalid input and unsupported programming languages
196
- if input_language not in language_extensions:
197
- raise ValueError(f"Invalid input language: {input_language}")
198
- if output_language not in language_extensions:
199
- raise ValueError(f"Invalid output language: {output_language}")
200
-
201
- # Use the dictionary to map the input and output languages to their corresponding file extensions
202
- input_extension = language_extensions[input_language]
203
- output_extension = language_extensions[output_language]
204
-
205
- # Translate the code using the OpenAI API
206
- prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
207
- response = openai.ChatCompletion.create(
208
- model="gpt-4",
209
- messages=[
210
- {"role": "system", "content": "You are an expert software developer."},
211
- {"role": "user", "content": prompt}
212
- ]
213
- )
214
- translated_code = response.choices[0].message['content'].strip()
215
-
216
- # Return the translated code
217
- translated_code = response.choices[0].message['content'].strip()
218
- st.session_state.current_state['toolbox']['translated_code'] = translated_code
219
- return translated_code
220
-
221
- def generate_code(code_idea):
222
- response = openai.ChatCompletion.create(
223
- model="gpt-4",
224
- messages=[
225
- {"role": "system", "content": "You are an expert software developer."},
226
- {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
227
- ]
228
- )
229
- generated_code = response.choices[0].message['content'].strip()
230
- st.session_state.current_state['toolbox']['generated_code'] = generated_code
231
- return generated_code
232
-
233
- def commit_and_push_changes(commit_message):
234
- """Commits and pushes changes to the Hugging Face repository."""
235
- commands = [
236
- "git add .",
237
- f"git commit -m '{commit_message}'",
238
- "git push"
239
- ]
240
- for command in commands:
241
- result = subprocess.run(command, shell=True, capture_output=True, text=True)
242
- if result.returncode != 0:
243
- st.error(f"Error executing command '{command}': {result.stderr}")
244
- break
245
-
246
- # Streamlit App
247
- st.title("AI Agent Creator")
248
-
249
- # Sidebar navigation
250
- st.sidebar.title("Navigation")
251
- app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
252
-
253
- if app_mode == "AI Agent Creator":
254
- # AI Agent Creator
255
- st.header("Create an AI Agent from Text")
256
-
257
- st.subheader("From Text")
258
- agent_name = st.text_input("Enter agent name:")
259
- text_input = st.text_area("Enter skills (one per line):")
260
- if st.button("Create Agent"):
261
- agent_prompt = create_agent_from_text(agent_name, text_input)
262
- st.success(f"Agent '{agent_name}' created and saved successfully.")
263
- st.session_state.available_agents.append(agent_name)
264
-
265
- elif app_mode == "Tool Box":
266
- # Tool Box
267
- st.header("AI-Powered Tools")
268
-
269
- # Chat Interface
270
- st.subheader("Chat with CodeCraft")
271
- chat_input = st.text_area("Enter your message:")
272
- if st.button("Send"):
273
- if chat_input.startswith("@"):
274
- agent_name = chat_input.split(" ")[0][1:] # Extract agent_name from @agent_name
275
- chat_input = " ".join(chat_input.split(" ")[1:]) # Remove agent_name from input
276
- chat_response = chat_interface_with_agent(chat_input, agent_name)
277
  else:
278
- chat_response = chat_interface(chat_input)
279
- st.session_state.chat_history.append((chat_input, chat_response))
280
- st.write(f"CodeCraft: {chat_response}")
281
-
282
- # Terminal Interface
283
- st.subheader("Terminal")
284
- terminal_input = st.text_input("Enter a command:")
285
- if st.button("Run"):
286
- terminal_output = terminal_interface(terminal_input)
287
- st.session_state.terminal_history.append((terminal_input, terminal_output))
288
- st.code(terminal_output, language="bash")
289
-
290
- # Code Editor Interface
291
- st.subheader("Code Editor")
292
- code_editor = st.text_area("Write your code:", height=300)
293
- if st.button("Format & Lint"):
294
- formatted_code, lint_message = code_editor_interface(code_editor)
295
- st.code(formatted_code, language="python")
296
- st.info(lint_message)
297
-
298
- # Text Summarization Tool
299
- st.subheader("Summarize Text")
300
- text_to_summarize = st.text_area("Enter text to summarize:")
301
- if st.button("Summarize"):
302
- summary = summarize_text(text_to_summarize)
303
- st.write(f"Summary: {summary}")
304
-
305
- # Sentiment Analysis Tool
306
- st.subheader("Sentiment Analysis")
307
- sentiment_text = st.text_area("Enter text for sentiment analysis:")
308
- if st.button("Analyze Sentiment"):
309
- sentiment = sentiment_analysis(sentiment_text)
310
- st.write(f"Sentiment: {sentiment}")
311
-
312
- # Text Translation Tool (Code Translation)
313
- st.subheader("Translate Code")
314
- code_to_translate = st.text_area("Enter code to translate:")
315
- source_language = st.text_input("Enter source language (e.g. 'Python'):")
316
- target_language = st.text_input("Enter target language (e.g. 'JavaScript'):")
317
- if st.button("Translate Code"):
318
- translated_code = translate_code(code_to_translate, source_language, target_language)
319
- st.code(translated_code, language=target_language.lower())
320
-
321
- # Code Generation
322
- st.subheader("Code Generation")
323
- code_idea = st.text_input("Enter your code idea:")
324
- if st.button("Generate Code"):
325
- generated_code = generate_code(code_idea)
326
- st.code(generated_code, language="python")
327
-
328
- # Display Preset Commands
329
- st.subheader("Preset Commands")
330
- preset_commands = {
331
- "Create a new project": "create_project('project_name')",
332
- "Add code to workspace": "add_code_to_workspace('project_name', 'code', 'file_name')",
333
- "Run terminal command": "terminal_interface('command', 'project_name')",
334
- "Generate code": "generate_code('code_idea')",
335
- "Summarize text": "summarize_text('text')",
336
- "Analyze sentiment": "sentiment_analysis('text')",
337
- "Translate code": "translate_code('code', 'source_language', 'target_language')",
338
- }
339
- for command_name, command in preset_commands.items():
340
- st.write(f"{command_name}: `{command}`")
341
-
342
- elif app_mode == "Workspace Chat App":
343
- # Workspace Chat App
344
- st.header("Workspace Chat App")
345
-
346
- # Project Workspace Creation
347
- st.subheader("Create a New Project")
348
- project_name = st.text_input("Enter project name:")
349
- if st.button("Create Project"):
350
- workspace_status = workspace_interface(project_name)
351
- st.success(workspace_status)
352
-
353
- # Add Code to Workspace
354
- st.subheader("Add Code to Workspace")
355
- code_to_add = st.text_area("Enter code to add to workspace:")
356
- file_name = st.text_input("Enter file name (e.g. 'app.py'):")
357
- if st.button("Add Code"):
358
- add_code_status = add_code_to_workspace(project_name, code_to_add, file_name)
359
- st.success(add_code_status)
360
-
361
- # Terminal Interface with Project Context
362
- st.subheader("Terminal (Workspace Context)")
363
- terminal_input = st.text_input("Enter a command within the workspace:")
364
- if st.button("Run Command"):
365
- terminal_output = terminal_interface(terminal_input, project_name)
366
- st.code(terminal_output, language="bash")
367
-
368
- # Chat Interface for Guidance
369
- st.subheader("Chat with CodeCraft for Guidance")
370
- chat_input = st.text_area("Enter your message for guidance:")
371
- if st.button("Get Guidance"):
372
- chat_response = chat_interface(chat_input)
373
- st.session_state.chat_history.append((chat_input, chat_response))
374
- st.write(f"CodeCraft: {chat_response}")
375
-
376
- # Display Chat History
377
- st.subheader("Chat History")
378
- for user_input, response in st.session_state.chat_history:
379
- st.write(f"User: {user_input}")
380
- st.write(f"CodeCraft: {response}")
381
-
382
- # Display Terminal History
383
- st.subheader("Terminal History")
384
- for command, output in st.session_state.terminal_history:
385
- st.write(f"Command: {command}")
386
- st.code(output, language="bash")
387
-
388
- # Display Projects and Files
389
- st.subheader("Workspace Projects")
390
- for project, details in st.session_state.workspace_projects.items():
391
- st.write(f"Project: {project}")
392
- for file in details['files']:
393
- st.write(f" - {file}")
394
-
395
- # Chat with AI Agents
396
- st.subheader("Chat with AI Agents")
397
- selected_agent = st.selectbox("Select an AI agent", st.session_state.available_agents)
398
- agent_chat_input = st.text_area("Enter your message for the agent:")
399
- if st.button("Send to Agent"):
400
- agent_chat_response = chat_interface_with_agent(agent_chat_input, selected_agent)
401
- st.session_state.chat_history.append((agent_chat_input, agent_chat_response))
402
- st.write(f"{selected_agent}: {agent_chat_response}")
403
-
404
- # Automate Build Process
405
- st.subheader("Automate Build Process")
406
- if st.button("Automate"):
407
- agent = AIAgent(selected_agent, "", []) # Load the agent without skills for now
408
- summary, next_step = agent.autonomous_build(st.session_state.chat_history, st.session_state.workspace_projects)
409
- st.write("Autonomous Build Summary:")
410
- st.write(summary)
411
- st.write("Next Step:")
412
- st.write(next_step)
413
-
414
- # Display current state for debugging
415
- st.sidebar.subheader("Current State")
416
- st.sidebar.json(st.session_state.current_state)
 
1
  import os
 
2
  import subprocess
3
+ import random
4
+ from typing import List, Dict, Tuple
5
+ from datetime import datetime
6
+ import logging
7
+
8
+ import gradio as gr
9
+ from huggingface_hub import InferenceClient
10
+
11
+ # --- Configuration ---
12
+ MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1" # Hugging Face model for text generation
13
+ MAX_HISTORY_TURNS = 5 # Number of previous turns to include in the prompt
14
+ MAX_TOKENS_PER_TURN = 2048 # Maximum number of tokens to generate per turn
15
+ VERBOSE_LOGGING = True # Enable verbose logging for debugging
16
+ DEFAULT_AGENT = "WEB_DEV" # Default agent to use
17
+
18
+ # --- Logging Setup ---
19
+ logging.basicConfig(
20
+ filename="app.log", # Name of the log file
21
+ level=logging.INFO, # Set the logging level (INFO, DEBUG, etc.)
22
+ format="%(asctime)s - %(levelname)s - %(message)s",
23
+ )
24
+
25
+ # --- Agent Definitions ---
26
+ class Agent:
27
+ """Base class for all agents."""
28
+
29
+ def __init__(self, name: str, description: str):
 
 
 
 
30
  self.name = name
31
  self.description = description
32
+
33
+ def handle_action(self, action: str, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
34
+ """Handles an action from the user.
35
+
36
+ Args:
37
+ action: The action name.
38
+ action_input: The input for the action.
39
+ history: The conversation history.
40
+ task: The current task.
41
+
42
+ Returns:
43
+ A tuple containing the next action name, action input, updated history, and updated task.
 
 
44
  """
45
+ raise NotImplementedError("Agent subclasses must implement handle_action.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
46
 
47
+ def get_prompt(self, message: str, history: List[Tuple[str, str]], task: str) -> str:
48
+ """Generates a prompt for the language model.
49
+
50
+ Args:
51
+ message: The user's message.
52
+ history: The conversation history.
53
+ task: The current task.
54
+
55
+ Returns:
56
+ The prompt string.
57
+ """
58
+ now = datetime.now()
59
+ date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
60
+ prompt = f"""
61
+ {date_time_str}
62
+ Agent: {self.name}
63
+ Task: {task}
64
+ History:
65
+ {self.format_history(history)}
66
+ Message: {message}
67
+ """
68
+ return prompt
69
+
70
+ def format_history(self, history: List[Tuple[str, str]]) -> str:
71
+ """Formats the conversation history for the prompt."""
72
+ formatted_history = ""
73
+ for user_message, agent_response in history[-MAX_HISTORY_TURNS:]:
74
+ formatted_history += f"[INST] {user_message} [/INST]\n{agent_response}\n"
75
+ return formatted_history
76
+
77
+ class WebDevAgent(Agent):
78
+ """Agent for web development tasks."""
79
+
80
+ def __init__(self):
81
+ super().__init__(name="WEB_DEV", description="Agent specialized in web development tasks.")
82
+
83
+ def handle_action(self, action: str, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
84
+ if action == "SEARCH":
85
+ return self._handle_search_action(action_input, history, task)
86
+ elif action == "GENERATE_HTML":
87
+ return self._handle_generate_html_action(action_input, history, task)
88
+ elif action == "GENERATE_CSS":
89
+ return self._handle_generate_css_action(action_input, history, task)
90
+ elif action == "GENERATE_JS":
91
+ return self._handle_generate_js_action(action_input, history, task)
92
+ elif action == "COMPLETE":
93
+ return "COMPLETE", "COMPLETE", history, task
94
+ else:
95
+ return "MAIN", None, history, task
96
+
97
+ def _handle_search_action(self, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
98
+ """Handles the SEARCH action."""
99
+ if VERBOSE_LOGGING:
100
+ logging.info(f"Calling SEARCH action with input: {action_input}")
101
+ try:
102
+ if "http" in action_input:
103
+ if "<" in action_input:
104
+ action_input = action_input.strip("<")
105
+ if ">" in action_input:
106
+ action_input = action_input.strip(">")
107
+ response = i_s(action_input) # Use i_search for web search
108
+ history.append(("observation: search result is:", response))
109
+ else:
110
+ history.append(("observation: I need a valid URL for the SEARCH action.", ""))
111
+ except Exception as e:
112
+ history.append(("observation:", str(e)))
113
+ return "MAIN", None, history, task
114
+
115
+ def _handle_generate_html_action(self, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
116
+ """Handles the GENERATE_HTML action."""
117
+ if VERBOSE_LOGGING:
118
+ logging.info(f"Calling GENERATE_HTML action with input: {action_input}")
119
+ # Simulate OpenAI's code generation capabilities using Hugging Face
120
+ prompt = self.get_prompt(f"Generate HTML code for a web page that {action_input}", history, task)
121
+ response = run_gpt(prompt, stop_tokens=["```", "```html"], max_tokens=MAX_TOKENS_PER_TURN)
122
+ history.append(("observation: generated HTML code:", response))
123
+ return "MAIN", None, history, task
124
+
125
+ def _handle_generate_css_action(self, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
126
+ """Handles the GENERATE_CSS action."""
127
+ if VERBOSE_LOGGING:
128
+ logging.info(f"Calling GENERATE_CSS action with input: {action_input}")
129
+ # Simulate OpenAI's code generation capabilities using Hugging Face
130
+ prompt = self.get_prompt(f"Generate CSS code for a web page that {action_input}", history, task)
131
+ response = run_gpt(prompt, stop_tokens=["```", "```css"], max_tokens=MAX_TOKENS_PER_TURN)
132
+ history.append(("observation: generated CSS code:", response))
133
+ return "MAIN", None, history, task
134
+
135
+ def _handle_generate_js_action(self, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
136
+ """Handles the GENERATE_JS action."""
137
+ if VERBOSE_LOGGING:
138
+ logging.info(f"Calling GENERATE_JS action with input: {action_input}")
139
+ # Simulate OpenAI's code generation capabilities using Hugging Face
140
+ prompt = self.get_prompt(f"Generate JavaScript code for a web page that {action_input}", history, task)
141
+ response = run_gpt(prompt, stop_tokens=["```", "```js"], max_tokens=MAX_TOKENS_PER_TURN)
142
+ history.append(("observation: generated JavaScript code:", response))
143
+ return "MAIN", None, history, task
144
+
145
+ class AiSystemPromptAgent(Agent):
146
+ """Agent for generating system prompts."""
147
+
148
+ def __init__(self):
149
+ super().__init__(name="AI_SYSTEM_PROMPT", description="Agent specialized in generating system prompts.")
150
+
151
+ def handle_action(self, action: str, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
152
+ if action == "GENERATE_PROMPT":
153
+ return self._handle_generate_prompt_action(action_input, history, task)
154
+ elif action == "COMPLETE":
155
+ return "COMPLETE", "COMPLETE", history, task
156
+ else:
157
+ return "MAIN", None, history, task
158
+
159
+ def _handle_generate_prompt_action(self, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
160
+ """Handles the GENERATE_PROMPT action."""
161
+ if VERBOSE_LOGGING:
162
+ logging.info(f"Calling GENERATE_PROMPT action with input: {action_input}")
163
+ # Simulate OpenAI's prompt generation capabilities using Hugging Face
164
+ prompt = self.get_prompt(f"Generate a system prompt for a language model that {action_input}", history, task)
165
+ response = run_gpt(prompt, stop_tokens=["```", "```json"], max_tokens=MAX_TOKENS_PER_TURN)
166
+ history.append(("observation: generated system prompt:", response))
167
+ return "MAIN", None, history, task
168
+
169
+ class PythonCodeDevAgent(Agent):
170
+ """Agent for Python code development tasks."""
171
+
172
+ def __init__(self):
173
+ super().__init__(name="PYTHON_CODE_DEV", description="Agent specialized in Python code development tasks.")
174
+
175
+ def handle_action(self, action: str, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
176
+ if action == "GENERATE_CODE":
177
+ return self._handle_generate_code_action(action_input, history, task)
178
+ elif action == "RUN_CODE":
179
+ return self._handle_run_code_action(action_input, history, task)
180
+ elif action == "COMPLETE":
181
+ return "COMPLETE", "COMPLETE", history, task
182
+ else:
183
+ return "MAIN", None, history, task
184
+
185
+ def _handle_generate_code_action(self, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
186
+ """Handles the GENERATE_CODE action."""
187
+ if VERBOSE_LOGGING:
188
+ logging.info(f"Calling GENERATE_CODE action with input: {action_input}")
189
+ # Simulate OpenAI's code generation capabilities using Hugging Face
190
+ prompt = self.get_prompt(f"Generate Python code that {action_input}", history, task)
191
+ response = run_gpt(prompt, stop_tokens=["```", "```python"], max_tokens=MAX_TOKENS_PER_TURN)
192
+ history.append(("observation: generated Python code:", response))
193
+ return "MAIN", None, history, task
194
+
195
+ def _handle_run_code_action(self, action_input: str, history: List[Tuple[str, str]], task: str) -> Tuple[str, str, List[Tuple[str, str]], str]:
196
+ """Handles the RUN_CODE action."""
197
+ if VERBOSE_LOGGING:
198
+ logging.info(f"Calling RUN_CODE action with input: {action_input}")
199
+ # Simulate OpenAI's code execution capabilities using Hugging Face
200
+ prompt = self.get_prompt(f"Run the following Python code and provide the output: {action_input}", history, task)
201
+ response = run_gpt(prompt, stop_tokens=["```", "```python"], max_tokens=MAX_TOKENS_PER_TURN)
202
+ history.append(("observation: code output:", response))
203
+ return "MAIN", None, history, task
204
+
205
+ # --- Action Handlers ---
206
+ def handle_main_action(action: str, action_input: str, history: List[Tuple[str, str]], task: str, agent: Agent) -> Tuple[str, str, List[Tuple[str, str]], str]:
207
+ """Handles the MAIN action, which is the default action."""
208
+ if VERBOSE_LOGGING:
209
+ logging.info(f"Calling MAIN action with input: {action_input}")
210
+ prompt = agent.get_prompt(action_input, history, task)
211
+ response = run_gpt(prompt, stop_tokens=["observation:", "task:", "action:", "thought:"], max_tokens=MAX_TOKENS_PER_TURN)
212
+ if VERBOSE_LOGGING:
213
+ logging.info(f"Response from model: {response}")
214
+ history.append((action_input, response))
215
+ lines = response.strip().strip("\n").split("\n")
216
+ for line in lines:
217
+ if line == "":
218
+ continue
219
+ if line.startswith("thought: "):
220
+ history.append((line, ""))
221
+ if VERBOSE_LOGGING:
222
+ logging.info(f"Thought: {line}")
223
+ elif line.startswith("action: "):
224
+ action_name, action_input = parse_action(line)
225
+ history.append((line, ""))
226
+ if VERBOSE_LOGGING:
227
+ logging.info(f"Action: {action_name} - {action_input}")
228
+ if "COMPLETE" in action_name or "COMPLETE" in action_input:
229
+ task = "END"
230
+ return action_name, action_input, history, task
231
+ else:
232
+ return action_name, action_input, history, task
233
+ else:
234
+ history.append((line, ""))
235
+ if VERBOSE_LOGGING:
236
+ logging.info(f"Other Output: {line}")
237
+ return "MAIN", None, history, task
238
+
239
+ def handle_update_task_action(action: str, action_input: str, history: List[Tuple[str, str]], task: str, agent: Agent) -> Tuple[str, str, List[Tuple[str, str]], str]:
240
+ """Handles the UPDATE-TASK action, which updates the current task."""
241
+ if VERBOSE_LOGGING:
242
+ logging.info(f"Calling UPDATE-TASK action with input: {action_input}")
243
+ prompt = agent.get_prompt(action_input, history, task)
244
+ task = run_gpt(prompt, stop_tokens=[], max_tokens=64).strip("\n")
245
+ history.append(("observation: task has been updated to:", task))
246
+ return "MAIN", None, history, task
247
+
248
+ def handle_search_action(action: str, action_input: str, history: List[Tuple[str, str]], task: str, agent: Agent) -> Tuple[str, str, List[Tuple[str, str]], str]:
249
+ """Handles the SEARCH action, which performs a web search."""
250
+ if VERBOSE_LOGGING:
251
+ logging.info(f"Calling SEARCH action with input: {action_input}")
252
  try:
253
+ if "http" in action_input:
254
+ if "<" in action_input:
255
+ action_input = action_input.strip("<")
256
+ if ">" in action_input:
257
+ action_input = action_input.strip(">")
258
+ response = i_s(action_input) # Use i_search for web search
259
+ history.append(("observation: search result is:", response))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
260
  else:
261
+ history.append(("observation: I need a valid URL for the SEARCH action.", ""))
262
+ except Exception as e:
263
+ history.append(("observation:", str(e)))
264
+ return "MAIN", None, history, task
265
+
266
def handle_complete_action(action: str, action_input: str, history: List[Tuple[str, str]], task: str, agent: Agent) -> Tuple[str, str, List[Tuple[str, str]], str]:
    """Handle the COMPLETE action: mark the task finished and stop the loop."""
    if VERBOSE_LOGGING:
        logging.info(f"Calling COMPLETE action.")
    # Returning task "END" stops run_agent; the COMPLETE action name
    # short-circuits any further dispatch on the next iteration.
    return "COMPLETE", "COMPLETE", history, "END"
272
+
273
# --- Action Mapping ---
# Dispatch table used by run_agent: maps the action name parsed from the
# model's output to the handler that executes it. run_agent falls back to
# "MAIN" for any name not present here.
ACTION_HANDLERS: Dict[str, callable] = {
    "MAIN": handle_main_action,
    "UPDATE-TASK": handle_update_task_action,
    "SEARCH": handle_search_action,
    "COMPLETE": handle_complete_action,
}
280
+
281
+ # --- Utility Functions ---
282
def run_gpt(prompt: str, stop_tokens: List[str], max_tokens: int) -> str:
    """Send *prompt* to the hosted model and return the generated text.

    Sampling parameters (temperature/top_p/repetition penalty) are fixed
    here; *stop_tokens* and *max_tokens* are the only per-call knobs.
    """
    if VERBOSE_LOGGING:
        logging.info(f"Prompt: {prompt}")
    generated = InferenceClient(MODEL_NAME).text_generation(
        prompt,
        max_new_tokens=max_tokens,
        stop_sequences=stop_tokens,
        temperature=0.7,
        top_p=0.8,
        repetition_penalty=1.5,
    )
    if VERBOSE_LOGGING:
        logging.info(f"Response: {generated}")
    return generated
291
+
292
def parse_action(line: str) -> Tuple[str, str]:
    """Split a model output line of the form "NAME: input" into its parts.

    The literal word "action" is removed from the name portion and both
    pieces are whitespace-trimmed. A line without a colon yields an empty
    action input.
    """
    name_part, _, input_part = line.partition(":")
    action_name = name_part.replace("action", "").strip()
    action_input = input_part.strip()
    return action_name, action_input
302
+
303
def run_agent(purpose: str, history: List[Tuple[str, str]], agent: Agent) -> "Iterator[List[Tuple[str, str]]]":
    """Drive the agent's action loop, yielding the history after each step.

    NOTE: despite the original ``List`` annotation, this function is a
    generator — it yields the (mutated) conversation history after every
    dispatched action and stops when the task reaches "END", the model
    signals RESPONSE/COMPLETE, or an unrecoverable error occurs.

    Args:
        purpose: High-level goal for the session (logged only, here).
        history: Conversation so far as (speaker/observation, text) pairs.
        agent: Agent whose prompts drive the underlying model.
    """
    task = None
    # The first step always asks the model to establish a task; after that,
    # control flows through the MAIN action and whatever the handlers return.
    action_name = "UPDATE-TASK" if task is None else "MAIN"
    action_input = None
    while True:
        if VERBOSE_LOGGING:
            logging.info("---")
            logging.info("Purpose: %s", purpose)
            logging.info("Task: %s", task)
            logging.info("---")
            logging.info("History: %s", history)
            logging.info("---")
            logging.info("Running action: %s - %s", action_name, action_input)
        try:
            # The model sometimes emits RESPONSE/COMPLETE directly; both end the loop.
            if "RESPONSE" in action_name or "COMPLETE" in action_name:
                return history
            # Unknown (including empty) action names fall back to MAIN; this
            # single membership check also covers the old dead "" / None test.
            if action_name not in ACTION_HANDLERS:
                action_name = "MAIN"
            handler = ACTION_HANDLERS[action_name]
            action_name, action_input, history, task = handler(action_name, action_input, history, task, agent)
            yield history
            if task == "END":
                return history
        except Exception as e:
            # Record the failure as an observation for the model and stop.
            history.append(("observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command", ""))
            logging.error(f"Error in run_agent: {e}")
            return history
337
+
338
# --- Gradio Interface ---
def main():
    """Build and launch the Gradio UI for the agent chat app."""
    with gr.Blocks() as demo:
        gr.Markdown("## FragMixt: Your No-Code Development Powerhouse")
        gr.Markdown("### Agents w/ Agents: Mastering No-Code Development")

        # Chat Interface
        chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")

        # Input Components
        message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
        purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
        agent_name = gr.Dropdown(label="Agents", choices=[agent.name for agent in [WebDevAgent(), AiSystemPromptAgent(), PythonCodeDevAgent()]], value=DEFAULT_AGENT, interactive=True)
        sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
        temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
        max_new_tokens = gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum numbers of new tokens")
        top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
        repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")

        # Button to submit the message
        submit_button = gr.Button(value="Send")

        # Project Explorer Tab
        with gr.Tab("Project Explorer"):
            project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
            explore_button = gr.Button(value="Explore")
            project_output = gr.Textbox(label="File Tree", lines=20)

        # Chat App Logic Tab
        with gr.Tab("Chat App"):
            history = gr.State([])
            # NOTE(review): `examples` is currently unused by any component;
            # kept for parity with the original UI definition.
            examples = [
                ["What is the purpose of this AI agent?", "I am designed to assist with no-code development tasks."],
                ["Can you help me generate a Python function to calculate the factorial of a number?", "Sure! Here is a Python function to calculate the factorial of a number:"],
                ["Generate a web page with a heading that says 'Welcome to My Website!'", "action: GENERATE_HTML action_input=a heading that says 'Welcome to My Website!'"],
            ]

            def chat(purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history):
                """Run the selected agent over the conversation and return the
                updated history for both the Chatbot display and the State.

                The extra parameters (message, sys_prompt, sampling knobs) are
                accepted to match the click() inputs even though run_agent does
                not consume them yet.
                """
                # Map the dropdown value to a concrete agent; default to WEB_DEV
                # for any unrecognized selection.
                agent_factories = {
                    "WEB_DEV": WebDevAgent,
                    "AI_SYSTEM_PROMPT": AiSystemPromptAgent,
                    "PYTHON_CODE_DEV": PythonCodeDevAgent,
                }
                agent = agent_factories.get(agent_name, WebDevAgent)()
                # BUG FIX: run_agent is a generator that yields the evolving
                # history. The old code wrapped it in list(), producing a list
                # of history snapshots (a list of lists) that broke the Chatbot
                # display. Drain the generator and keep the final history.
                for updated_history in run_agent(purpose, history, agent):
                    history = updated_history
                return history, history

        submit_button.click(chat, inputs=[purpose, message, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, history], outputs=[chatbot, history])

    demo.launch()

if __name__ == "__main__":
    main()