acecalisto3 committed on
Commit 7611715 · verified · 1 Parent(s): 8173591

Update app.py

Files changed (1)
  1. app.py +577 -349
app.py CHANGED
@@ -1,354 +1,582 @@
  import os
  import subprocess
- import streamlit as st
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
- import openai
- 
- # Constants
- HUGGING_FACE_REPO_URL = "https://huggingface.co/spaces/acecalisto3/DevToolKit"
- PROJECT_ROOT = "projects"
- AGENT_DIRECTORY = "agents"
- 
- # Initialize session state
- if 'chat_history' not in st.session_state:
-     st.session_state.chat_history = []
- if 'terminal_history' not in st.session_state:
-     st.session_state.terminal_history = []
- if 'workspace_projects' not in st.session_state:
-     st.session_state.workspace_projects = {}
- if 'available_agents' not in st.session_state:
-     st.session_state.available_agents = []
- if 'current_state' not in st.session_state:
-     st.session_state.current_state = {
-         'toolbox': {},
-         'workspace_chat': {}
-     }
- 
- # AI Agent class
- class AIAgent:
-     def __init__(self, name, description, skills):
-         self.name = name
-         self.description = description
-         self.skills = skills
- 
-     def create_agent_prompt(self):
-         skills_str = '\n'.join([f"* {skill}" for skill in self.skills])
-         agent_prompt = f"""
- As an elite expert developer, my name is {self.name}. I possess a comprehensive understanding of the following areas:
- {skills_str}
- I am confident that I can leverage my expertise to assist you in developing and deploying cutting-edge web applications. Please feel free to ask any questions or present any challenges you may encounter.
- """
-         return agent_prompt
- 
-     def autonomous_build(self, chat_history, workspace_projects):
-         summary = "Chat History:\n" + "\n".join([f"User: {u}\nAgent: {a}" for u, a in chat_history])
-         summary += "\n\nWorkspace Projects:\n" + "\n".join([f"{p}: {details}" for p, details in workspace_projects.items()])
-         next_step = "Based on the current state, the next logical step is to implement the main application logic."
-         return summary, next_step
- 
- # Functions for agent management
- def save_agent_to_file(agent):
-     if not os.path.exists(AGENT_DIRECTORY):
-         os.makedirs(AGENT_DIRECTORY)
-     file_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}.txt")
-     config_path = os.path.join(AGENT_DIRECTORY, f"{agent.name}Config.txt")
-     with open(file_path, "w") as file:
-         file.write(agent.create_agent_prompt())
-     with open(config_path, "w") as file:
-         file.write(f"Agent Name: {agent.name}\nDescription: {agent.description}")
-     st.session_state.available_agents.append(agent.name)
-     commit_and_push_changes(f"Add agent {agent.name}")
- 
- def load_agent_prompt(agent_name):
-     file_path = os.path.join(AGENT_DIRECTORY, f"{agent_name}.txt")
-     if os.path.exists(file_path):
-         with open(file_path, "r") as file:
-             agent_prompt = file.read()
-         return agent_prompt
-     else:
-         return None
- 
- def create_agent_from_text(name, text):
-     skills = text.split('\n')
-     agent = AIAgent(name, "AI agent created from text input.", skills)
-     save_agent_to_file(agent)
-     return agent.create_agent_prompt()
- 
- # OpenAI GPT-3 API setup for text generation
- openai.api_key = st.secrets["OPENAI_API_KEY"]
- 
- # Initialize the Hugging Face model and tokenizer
- model_name = "gpt2"
- tokenizer = AutoTokenizer.from_pretrained(model_name)
- model = AutoModelForCausalLM.from_pretrained(model_name)
- generator = pipeline('text-generation', model=model, tokenizer=tokenizer)
- 
- # Tool Box UI elements
- def toolbox():
-     st.header("Tool Box")
- 
-     # List available agents
-     for agent in st.session_state.available_agents:
-         st.markdown(f"### {agent}")
-         st.write(agent.description)
-         if st.button(f'Chat with {agent}'):
-             chat_with_agent(agent)
- 
-     # Add new agents
-     if st.session_state['toolbox'].get('new_agent') is None:
-         st.session_state['toolbox']['new_agent'] = {}
- 
-     st.text_input("Agent Name", key='name', on_change=update_agent)
-     st.text_area("Agent Description", key='description', on_change=update_agent)
-     st.text_input("Skills (comma-separated)", key='skills', on_change=update_agent)
- 
-     if st.button('Create New Agent'):
-         skills = [s.strip() for s in st.session_state['toolbox']['new_agent'].get('skills', '').split(',')]
-         new_agent = AIAgent(st.session_state['toolbox']['new_agent'].get('name'),
-                             st.session_state['toolbox']['new_agent'].get('description'), skills)
-         st.session_state.available_agents.append(new_agent)
- 
- def update_agent():
-     st.session_state['toolbox']['new_agent'] = {
-         'name': st.session_state.name,
-         'description': st.session_state.description,
-         'skills': st.session_state.skills
-     }
- 
- def chat_with_agent(agent_name):
-     st.subheader(f"Chat with {agent_name}")
-     chat_input = st.text_area("Enter your message:")
-     if st.button("Send"):
-         chat_response = chat_interface_with_agent(chat_input, agent_name)
-         st.session_state.chat_history.append((chat_input, chat_response))
-         st.write(f"{agent_name}: {chat_response}")
- 
- # Workspace UI elements
- def workspace():
-     st.header("Workspace")
- 
-     # Project selection and interaction
-     for project, details in st.session_state.workspace_projects.items():
-         st.write(f"Project: {project}")
-         for file in details['files']:
-             st.write(f" - {file}")
- 
-     if st.button('Add New Project'):
-         new_project = {'name': '', 'description': '', 'files': []}
-         st.session_state.workspace_projects[new_project['name']] = new_project
- 
- # Main function to display the app
  def main():
-     toolbox()
-     workspace()
 
  if __name__ == "__main__":
-     main()
- 
- # Additional functionalities
- def commit_and_push_changes(commit_message):
-     commands = [
-         "git add .",
-         f"git commit -m '{commit_message}'",
-         "git push"
-     ]
-     for command in commands:
-         result = subprocess.run(command, shell=True, capture_output=True, text=True)
-         if result.returncode != 0:
-             st.error(f"Error executing command '{command}': {result.stderr}")
-             break
- 
- def chat_interface_with_agent(input_text, agent_name):
-     agent_prompt = load_agent_prompt(agent_name)
-     if agent_prompt is None:
-         return f"Agent {agent_name} not found."
- 
-     combined_input = f"{agent_prompt}\n\nUser: {input_text}\nAgent:"
-     max_input_length = 900
-     input_ids = tokenizer.encode(combined_input, return_tensors="pt")
-     if input_ids.shape[1] > max_input_length:
-         input_ids = input_ids[:, :max_input_length]
- 
-     outputs = model.generate(input_ids, max_new_tokens=50, num_return_sequences=1, do_sample=True, pad_token_id=tokenizer.eos_token_id)
-     response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-     return response
- 
- def workspace_interface(project_name):
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if not os.path.exists(PROJECT_ROOT):
-         os.makedirs(PROJECT_ROOT)
-     if not os.path.exists(project_path):
-         os.makedirs(project_path)
-         st.session_state.workspace_projects[project_name] = {"files": []}
-         st.session_state.current_state['workspace_chat']['project_name'] = project_name
-         commit_and_push_changes(f"Create project {project_name}")
-         return f"Project {project_name} created successfully."
-     else:
-         return f"Project {project_name} already exists."
- 
- def add_code_to_workspace(project_name, code, file_name):
-     project_path = os.path.join(PROJECT_ROOT, project_name)
-     if os.path.exists(project_path):
-         file_path = os.path.join(project_path, file_name)
-         with open(file_path, "w") as file:
-             file.write(code)
-         st.session_state.workspace_projects[project_name]["files"].append(file_name)
-         st.session_state.current_state['workspace_chat']['added_code'] = {"file_name": file_name, "code": code}
-         commit_and_push_changes(f"Add code to {file_name} in project {project_name}")
-         return f"Code added to {file_name} in project {project_name} successfully."
-     else:
-         return f"Project {project_name} does not exist."
- 
- def terminal_interface(command, project_name=None):
-     if project_name:
-         project_path = os.path.join(PROJECT_ROOT, project_name)
-         if not os.path.exists(project_path):
-             return f"Project {project_name} does not exist."
-         result = subprocess.run(command, cwd=project_path, shell=True, capture_output=True, text=True)
-     else:
-         result = subprocess.run(command, shell=True, capture_output=True, text=True)
-     if result.returncode == 0:
-         st.session_state.current_state['toolbox']['terminal_output'] = result.stdout
-         return result.stdout
-     else:
-         st.session_state.current_state['toolbox']['terminal_output'] = result.stderr
-         return result.stderr
- 
- def summarize_text(text):
-     summarizer = pipeline("summarization")
-     summary = summarizer(text, max_length=50, min_length=25, do_sample=False)
-     st.session_state.current_state['toolbox']['summary'] = summary[0]['summary_text']
-     return summary[0]['summary_text']
- 
- def sentiment_analysis(text):
-     analyzer = pipeline("sentiment-analysis")
-     sentiment = analyzer(text)
-     st.session_state.current_state['toolbox']['sentiment'] = sentiment[0]
-     return sentiment[0]
- 
- def generate_code(code_idea):
-     response = openai.ChatCompletion.create(
-         model="gpt-4",
-         messages=[
-             {"role": "system", "content": "You are an expert software developer."},
-             {"role": "user", "content": f"Generate a Python code snippet for the following idea:\n\n{code_idea}"}
-         ]
-     )
-     generated_code = response.choices[0].message['content'].strip()
-     st.session_state.current_state['toolbox']['generated_code'] = generated_code
-     return generated_code
- 
- def translate_code(code, input_language, output_language):
-     language_extensions = {
-         "Python": ".py",
-         "JavaScript": ".js",
-         # Add more languages and their extensions here
-     }
-     if input_language not in language_extensions:
-         raise ValueError(f"Invalid input language: {input_language}")
-     if output_language not in language_extensions:
-         raise ValueError(f"Invalid output language: {output_language}")
- 
-     prompt = f"Translate this code from {input_language} to {output_language}:\n\n{code}"
-     response = openai.ChatCompletion.create(
-         model="gpt-4",
-         messages=[
-             {"role": "system", "content": "You are an expert software developer."},
-             {"role": "user", "content": prompt}
-         ]
-     )
-     translated_code = response.choices[0].message['content'].strip()
-     st.session_state.current_state['toolbox']['translated_code'] = translated_code
-     return translated_code
- 
- # Streamlit App
- st.title("AI Agent Creator")
- 
- # Sidebar navigation
- st.sidebar.title("Navigation")
- app_mode = st.sidebar.selectbox("Choose the app mode", ["AI Agent Creator", "Tool Box", "Workspace Chat App"])
- 
- if app_mode == "AI Agent Creator":
-     # AI Agent Creator
-     st.header("Create an AI Agent from Text")
- 
-     st.subheader("From Text")
-     agent_name = st.text_input("Enter agent name:")
-     text_input = st.text_area("Enter skills (one per line):")
-     if st.button("Create Agent"):
-         agent_prompt = create_agent_from_text(agent_name, text_input)
-         st.success(f"Agent '{agent_name}' created and saved successfully.")
-         st.session_state.available_agents.append(agent_name)
- 
- elif app_mode == "Tool Box":
-     # Tool Box
-     st.header("AI-Powered Tools")
- 
-     # Chat Interface
-     st.subheader("Chat with CodeCraft")
-     chat_input = st.text_area("Enter your message:")
-     if st.button("Send"):
-         if chat_input.startswith("@"):
-             agent_name = chat_input.split(" ")[0][1:]  # Extract agent_name from @agent_name
-             chat_input = " ".join(chat_input.split(" ")[1:])  # Remove agent_name from input
-             chat_response = chat_interface_with_agent(chat_input, agent_name)
-             st.session_state.chat_history.append((chat_input, chat_response))
-             st.write(f"{agent_name}: {chat_response}")
- 
-     # Code Generation
-     st.subheader("Generate Code")
-     code_idea = st.text_area("Enter your code idea:")
-     if st.button("Generate Code"):
-         generated_code = generate_code(code_idea)
-         st.code(generated_code, language='python')
- 
-     # Code Translation
-     st.subheader("Translate Code")
-     code = st.text_area("Enter your code:")
-     input_language = st.selectbox("Input Language", ["Python", "JavaScript"])
-     output_language = st.selectbox("Output Language", ["Python", "JavaScript"])
-     if st.button("Translate Code"):
-         translated_code = translate_code(code, input_language, output_language)
-         st.code(translated_code, language=output_language.lower())
- 
-     # Summarization
-     st.subheader("Summarize Text")
-     text_to_summarize = st.text_area("Enter text to summarize:")
-     if st.button("Summarize"):
-         summary = summarize_text(text_to_summarize)
-         st.write(summary)
- 
-     # Sentiment Analysis
-     st.subheader("Sentiment Analysis")
-     text_to_analyze = st.text_area("Enter text for sentiment analysis:")
-     if st.button("Analyze Sentiment"):
-         sentiment = sentiment_analysis(text_to_analyze)
-         st.write(sentiment)
- 
- elif app_mode == "Workspace Chat App":
-     # Workspace Chat App
-     st.header("Workspace Chat App")
- 
-     # Project Management
-     st.subheader("Manage Projects")
-     project_name = st.text_input("Enter project name:")
-     if st.button("Create Project"):
-         project_message = workspace_interface(project_name)
-         st.success(project_message)
- 
-     # Add Code to Project
-     st.subheader("Add Code to Project")
-     project_name_for_code = st.text_input("Enter project name for code:")
-     code_content = st.text_area("Enter code content:")
-     file_name = st.text_input("Enter file name:")
-     if st.button("Add Code"):
-         add_code_message = add_code_to_workspace(project_name_for_code, code_content, file_name)
-         st.success(add_code_message)
- 
-     # Terminal Interface
-     st.subheader("Terminal Interface")
-     terminal_command = st.text_area("Enter terminal command:")
-     project_name_for_terminal = st.text_input("Enter project name for terminal (optional):")
-     if st.button("Run Command"):
-         terminal_output = terminal_interface(terminal_command, project_name_for_terminal)
-         st.text(terminal_output)
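
# Illustrative sketch: how the removed terminal_interface() was meant to be
# called; the command and project name below are hypothetical.
# output = terminal_interface("ls -la", "my_project")  # runs inside projects/my_project
# output = terminal_interface("echo hello")            # runs in the app's cwd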
 
  import os
  import subprocess
+ import random
+ from huggingface_hub import InferenceClient
+ import gradio as gr
+ from safe_search import safe_search  # Make sure you have this function defined
+ from i_search import google
+ from i_search import i_search as i_s
+ from datetime import datetime
+ import logging
+ import json
+ import nltk  # Used by the generate_text_chunked function
+ from transformers import pipeline  # Required by generate_text_chunked below
+ 
+ nltk.download('punkt')  # Download the punkt tokenizer if you haven't already
+ 
+ now = datetime.now()
+ date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
+ 
+ client = InferenceClient(
+     "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ )
+ 
+ # --- Set up logging ---
+ logging.basicConfig(
+     filename="app.log",  # Name of the log file
+     level=logging.INFO,  # Set the logging level (INFO, DEBUG, etc.)
+     format="%(asctime)s - %(levelname)s - %(message)s",
+ )
+ 
+ agents = [
+     "WEB_DEV",
+     "AI_SYSTEM_PROMPT",
+     "PYTHON_CODE_DEV"
+ ]
+ ############################################
+ 
+ VERBOSE = True
+ MAX_HISTORY = 5
+ # MODEL = "gpt-3.5-turbo"  # "gpt-4"
+ 
+ PREFIX = """
+ {date_time_str}
+ Purpose: {purpose}
+ Safe Search: {safe_search}
+ """
+ 
+ LOG_PROMPT = """
+ PROMPT: {content}
+ """
+ 
+ LOG_RESPONSE = """
+ RESPONSE: {resp}
+ """
+ 
+ COMPRESS_HISTORY_PROMPT = """
+ You are a helpful AI assistant. Your task is to compress the following history into a summary that is no longer than 512 tokens.
+ History:
+ {history}
+ """
+ 
+ ACTION_PROMPT = """
+ You are a helpful AI assistant. You are working on the task: {task}
+ Your current history is:
+ {history}
+ What is your next thought?
+ thought:
+ What is your next action?
+ action:
+ """
+ 
+ TASK_PROMPT = """
+ You are a helpful AI assistant. Your current history is:
+ {history}
+ What is the next task?
+ task:
+ """
+ 
+ UNDERSTAND_TEST_RESULTS_PROMPT = """
+ You are a helpful AI assistant. The test results are:
+ {test_results}
+ What do you want to know about the test results?
+ thought:
+ """
+ 
+ def format_prompt(message, history, max_history_turns=2):
+     prompt = "<s>"
+     # Keep only the last 'max_history_turns' turns
+     for user_prompt, bot_response in history[-max_history_turns:]:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
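
# Illustrative sketch: what format_prompt() yields for a one-turn history;
# the strings are hypothetical.
# >>> format_prompt("What is 2+2?", [("Hi", "Hello!")])
# '<s>[INST] Hi [/INST] Hello!</s> [INST] What is 2+2? [/INST]'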
+ 
+ def run_gpt(
+     prompt_template,
+     stop_tokens,
+     max_tokens,
+     purpose,
+     **prompt_kwargs,
+ ):
+     seed = random.randint(1, 1111111111111111)
+     logging.info(f"Seed: {seed}")  # Log the seed
+ 
+     content = PREFIX.format(
+         date_time_str=date_time_str,
+         purpose=purpose,
+         safe_search=safe_search,
+     ) + prompt_template.format(**prompt_kwargs)
+     if VERBOSE:
+         logging.info(LOG_PROMPT.format(content=content))  # Log the prompt ({content} is a named field)
+ 
+     resp = client.text_generation(content, max_new_tokens=max_tokens, stop_sequences=stop_tokens, temperature=0.7, top_p=0.8, repetition_penalty=1.5)
+     if VERBOSE:
+         logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response ({resp} is a named field)
+     return resp
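
# Illustrative sketch: a typical run_gpt() call against the templates above;
# the purpose and history strings are hypothetical.
# task = run_gpt(TASK_PROMPT, stop_tokens=["task:"], max_tokens=64,
#                purpose="build a web app", history="observation: started\n")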
+ 
+ def generate(prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.7, max_new_tokens=2048, top_p=0.8, repetition_penalty=1.5, model="mistralai/Mixtral-8x7B-Instruct-v0.1"):
+     # Use 'prompt' here instead of 'message'
+     formatted_prompt = format_prompt(prompt, history, max_history_turns=5)  # Truncated history
+     logging.info(f"Formatted Prompt: {formatted_prompt}")
+     stream = client.text_generation(formatted_prompt, temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, stream=True, details=True, return_full_text=False)
+     resp = ""
+     for response in stream:
+         resp += response.token.text
+ 
+     if VERBOSE:
+         logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response ({resp} is a named field)
+     return resp
+ 
+ 
+ def compress_history(purpose, task, history, directory):
+     resp = run_gpt(
+         COMPRESS_HISTORY_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         max_tokens=512,
+         purpose=purpose,
+         task=task,
+         history=history,
+     )
+     history = "observation: {}\n".format(resp)
+     return history
+ 
+ def call_search(purpose, task, history, directory, action_input):
+     logging.info(f"CALLING SEARCH: {action_input}")
+     try:
+         if "http" in action_input:
+             if "<" in action_input:
+                 action_input = action_input.strip("<")
+             if ">" in action_input:
+                 action_input = action_input.strip(">")
+ 
+             response = i_s(action_input)
+             # response = google(search_return)
+             logging.info(f"Search Result: {response}")
+             history += "observation: search result is: {}\n".format(response)
+         else:
+             history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
+     except Exception as e:
+         history += "observation: {}\n".format(e)
+     return "MAIN", None, history, task
+ 
+ def call_main(purpose, task, history, directory, action_input):
+     logging.info(f"CALLING MAIN: {action_input}")
+     resp = run_gpt(
+         ACTION_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         max_tokens=32000,
+         purpose=purpose,
+         task=task,
+         history=history,
+     )
+     lines = resp.strip().strip("\n").split("\n")
+     for line in lines:
+         if line == "":
+             continue
+         if line.startswith("thought: "):
+             history += "{}\n".format(line)
+             logging.info(f"Thought: {line}")
+         elif line.startswith("action: "):
+             action_name, action_input = parse_action(line)
+             logging.info(f"Action: {action_name} - {action_input}")
+             history += "{}\n".format(line)
+             if "COMPLETE" in action_name or "COMPLETE" in action_input:
+                 task = "END"
+             return action_name, action_input, history, task
+         else:
+             history += "{}\n".format(line)
+             logging.info(f"Other Output: {line}")
+             # history += "observation: the following command did not produce any useful output: '{}', I need to check the command's syntax, or use a different command\n".format(line)
+ 
+     # return action_name, action_input, history, task
+     # assert False, "unknown action: {}".format(line)
+     return "MAIN", None, history, task
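
# Illustrative sketch: the shape of a completion call_main() can parse
# (hypothetical model output).
#   thought: I should look up the library documentation first.
#   action: SEARCH=https://example.com/docs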
+ 
+ 
+ def call_set_task(purpose, task, history, directory, action_input):
+     logging.info(f"CALLING SET_TASK: {action_input}")
+     task = run_gpt(
+         TASK_PROMPT,
+         stop_tokens=[],
+         max_tokens=64,
+         purpose=purpose,
+         task=task,
+         history=history,
+     ).strip("\n")
+     history += "observation: task has been updated to: {}\n".format(task)
+     return "MAIN", None, history, task
+ 
+ def end_fn(purpose, task, history, directory, action_input):
+     logging.info(f"CALLING END_FN: {action_input}")
+     task = "END"
+     return "COMPLETE", "COMPLETE", history, task
+ 
+ NAME_TO_FUNC = {
+     "MAIN": call_main,
+     "UPDATE-TASK": call_set_task,
+     "SEARCH": call_search,
+     "COMPLETE": end_fn,
+ }
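
# Illustrative sketch: actions dispatch through NAME_TO_FUNC; the arguments
# here are hypothetical.
# handler = NAME_TO_FUNC["SEARCH"]  # -> call_search
# next_action, action_input, history, task = handler(
#     "research", None, "", "./", "https://example.com")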
+ 
+ def run_action(purpose, task, history, directory, action_name, action_input):
+     logging.info(f"RUNNING ACTION: {action_name} - {action_input}")
+     try:
+         if "RESPONSE" in action_name or "COMPLETE" in action_name:
+             action_name = "COMPLETE"
+             task = "END"
+             return action_name, "COMPLETE", history, task
+ 
+         # Compress the history when it is long
+         if len(history.split("\n")) > MAX_HISTORY:
+             logging.info("COMPRESSING HISTORY")
+             history = compress_history(purpose, task, history, directory)
+         if action_name not in NAME_TO_FUNC:
+             action_name = "MAIN"
+         if action_name == "" or action_name is None:
+             action_name = "MAIN"
+         assert action_name in NAME_TO_FUNC
+ 
+         logging.info(f"RUN: {action_name} - {action_input}")
+         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
+     except Exception as e:
+         history += "observation: the previous command did not produce any useful output, I need to check the command's syntax, or use a different command\n"
+         logging.error(f"Error in run_action: {e}")
+         return "MAIN", None, history, task
+ 
+ def run(purpose, history):
+     # print(purpose)
+     # print(hist)
+     task = None
+     directory = "./"
+     if history:
+         history = str(history).strip("[]")
+     if not history:
+         history = ""
+ 
+     action_name = "UPDATE-TASK" if task is None else "MAIN"
+     action_input = None
+     while True:
+         logging.info("---")
+         logging.info(f"Purpose: {purpose}")
+         logging.info(f"Task: {task}")
+         logging.info("---")
+         logging.info(f"History: {history}")
+         logging.info("---")
+ 
+         action_name, action_input, history, task = run_action(
+             purpose,
+             task,
+             history,
+             directory,
+             action_name,
+             action_input,
+         )
+         yield history
+         # yield ("", [(purpose, history)])
+         if task == "END":
+             return history
+             # return ("", [(purpose, history)])
+ 
+ 
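
# Illustrative sketch: driving the run() generator with a hypothetical purpose
# and an empty history; each yielded value is the accumulated history string.
# for partial_history in run("build a todo app", []):
#     print(partial_history)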
+ ################################################
+ 
+ # NOTE: format_prompt(), agents and generate() are redefined below; these
+ # later definitions take effect.
+ def format_prompt(message, history, max_history_turns=5):
+     prompt = "<s>"
+     # Keep only the last 'max_history_turns' turns
+     for user_prompt, bot_response in history[-max_history_turns:]:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+ 
+ agents = [
+     "WEB_DEV",
+     "AI_SYSTEM_PROMPT",
+     "PYTHON_CODE_DEV"
+ ]
+ 
+ def generate(
+     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0, model="mistralai/Mixtral-8x7B-Instruct-v0.1"
+ ):
+     seed = random.randint(1, 1111111111111111)
+ 
+     # Select the system prompt for the chosen agent
+     if agent_name == "WEB_DEV":
+         agent = "You are a helpful AI assistant. You are a web developer."
+     elif agent_name == "AI_SYSTEM_PROMPT":
+         agent = "You are a helpful AI assistant. You are an AI system."
+     elif agent_name == "PYTHON_CODE_DEV":
+         agent = "You are a helpful AI assistant. You are a Python code developer."
+     else:
+         agent = "You are a helpful AI assistant."  # Fallback so 'agent' is always bound
+     system_prompt = agent
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+ 
+     # Add the system prompt to the beginning of the prompt
+     formatted_prompt = f"{system_prompt} {prompt}"
+ 
+     # Use 'prompt' here instead of 'message'
+     formatted_prompt = format_prompt(formatted_prompt, history, max_history_turns=5)  # Truncated history
+     logging.info(f"Formatted Prompt: {formatted_prompt}")
+     stream = client.text_generation(formatted_prompt, temperature=temperature, max_new_tokens=max_new_tokens, top_p=top_p, repetition_penalty=repetition_penalty, stream=True, details=True, return_full_text=False)
+     resp = ""
+     for response in stream:
+         resp += response.token.text
+ 
+     if VERBOSE:
+         logging.info(LOG_RESPONSE.format(resp=resp))  # Log the response ({resp} is a named field)
+     return resp
+ 
+ 
+ def generate_text_chunked(input_text, model, generation_parameters, max_tokens_to_generate):
+     """Generates text in chunks to avoid token limit errors."""
+     sentences = nltk.sent_tokenize(input_text)
+     generated_text = []
+     generator = pipeline('text-generation', model=model)
+ 
+     for sentence in sentences:
+         # Tokenize the sentence and check if it's within the limit
+         tokens = generator.tokenizer(sentence).input_ids
+         if len(tokens) + max_tokens_to_generate <= 32768:
+             # Generate text for this chunk
+             response = generator(sentence, max_length=max_tokens_to_generate, **generation_parameters)
+             generated_text.append(response[0]['generated_text'])
+         else:
+             # Handle cases where the sentence is too long:
+             # you could split the sentence further or skip it
+             print(f"Sentence too long: {sentence}")
+ 
+     return ''.join(generated_text)
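
# Illustrative sketch: chunked generation with a small local model; the model
# name and generation parameters are hypothetical.
# text = generate_text_chunked("First sentence. Second sentence.",
#                              "gpt2", {"do_sample": False}, 64)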
+ 
+ # Orphaned fragment of a streaming generate() body; it references undefined
+ # names (prompt, history, generate_kwargs) and uses yield/return outside a
+ # function, so it is kept here commented out.
+ # formatted_prompt = format_prompt(prompt, history, max_history_turns=5)  # Truncated history
+ # logging.info(f"Formatted Prompt: {formatted_prompt}")
+ # stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+ # output = ""
+ # for response in stream:
+ #     output += response.token.text
+ #     yield output
+ # return output
+ 
+ 
+ additional_inputs = [
+     gr.Dropdown(
+         label="Agents",
+         choices=[s for s in agents],
+         value=agents[0],
+         interactive=True,
+     ),
+     gr.Textbox(
+         label="System Prompt",
+         max_lines=1,
+         interactive=True,
+     ),
+     gr.Slider(
+         label="Temperature",
+         value=0.9,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values produce more diverse outputs",
+     ),
+     gr.Slider(
+         label="Max new tokens",
+         value=1048*10,
+         minimum=0,
+         maximum=1048*10,
+         step=64,
+         interactive=True,
+         info="The maximum number of new tokens",
+     ),
+     gr.Slider(
+         label="Top-p (nucleus sampling)",
+         value=0.90,
+         minimum=0.0,
+         maximum=1,
+         step=0.05,
+         interactive=True,
+         info="Higher values sample more low-probability tokens",
+     ),
+     gr.Slider(
+         label="Repetition penalty",
+         value=1.2,
+         minimum=1.0,
+         maximum=2.0,
+         step=0.05,
+         interactive=True,
+         info="Penalize repeated tokens",
+     ),
+ ]
+ 
+ examples = [
+     ["Help me set up TypeScript configurations and integrate ts-loader in my existing React project.",
+      "Update Webpack Configurations",
+      "Install Dependencies",
+      "Configure Ts-Loader",
+      "TypeChecking Rules Setup",
+      "React Specific Settings",
+      "Compilation Options",
+      "Test Runner Configuration"],
+ 
+     ["Guide me through building a serverless microservice using AWS Lambda and API Gateway, connecting to DynamoDB for storage.",
+      "Set Up AWS Account",
+      "Create Lambda Function",
+      "API Gateway Integration",
+      "Define DynamoDB Table Schema",
+      "Connect Service To DB",
+      "Add Authentication Layers",
+      "Monitor Metrics and Set Alarms"],
+ 
+     ["Migrate our current monolithic PHP application towards containerized services using Docker and Kubernetes for scalability.",
+      "Architectural Restructuring Plan",
+      "Containerisation Process With Docker",
+      "Service Orchestration With Kubernetes",
+      "Load Balancing Strategies",
+      "Persistent Storage Solutions",
+      "Network Policies Enforcement",
+      "Continuous Integration / Continuous Delivery"],
+ 
+     ["Provide guidance on integrating WebAssembly modules compiled from C++ source files into an ongoing web project.",
+      "Toolchain Selection (Emscripten vs. LLVM)",
+      "Setting Up Compiler Environment",
+      ".cpp Source Preparation",
+      "Module Building Approach",
+      "Memory Management Considerations",
+      "Performance Tradeoffs",
+      "Seamless WebAssembly Embedding"]
+ ]
+ 
+ def parse_action(line):
+     # str.strip() removes *characters*, not a prefix, so slice the prefix off
+     # instead and split on the first '=' only.
+     action_name, action_input = line[len("action: "):].split("=", 1)
+     action_name = action_name.strip()
+     action_input = action_input.strip()
+     return action_name, action_input
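
# Illustrative sketch of the action-line format parse_action() expects:
# parse_action("action: SEARCH=https://example.com")
#   -> ("SEARCH", "https://example.com")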
+ 
+ def get_file_tree(path):
+     """
+     Recursively explores a directory and returns a nested dictionary representing its file tree.
+     """
+     tree = {}
+     for item in os.listdir(path):
+         item_path = os.path.join(path, item)
+         if os.path.isdir(item_path):
+             tree[item] = get_file_tree(item_path)
+         else:
+             tree[item] = None
+     return tree
+ 
+ def display_file_tree(tree, indent=0):
+     """
+     Prints a formatted representation of the file tree.
+     """
+     for name, subtree in tree.items():
+         print(f"{' ' * indent}{name}")
+         if subtree is not None:
+             display_file_tree(subtree, indent + 1)
+ 
+ def project_explorer(path):
+     """
+     Returns the file tree of a given path as a JSON string for display in the Gradio app.
+     """
+     tree = get_file_tree(path)
+     tree_str = json.dumps(tree, indent=4)  # Convert the tree to a string for display
+     return tree_str
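
# Illustrative sketch: for a hypothetical layout with src/main.py and README.md,
# get_file_tree(".") returns {"src": {"main.py": None}, "README.md": None},
# and project_explorer(".") returns that same tree pretty-printed as JSON.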
+ 
+ def chat_app_logic(message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, model):
+     try:
+         # Pass 'message' as 'prompt'
+         response = ''.join(generate(
+             model=model,
+             prompt=message,  # Use 'prompt' here
+             history=history,
+             agent_name=agent_name,
+             sys_prompt=sys_prompt,
+             temperature=temperature,
+             max_new_tokens=max_new_tokens,
+             top_p=top_p,
+             repetition_penalty=repetition_penalty,
+         ))
+     except TypeError:
+         # ... (rest of the exception handling)
+         response_parts = []
+         for part in generate(
+             model=model,
+             prompt=message,  # Use 'prompt' here
+             history=history,
+             agent_name=agent_name,
+             sys_prompt=sys_prompt,
+             temperature=temperature,
+             max_new_tokens=max_new_tokens,
+             top_p=top_p,
+             repetition_penalty=repetition_penalty,
+         ):
+             if isinstance(part, str):
+                 response_parts.append(part)
+             elif isinstance(part, dict) and 'content' in part:
+                 response_parts.append(part['content'])
+ 
+         response = ''.join(response_parts)
+         history.append((message, response))
+         return history
+ 
+     history.append((message, response))
+     return history
+ 
  def main():
+     with gr.Blocks() as demo:
+         gr.Markdown("## FragMixt")
+         gr.Markdown("### Agents w/ Agents")
+ 
+         # Chat Interface
+         chatbot = gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel")
+         # chatbot.load(examples)
+ 
+         # Input Components
+         message = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+         purpose = gr.Textbox(label="Purpose", placeholder="What is the purpose of this interaction?")
+         agent_name = gr.Dropdown(label="Agents", choices=[s for s in agents], value=agents[0], interactive=True)
+         sys_prompt = gr.Textbox(label="System Prompt", max_lines=1, interactive=True)
+         temperature = gr.Slider(label="Temperature", value=0.9, minimum=0.0, maximum=1.0, step=0.05, interactive=True, info="Higher values produce more diverse outputs")
+         max_new_tokens = gr.Slider(label="Max new tokens", value=1048*10, minimum=0, maximum=1048*10, step=64, interactive=True, info="The maximum number of new tokens")
+         top_p = gr.Slider(label="Top-p (nucleus sampling)", value=0.90, minimum=0.0, maximum=1, step=0.05, interactive=True, info="Higher values sample more low-probability tokens")
+         repetition_penalty = gr.Slider(label="Repetition penalty", value=1.2, minimum=1.0, maximum=2.0, step=0.05, interactive=True, info="Penalize repeated tokens")
+         model_input = gr.Textbox(label="Model", value="mistralai/Mixtral-8x7B-Instruct-v0.1", visible=False)
+ 
+         # Button to submit the message
+         submit_button = gr.Button(value="Send")
+ 
+         # Project Explorer Tab
+         with gr.Tab("Project Explorer"):
+             project_path = gr.Textbox(label="Project Path", placeholder="/home/user/app/current_project")
+             explore_button = gr.Button(value="Explore")
+             project_output = gr.Textbox(label="File Tree", lines=20)
+ 
+         # Chat App Logic Tab
+         with gr.Tab("Chat App"):
+             history = gr.State([])
+             for example in examples:
+                 # Bind 'example' as a default argument so each button uses its own example
+                 gr.Button(value=example[0]).click(lambda example=example: chat_app_logic(example[0], history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, model=model_input), outputs=chatbot)
+ 
+         # Connect components to the chat app logic
+         submit_button.click(chat_app_logic, inputs=[message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, model_input], outputs=chatbot)
+         message.submit(chat_app_logic, inputs=[message, history, purpose, agent_name, sys_prompt, temperature, max_new_tokens, top_p, repetition_penalty, model_input], outputs=chatbot)
+ 
+         # Connect components to the project explorer
+         explore_button.click(project_explorer, inputs=project_path, outputs=project_output)
+ 
+     demo.launch(show_api=True)
 
  if __name__ == "__main__":
+     main()