acecalisto3 committed
Commit 20d2c96 · verified · 1 Parent(s): fdf1185

Update app.py

Files changed (1)
  1. app.py +380 -511
app.py CHANGED
@@ -1,520 +1,389 @@
  import os
  import subprocess
- import streamlit as st
  import gradio as gr
- from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
- import black
- from pylint import lint
- from io import StringIO
- import sys
  from datetime import datetime
- import requests
- from bs4 import BeautifulSoup
- from typing import List, Dict, Optional
-
- # --- Custom Exceptions for Enhanced Error Handling ---
- class InvalidActionError(Exception):
-     """Raised when an invalid action is provided."""
-     pass
-
-
- class InvalidInputError(Exception):
-     """Raised when invalid input is provided for an action."""
-     pass
-
-
- class CodeGenerationError(Exception):
-     """Raised when code generation fails."""
-     pass
-
-
- class AppTestingError(Exception):
-     """Raised when app testing fails."""
-     pass
-
-
- class WorkspaceExplorerError(Exception):
-     """Raised when workspace exploration fails."""
-     pass
-
-
- class PromptManagementError(Exception):
-     """Raised when prompt management fails."""
-     pass
-
-
- class SearchError(Exception):
-     """Raised when search fails."""
-     pass
-
-
- class CodeRefinementError(Exception):
-     """Raised when code refinement fails."""
-     pass
-
-
- class CodeTestingError(Exception):
-     """Raised when code testing fails."""
-     pass
-
-
- class CodeIntegrationError(Exception):
-     """Raised when code integration fails."""
-     pass
-
- # --- AI Agent Class ---
- class AIAgent:
-     def __init__(self):
-         # --- Initialize Tools and Attributes ---
-         self.tools = {
-             "SEARCH": self.search,
-             "CODEGEN": self.code_generation,
-             "REFINE-CODE": self.refine_code, # Use internal function
-             "TEST-CODE": self.test_code, # Use internal function
-             "INTEGRATE-CODE": self.integrate_code, # Use internal function
-             "TEST-APP": self.test_app,
-             "GENERATE-REPORT": self.generate_report,
-             "WORKSPACE-EXPLORER": self.workspace_explorer,
-             "ADD_PROMPT": self.add_prompt,
-             "ACTION_PROMPT": self.action_prompt,
-             "COMPRESS_HISTORY_PROMPT": self.compress_history_prompt,
-             "LOG_PROMPT": self.log_prompt,
-             "LOG_RESPONSE": self.log_response,
-             "MODIFY_PROMPT": self.modify_prompt,
-             "PREFIX": self.prefix,
-             "SEARCH_QUERY": self.search_query,
-             "READ_PROMPT": self.read_prompt,
-             "TASK_PROMPT": self.task_prompt,
-             "UNDERSTAND_TEST_RESULTS_PROMPT": self.understand_test_results_prompt,
-         }
-         self.task_history: List[Dict[str, str]] = []
-         self.current_task: Optional[str] = None
-         self.search_engine_url: str = "https://www.google.com/search?q=" # Default search engine
-         self.prompts: List[str] = [] # Store prompts for future use
-         self.code_generator = None # Initialize code generator later
-         self.available_models = [
-             "gpt2",
-             "facebook/bart-large-cnn",
-             "google/flan-t5-xl",
-             "bigscience/T0_3B",
-         ] # Add more as needed
-         self.selected_model = "gpt2" # Default model
-
-     # --- Search Functionality ---
-     def search(self, query: str) -> List[str]:
-         """Performs a web search using the specified search engine."""
-         search_url = self.search_engine_url + query
-         try:
-             response = requests.get(search_url)
-             response.raise_for_status() # Raise an exception for bad status codes
-             soup = BeautifulSoup(response.content, 'html.parser')
-             results = soup.find_all('a', href=True)
-             return [result['href'] for result in results]
-         except requests.exceptions.RequestException as e:
-             raise SearchError(f"Error during search: {e}")
-
-     # --- Code Generation Functionality ---
-     def code_generation(self, snippet: str) -> str:
-         """Generates code based on the provided snippet or description."""
-         try:
-             if not self.code_generator:
-                 self.code_generator = pipeline(
-                     'text-generation', model=self.selected_model
-                 )
-             generated_text = self.code_generator(
-                 snippet, max_length=500, num_return_sequences=1
-             )[0]['generated_text']
-             return generated_text
-         except Exception as e:
-             raise CodeGenerationError(f"Error during code generation: {e}")
-
-     # --- Code Refinement Functionality ---
-     def refine_code(self, code: str) -> str:
-         """Refines the provided code string."""
-         try:
-             refined_code = black.format_str(code, mode=black.FileMode())
-             return refined_code
-         except black.InvalidInput:
-             raise CodeRefinementError("Error: Invalid code input for black formatting.")
-         except Exception as e:
-             raise CodeRefinementError(f"Error during code refinement: {e}")
-
-     # --- Code Testing Functionality ---
-     def test_code(self, code: str) -> str:
-         """Tests the provided code string using pylint."""
-         try:
-             # Use pylint to lint the code
-             lint_output = StringIO()
-             sys.stdout = lint_output
-             lint.Run(code.split('\n'), do_exit=False)
-             sys.stdout = sys.__stdout__
-             return lint_output.getvalue()
-         except Exception as e:
-             raise CodeTestingError(f"Error during code testing: {e}")
-
-     # --- Code Integration Functionality ---
-     def integrate_code(self, file_path: str, code_snippet: str) -> str:
-         """Integrates the code snippet into the specified file."""
-         try:
-             # For simplicity, we'll just append the code snippet to the file
-             # In a real scenario, you'd need more sophisticated logic
-             with open(file_path, 'a') as f:
-                 f.write(code_snippet)
-             return f"Code snippet integrated into {file_path}"
-         except Exception as e:
-             raise CodeIntegrationError(f"Error during code integration: {e}")
-
-     # --- App Testing Functionality ---
-     def test_app(self) -> str:
-         """Tests the functionality of the app."""
-         try:
-             subprocess.run(['streamlit', 'run', 'app.py'], check=True)
-             return "App tested successfully."
-         except subprocess.CalledProcessError as e:
-             raise AppTestingError(f"Error during app testing: {e}")
-
-     # --- Report Generation Functionality ---
-     def generate_report(self) -> str:
-         """Generates a report based on the task history."""
-         report = f"## Task Report: {self.current_task}\n\n"
-         for task in self.task_history:
-             report += f"**Action:** {task['action']}\n"
-             report += f"**Input:** {task['input']}\n"
-             report += f"**Output:** {task['output']}\n\n"
-         return report
-
-     # --- Workspace Exploration Functionality ---
-     def workspace_explorer(self) -> str:
-         """Provides a workspace explorer functionality."""
-         try:
-             current_directory = os.getcwd()
-             directories = []
-             files = []
-             for item in os.listdir(current_directory):
-                 item_path = os.path.join(current_directory, item)
-                 if os.path.isdir(item_path):
-                     directories.append(item)
-                 elif os.path.isfile(item_path):
-                     files.append(item)
-             return f"**Directories:** {directories}\n**Files:** {files}"
-         except Exception as e:
-             raise WorkspaceExplorerError(f"Error during workspace exploration: {e}")
-
-     # --- Prompt Management Functionality ---
-     def add_prompt(self, prompt: str) -> str:
-         """Adds a new prompt to the agent's knowledge base."""
-         try:
-             self.prompts.append(prompt)
-             return f"Prompt '{prompt}' added successfully."
-         except Exception as e:
-             raise PromptManagementError(f"Error adding prompt: {e}")
-
-     # --- Prompt Generation Functionality ---
-     def action_prompt(self, action: str) -> str:
-         """Provides a prompt for a specific action."""
-         try:
-             if action == "SEARCH":
-                 return "What do you want to search for?"
-             elif action == "CODEGEN":
-                 return "Provide a code snippet to generate code from, or describe what you want the code to do."
-             elif action == "REFINE-CODE":
-                 return "Provide the code to refine."
-             elif action == "TEST-CODE":
-                 return "Provide the code to test."
-             elif action == "INTEGRATE-CODE":
-                 return "Provide the file path and code snippet to integrate. For example: /path/to/your/file.py \"\"\"print('Hello, World!')\"\"\""
-             elif action == "TEST-APP":
-                 return "Test the application."
-             elif action == "GENERATE-REPORT":
-                 return "Generate a report based on the task history."
-             elif action == "WORKSPACE-EXPLORER":
-                 return "Explore the current workspace."
-             elif action == "ADD_PROMPT":
-                 return "Enter the new prompt to add."
-             elif action == "ACTION_PROMPT":
-                 return "Enter the action to get a prompt for."
-             elif action == "COMPRESS_HISTORY_PROMPT":
-                 return "Compress the task history."
-             elif action == "LOG_PROMPT":
-                 return "Enter the event to log."
-             elif action == "LOG_RESPONSE":
-                 return "Log the specified event."
-             elif action == "MODIFY_PROMPT":
-                 return "Enter the prompt to modify."
-             elif action == "PREFIX":
-                 return "Enter the text to add a prefix to."
-             elif action == "SEARCH_QUERY":
-                 return "Enter the topic to generate a search query for."
-             elif action == "READ_PROMPT":
-                 return "Enter the file path to read."
-             elif action == "TASK_PROMPT":
-                 return "Enter the new task to start."
-             elif action == "UNDERSTAND_TEST_RESULTS_PROMPT":
-                 return "Enter your question about the test results."
-             else:
-                 raise InvalidActionError("Please provide a valid action.")
-         except InvalidActionError as e:
-             raise e
-
-     # --- Prompt Generation Functionality ---
-     def compress_history_prompt(self) -> str:
-         """Provides a prompt to compress the task history."""
-         return "Do you want to compress the task history?"
-
-     # --- Prompt Generation Functionality ---
-     def log_prompt(self) -> str:
-         """Provides a prompt to log a specific event."""
-         return "What event do you want to log?"
-
-     # --- Logging Functionality ---
-     def log_response(self, event: str) -> str:
-         """Logs the specified event."""
-         print(f"Event logged: {event}")
-         return "Event logged successfully."
-
-     # --- Prompt Modification Functionality ---
-     def modify_prompt(self, prompt: str) -> str:
-         """Modifies an existing prompt."""
-         try:
-             # Find the prompt to modify
-             # Update the prompt
-             return f"Prompt '{prompt}' modified successfully."
-         except Exception as e:
-             raise PromptManagementError(f"Error modifying prompt: {e}")
-
-     # --- Prefix Functionality ---
-     def prefix(self, text: str) -> str:
-         """Adds a prefix to the provided text."""
-         return f"PREFIX: {text}"
-
-     # --- Search Query Generation Functionality ---
-     def search_query(self, query: str) -> str:
-         """Provides a search query for the specified topic."""
-         return f"Search query: {query}"
-
-     # --- File Reading Functionality ---
-     def read_prompt(self, file_path: str) -> str:
-         """Provides a prompt to read the contents of a file."""
-         try:
-             with open(file_path, 'r') as f:
-                 contents = f.read()
-             return contents
-         except FileNotFoundError:
-             raise InvalidInputError(f"Error: File not found: {file_path}")
-         except Exception as e:
-             raise InvalidInputError(f"Error reading file: {e}")
-
-     # --- Task Prompt Generation Functionality ---
-     def task_prompt(self) -> str:
-         """Provides a prompt to start a new task."""
-         return "What task do you want to start?"
-
-     # --- Test Results Understanding Prompt Generation Functionality ---
-     def understand_test_results_prompt(self) -> str:
-         """Provides a prompt to understand the test results."""
-         return "What do you want to know about the test results?"
-
-     # --- Input Handling Functionality ---
-     def handle_input(self, input_str: str):
-         """Handles user input and executes the corresponding action."""
-         try:
-             action, *args = input_str.split()
-             if action in self.tools:
-                 if args:
-                     try:
-                         self.task_history.append(
-                             {
-                                 "action": action,
-                                 "input": " ".join(args),
-                                 "output": self.tools[action](" ".join(args)),
-                             }
-                         )
-                         print(
-                             f"Action: {action}\nInput: {' '.join(args)}\nOutput: {self.tools[action](' '.join(args))}"
-                         )
-                     except Exception as e:
-                         self.task_history.append(
-                             {
-                                 "action": action,
-                                 "input": " ".join(args),
-                                 "output": f"Error: {e}",
-                             }
-                         )
-                         print(
-                             f"Action: {action}\nInput: {' '.join(args)}\nOutput: Error: {e}"
-                         )
-                 else:
-                     try:
-                         self.task_history.append(
-                             {
-                                 "action": action,
-                                 "input": None,
-                                 "output": self.tools[action](),
-                             }
-                         )
-                         print(
-                             f"Action: {action}\nInput: None\nOutput: {self.tools[action]()}"
-                         )
-                     except Exception as e:
-                         self.task_history.append(
-                             {
-                                 "action": action,
-                                 "input": None,
-                                 "output": f"Error: {e}",
-                             }
-                         )
-                         print(
-                             f"Action: {action}\nInput: None\nOutput: Error: {e}"
-                         )
-             else:
-                 raise InvalidActionError(
-                     "Invalid action. Please choose a valid action from the list of tools."
-                 )
-         except (
-             InvalidActionError,
-             InvalidInputError,
-             CodeGenerationError,
-             CodeRefinementError,
-             CodeTestingError,
-             CodeIntegrationError,
-             AppTestingError,
-             WorkspaceExplorerError,
-             PromptManagementError,
-             SearchError,
-         ) as e:
-             print(f"Error: {e}")
-
-     # --- Main Loop of the Agent ---
-     def run(self):
-         """Runs the agent continuously, waiting for user input."""
-         while True:
-             input_str = input("Enter a command for the AI Agent: ")
-             self.handle_input(input_str)
-
-
- # --- Streamlit Integration ---
- if __name__ == "__main__":
-     agent = AIAgent()
-     st.set_page_config(
-         page_title="AI Agent",
-         page_icon="🤖",
-         layout="wide",
-         initial_sidebar_state="expanded",
      )

-     # --- Tabbed Navigation ---
-     tabs = st.tabs(["Agent Generation", "Chat App"])
-
-     with tabs[0]:
-         st.title("AI Agent Generation")
-         st.sidebar.title("Agent Settings")
-
-         # --- Command Dropdown ---
-         command_options = [
-             "SEARCH",
-             "CODEGEN",
-             "REFINE-CODE",
-             "TEST-CODE",
-             "INTEGRATE-CODE",
-             "TEST-APP",
-             "GENERATE-REPORT",
-             "WORKSPACE-EXPLORER",
-             "ADD_PROMPT",
-             "ACTION_PROMPT",
-             "COMPRESS_HISTORY_PROMPT",
-             "LOG_PROMPT",
-             "LOG_RESPONSE",
-             "MODIFY_PROMPT",
-             "PREFIX",
-             "SEARCH_QUERY",
-             "READ_PROMPT",
-             "TASK_PROMPT",
-             "UNDERSTAND_TEST_RESULTS_PROMPT",
-         ]
-         selected_command = st.sidebar.selectbox("Command", command_options)
-
-         # --- Model Dropdown ---
-         selected_model = st.sidebar.selectbox(
-             "Model", agent.available_models, index=agent.available_models.index(agent.selected_model)
-         )
-         agent.selected_model = selected_model
-
-         # --- Input Field ---
-         input_str = st.text_input(f"Enter input for {selected_command}:")
-
-         # --- Execute Command ---
-         if st.button("Execute"):
-             if input_str:
-                 agent.handle_input(f"{selected_command} {input_str}")
-                 st.write(f"Output: {agent.task_history[-1]['output']}")
-
-         # --- Task History ---
-         st.subheader("Task History")
-         for task in agent.task_history:
-             st.write(f"**Action:** {task['action']}")
-             st.write(f"**Input:** {task['input']}")
-             st.write(f"**Output:** {task['output']}")
-
-         # --- Workspace Explorer ---
-         st.subheader("Workspace Explorer")
-         with st.expander("Explore Workspace"):
-             try:
-                 workspace_output = agent.workspace_explorer()
-                 st.write(workspace_output)
-             except WorkspaceExplorerError as e:
-                 st.error(f"Error exploring workspace: {e}")
-
-     with tabs[1]:
-         st.title("Chat App")
-         st.sidebar.title("Chat Settings")
-
-         # --- Model Dropdown ---
-         selected_chat_model = st.sidebar.selectbox(
-             "Model", agent.available_models, index=agent.available_models.index(agent.selected_model)
-         )
-         agent.selected_model = selected_chat_model
-
-         # --- Chat History ---
-         chat_history = st.empty()
-         chat_input = st.text_input("Enter your message:")
-
-         # --- Chat Input Handling ---
-         if st.button("Send"):
-             if chat_input:
-                 user_message = chat_input
-                 chat_history.markdown(f"**You:** {user_message}")
-                 chat_input.empty()
-
-                 # --- Generate Response ---
-                 try:
-                     if not agent.code_generator:
-                         agent.code_generator = pipeline(
-                             "text-generation", model=agent.selected_model
-                         )
-                     response = agent.code_generator(
-                         user_message, max_length=500, num_return_sequences=1
-                     )[0]["generated_text"]
-                     chat_history.markdown(f"**Agent:** {response}")
-                 except Exception as e:
-                     chat_history.markdown(f"**Agent:** Error: {e}")
-
-     # --- Gradio Integration ---
-     def gradio_interface(input_text):
-         """Gradio interface function."""
          try:
-             agent.handle_input(input_text)
-             output = agent.task_history[-1]["output"] # Get the latest output
-             return output
          except Exception as e:
-             return f"Error: {e}"
-
-
-     iface = gr.Interface(
-         fn=gradio_interface,
-         inputs=gr.Textbox(label="Enter Command"),
-         outputs=gr.Textbox(label="Output"),
-         title="AI Agent",
-         description="Interact with the AI Agent.",
-     )

-     iface.launch()
  import os
  import subprocess
+ import random
+ from huggingface_hub import InferenceClient
  import gradio as gr
+ from safe_search import safe_search
+ from i_search import google
+ from i_search import i_search as i_s
+ from agent import (
+     ACTION_PROMPT,
+     ADD_PROMPT,
+     COMPRESS_HISTORY_PROMPT,
+     LOG_PROMPT,
+     LOG_RESPONSE,
+     MODIFY_PROMPT,
+     PREFIX,
+     SEARCH_QUERY,
+     READ_PROMPT,
+     TASK_PROMPT,
+     UNDERSTAND_TEST_RESULTS_PROMPT,
+ )
+ from utils import parse_action, parse_file_content, read_python_module_structure
  from datetime import datetime
+ now = datetime.now()
+ date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
+
+ client = InferenceClient(
+     "mistralai/Mixtral-8x7B-Instruct-v0.1"
+ )
+
+ ############################################
+
+
+ VERBOSE = True
+ MAX_HISTORY = 100
+ #MODEL = "gpt-3.5-turbo" # "gpt-4"
+
+
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
+
+
+ def run_gpt(
+     prompt_template,
+     stop_tokens,
+     max_tokens,
+     purpose,
+     **prompt_kwargs,
+ ):
+     seed = random.randint(1,1111111111111111)
+     print (seed)
+     generate_kwargs = dict(
+         temperature=1.0,
+         max_new_tokens=2096,
+         top_p=0.99,
+         repetition_penalty=1.0,
+         do_sample=True,
+         seed=seed,
      )

+
+     content = PREFIX.format(
+         date_time_str=date_time_str,
+         purpose=purpose,
+         safe_search=safe_search,
+     ) + prompt_template.format(**prompt_kwargs)
+     if VERBOSE:
+         print(LOG_PROMPT.format(content))
+
+
+     #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     #formatted_prompt = format_prompt(f'{content}', history)
+
+     stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     resp = ""
+     for response in stream:
+         resp += response.token.text
+
+     if VERBOSE:
+         print(LOG_RESPONSE.format(resp))
+     return resp
+
+
+ def compress_history(purpose, task, history, directory):
+     resp = run_gpt(
+         COMPRESS_HISTORY_PROMPT,
+         stop_tokens=["observation:", "task:", "action:", "thought:"],
+         max_tokens=512,
+         purpose=purpose,
+         task=task,
+         history=history,
+     )
+     history = "observation: {}\n".format(resp)
+     return history
+
+ def call_search(purpose, task, history, directory, action_input):
+     print("CALLING SEARCH")
      try:
+
+         if "http" in action_input:
+             if "<" in action_input:
+                 action_input = action_input.strip("<")
+             if ">" in action_input:
+                 action_input = action_input.strip(">")
+
+             response = i_s(action_input)
+             #response = google(search_return)
+             print(response)
+             history += "observation: search result is: {}\n".format(response)
+         else:
+             history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
      except Exception as e:
+         history += "observation: {}'\n".format(e)
+     return "MAIN", None, history, task
+
+ def call_main(purpose, task, history, directory, action_input):
+     resp = run_gpt(
+         ACTION_PROMPT,
+         stop_tokens=["observation:", "task:", "action:","thought:"],
+         max_tokens=2096,
+         purpose=purpose,
+         task=task,
+         history=history,
+     )
+     lines = resp.strip().strip("\n").split("\n")
+     for line in lines:
+         if line == "":
+             continue
+         if line.startswith("thought: "):
+             history += "{}\n".format(line)
+         elif line.startswith("action: "):
+
+             action_name, action_input = parse_action(line)
+             print (f'ACTION_NAME :: {action_name}')
+             print (f'ACTION_INPUT :: {action_input}')
+
+             history += "{}\n".format(line)
+             if "COMPLETE" in action_name or "COMPLETE" in action_input:
+                 task = "END"
+                 return action_name, action_input, history, task
+             else:
+                 return action_name, action_input, history, task
+         else:
+             history += "{}\n".format(line)
+             #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
+
+     #return action_name, action_input, history, task
+     #assert False, "unknown action: {}".format(line)
+     return "MAIN", None, history, task
+
+
+ def call_set_task(purpose, task, history, directory, action_input):
+     task = run_gpt(
+         TASK_PROMPT,
+         stop_tokens=[],
+         max_tokens=64,
+         purpose=purpose,
+         task=task,
+         history=history,
+     ).strip("\n")
+     history += "observation: task has been updated to: {}\n".format(task)
+     return "MAIN", None, history, task
+
+ def end_fn(purpose, task, history, directory, action_input):
+     task = "END"
+     return "COMPLETE", "COMPLETE", history, task
+
+ NAME_TO_FUNC = {
+     "MAIN": call_main,
+     "UPDATE-TASK": call_set_task,
+     "SEARCH": call_search,
+     "COMPLETE": end_fn,
+
+ }
+
+ def run_action(purpose, task, history, directory, action_name, action_input):
+     print(f'action_name::{action_name}')
+     try:
+         if "RESPONSE" in action_name or "COMPLETE" in action_name:
+             action_name="COMPLETE"
+             task="END"
+             return action_name, "COMPLETE", history, task
+
+         # compress the history when it is long
+         if len(history.split("\n")) > MAX_HISTORY:
+             if VERBOSE:
+                 print("COMPRESSING HISTORY")
+             history = compress_history(purpose, task, history, directory)
+         if not action_name in NAME_TO_FUNC:
+             action_name="MAIN"
+         if action_name == "" or action_name == None:
+             action_name="MAIN"
+         assert action_name in NAME_TO_FUNC
+
+         print("RUN: ", action_name, action_input)
+         return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
+     except Exception as e:
+         history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
+
+         return "MAIN", None, history, task
+
+ def run(purpose,history):
+
+     #print(purpose)
+     #print(hist)
+     task=None
+     directory="./"
+     if history:
+         history=str(history).strip("[]")
+     if not history:
+         history = ""
+
+     action_name = "UPDATE-TASK" if task is None else "MAIN"
+     action_input = None
+     while True:
+         print("")
+         print("")
+         print("---")
+         print("purpose:", purpose)
+         print("task:", task)
+         print("---")
+         print(history)
+         print("---")
+
+         action_name, action_input, history, task = run_action(
+             purpose,
+             task,
+             history,
+             directory,
+             action_name,
+             action_input,
+         )
+         yield (history)
+         #yield ("",[(purpose,history)])
+         if task == "END":
+             return (history)
+             #return ("", [(purpose,history)])
+
+
+
+ ################################################
+
+ def format_prompt(message, history):
+     prompt = "<s>"
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+ agents =[
+     "WEB_DEV",
+     "AI_SYSTEM_PROMPT",
+     "PYTHON_CODE_DEV"
+ ]
+ def generate(
+     prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
+ ):
+     seed = random.randint(1,1111111111111111)
+
+     agent=prompts.WEB_DEV
+     if agent_name == "WEB_DEV":
+         agent = prompts.WEB_DEV
+     if agent_name == "AI_SYSTEM_PROMPT":
+         agent = prompts.AI_SYSTEM_PROMPT
+     if agent_name == "PYTHON_CODE_DEV":
+         agent = prompts.PYTHON_CODE_DEV
+     system_prompt=agent
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=seed,
+     )

+     formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
+
+
+ additional_inputs=[
+     gr.Dropdown(
+         label="Agents",
+         choices=[s for s in agents],
+         value=agents[0],
+         interactive=True,
+     ),
+     gr.Textbox(
+         label="System Prompt",
+         max_lines=1,
+         interactive=True,
+     ),
+     gr.Slider(
+         label="Temperature",
+         value=0.9,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values produce more diverse outputs",
+     ),
+
+     gr.Slider(
+         label="Max new tokens",
+         value=1048*10,
+         minimum=0,
+         maximum=1048*10,
+         step=64,
+         interactive=True,
+         info="The maximum numbers of new tokens",
+     ),
+     gr.Slider(
+         label="Top-p (nucleus sampling)",
+         value=0.90,
+         minimum=0.0,
+         maximum=1,
+         step=0.05,
+         interactive=True,
+         info="Higher values sample more low-probability tokens",
+     ),
+     gr.Slider(
+         label="Repetition penalty",
+         value=1.2,
+         minimum=1.0,
+         maximum=2.0,
+         step=0.05,
+         interactive=True,
+         info="Penalize repeated tokens",
+     ),
+
+
+ ]
+
+ examples=[["What are the biggest news stories today?", None, None, None, None, None, ],
+     ["When is the next full moon?", None, None, None, None, None, ],
+     ["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None, ],
+     ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None,],
+     ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None,],
+     ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None,],
+     ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None,],
+     ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None,],
+     ]
+
+ '''
+ gr.ChatInterface(
+     fn=run,
+     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+     title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
+     examples=examples,
+     concurrency_limit=20,
+ with gr.Blocks() as ifacea:
+     gr.HTML("""TEST""")
+     ifacea.launch()
+ ).launch()
+ with gr.Blocks() as iface:
+     #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+     chatbot=gr.Chatbot()
+     msg = gr.Textbox()
+     with gr.Row():
+         submit_b = gr.Button()
+         clear = gr.ClearButton([msg, chatbot])
+     submit_b.click(run, [msg,chatbot],[msg,chatbot])
+     msg.submit(run, [msg, chatbot], [msg, chatbot])
+     iface.launch()
+ '''
+ gr.ChatInterface(
+     fn=run,
+     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
+     title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
+     examples=examples,
+     concurrency_limit=20,
+ ).launch(show_api=False)
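
Note: the new agent loop above depends on parse_action from utils.py, which is not part of this diff. A minimal sketch of the contract the loop appears to assume (the real helper may differ): it splits a model line of the form "action: NAME action_input=VALUE" into a name and an optional input.

# Hypothetical sketch of the parse_action contract assumed by call_main();
# the actual implementation lives in utils.py and is not shown in this commit.
def parse_action(line: str):
    """Split 'action: NAME action_input=VALUE' into (NAME, VALUE or None)."""
    body = line[len("action: "):]
    if "action_input=" in body:
        name, value = body.split("action_input=", 1)
        return name.strip(), value.strip()
    return body.strip(), None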