acecalisto3 committed
Commit d331751 · verified · 1 Parent(s): 8ae11cf

Update app.py

Files changed (1)
  1. app.py +222 -364
app.py CHANGED
@@ -1,11 +1,15 @@
 import os
 import subprocess
 import random
-from huggingface_hub import InferenceClient
 import gradio as gr
 from safe_search import safe_search
-from i_search import google
-from i_search import i_search as i_s
 from agent import (
     ACTION_PROMPT,
     ADD_PROMPT,
@@ -19,372 +23,226 @@ from agent import (
     TASK_PROMPT,
     UNDERSTAND_TEST_RESULTS_PROMPT,
 )
-from utils import parse_action, parse_file_content, read_python_module_structure
-from datetime import datetime
-now = datetime.now()
-date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")
-
-client = InferenceClient(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1"
-)
-
-############################################
-
-
-VERBOSE = True
-MAX_HISTORY = 100
-#MODEL = "gpt-3.5-turbo" # "gpt-4"
-

-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-
-
-
-def run_gpt(
-    prompt_template,
-    stop_tokens,
-    max_tokens,
-    purpose,
-    **prompt_kwargs,
-):
-    seed = random.randint(1,1111111111111111)
-    print (seed)
-    generate_kwargs = dict(
-        temperature=1.0,
-        max_new_tokens=2096,
-        top_p=0.99,
-        repetition_penalty=1.0,
-        do_sample=True,
-        seed=seed,
-    )
-
-
-    content = PREFIX.format(
-        date_time_str=date_time_str,
-        purpose=purpose,
-        safe_search=safe_search,
-    ) + prompt_template.format(**prompt_kwargs)
-    if VERBOSE:
-        print(LOG_PROMPT.format(content))
-
-
-    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    #formatted_prompt = format_prompt(f'{content}', history)
-
-    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    resp = ""
-    for response in stream:
-        resp += response.token.text
-
-    if VERBOSE:
-        print(LOG_RESPONSE.format(resp))
-    return resp


-def compress_history(purpose, task, history, directory):
-    resp = run_gpt(
-        COMPRESS_HISTORY_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        max_tokens=512,
-        purpose=purpose,
-        task=task,
-        history=history,
-    )
-    history = "observation: {}\n".format(resp)
-    return history
-
-def call_search(purpose, task, history, directory, action_input):
-    print("CALLING SEARCH")
-    try:
-
-        if "http" in action_input:
-            if "<" in action_input:
-                action_input = action_input.strip("<")
-            if ">" in action_input:
-                action_input = action_input.strip(">")
-
-            response = i_s(action_input)
-            #response = google(search_return)
-            print(response)
-            history += "observation: search result is: {}\n".format(response)
         else:
-            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
-    except Exception as e:
-        history += "observation: {}'\n".format(e)
-    return "MAIN", None, history, task
-
-def call_main(purpose, task, history, directory, action_input):
-    resp = run_gpt(
-        ACTION_PROMPT,
-        stop_tokens=["observation:", "task:", "action:","thought:"],
-        max_tokens=2096,
-        purpose=purpose,
-        task=task,
-        history=history,
-    )
-    lines = resp.strip().strip("\n").split("\n")
-    for line in lines:
-        if line == "":
-            continue
-        if line.startswith("thought: "):
-            history += "{}\n".format(line)
-        elif line.startswith("action: "):
-
-            action_name, action_input = parse_action(line)
-            print (f'ACTION_NAME :: {action_name}')
-            print (f'ACTION_INPUT :: {action_input}')
-
-            history += "{}\n".format(line)
-            if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                task = "END"
-                return action_name, action_input, history, task
             else:
-                return action_name, action_input, history, task
         else:
-            history += "{}\n".format(line)
-            #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)
-
-            #return action_name, action_input, history, task
-            #assert False, "unknown action: {}".format(line)
-    return "MAIN", None, history, task
-
-
-def call_set_task(purpose, task, history, directory, action_input):
-    task = run_gpt(
-        TASK_PROMPT,
-        stop_tokens=[],
-        max_tokens=64,
-        purpose=purpose,
-        task=task,
-        history=history,
-    ).strip("\n")
-    history += "observation: task has been updated to: {}\n".format(task)
-    return "MAIN", None, history, task
-
-def end_fn(purpose, task, history, directory, action_input):
-    task = "END"
-    return "COMPLETE", "COMPLETE", history, task
-
-NAME_TO_FUNC = {
-    "MAIN": call_main,
-    "UPDATE-TASK": call_set_task,
-    "SEARCH": call_search,
-    "COMPLETE": end_fn,
-
-}

-def run_action(purpose, task, history, directory, action_name, action_input):
-    print(f'action_name::{action_name}')
     try:
-        if "RESPONSE" in action_name or "COMPLETE" in action_name:
-            action_name="COMPLETE"
-            task="END"
-            return action_name, "COMPLETE", history, task
-
-        # compress the history when it is long
-        if len(history.split("\n")) > MAX_HISTORY:
-            if VERBOSE:
-                print("COMPRESSING HISTORY")
-            history = compress_history(purpose, task, history, directory)
-        if not action_name in NAME_TO_FUNC:
-            action_name="MAIN"
-        if action_name == "" or action_name == None:
-            action_name="MAIN"
-        assert action_name in NAME_TO_FUNC
-
-        print("RUN: ", action_name, action_input)
-        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
-    except Exception as e:
-        history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"
-
-        return "MAIN", None, history, task
-
-def run(purpose, history):
-
-    #print(purpose)
-    #print(hist)
-    task=None
-    directory="./"
-    if history:
-        history=str(history).strip("[]")
-    if not history:
-        history = ""
-
-    action_name = "UPDATE-TASK" if task is None else "MAIN"
-    action_input = None
-    while True:
-        print("")
-        print("")
-        print("---")
-        print("purpose:", purpose)
-        print("task:", task)
-        print("---")
-        print(history)
-        print("---")
-
-        action_name, action_input, history, task = run_action(
-            purpose,
-            task,
-            history,
-            directory,
-            action_name,
-            action_input,
-        )
-        yield (history) # Yield the updated chat history
-        #yield ("",[(purpose,history)])
-        if task == "END":
-            return "", history # Return an empty string for the Textbox and the chat history for the Chatbot
-            #return ("", [(purpose,history)])
-
-
-################################################
-
-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-agents =[
-    "WEB_DEV",
-    "AI_SYSTEM_PROMPT",
-    "PYTHON_CODE_DEV"
-]
-def generate(
-    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.0,
-):
-    seed = random.randint(1,1111111111111111)
-
-    agent=prompts.WEB_DEV
-    if agent_name == "WEB_DEV":
-        agent = prompts.WEB_DEV
-    if agent_name == "AI_SYSTEM_PROMPT":
-        agent = prompts.AI_SYSTEM_PROMPT
-    if agent_name == "PYTHON_CODE_DEV":
-        agent = prompts.PYTHON_CODE_DEV
-    system_prompt=agent
-    temperature = float(temperature)
-    if temperature < 1e-2:
-        temperature = 1e-2
-    top_p = float(top_p)
-
-    generate_kwargs = dict(
-        temperature=temperature,
-        max_new_tokens=max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        do_sample=True,
-        seed=seed,
-    )
-
-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""
-
-    for response in stream:
-        output += response.token.text
-        yield output
-    return output
-
-
-additional_inputs=[
-    gr.Dropdown(
-        label="Agents",
-        choices=[s for s in agents],
-        value=agents[0],
-        interactive=True,
-    ),
-    gr.Textbox(
-        label="System Prompt",
-        max_lines=1,
-        interactive=True,
-    ),
-    gr.Slider(
-        label="Temperature",
-        value=0.9,
-        minimum=0.0,
-        maximum=1.0,
-        step=0.05,
-        interactive=True,
-        info="Higher values produce more diverse outputs",
-    ),
-
-    gr.Slider(
-        label="Max new tokens",
-        value=1048*10,
-        minimum=0,
-        maximum=1048*10,
-        step=64,
-        interactive=True,
-        info="The maximum numbers of new tokens",
-    ),
-    gr.Slider(
-        label="Top-p (nucleus sampling)",
-        value=0.90,
-        minimum=0.0,
-        maximum=1,
-        step=0.05,
-        interactive=True,
-        info="Higher values sample more low-probability tokens",
-    ),
-    gr.Slider(
-        label="Repetition penalty",
-        value=1.2,
-        minimum=1.0,
-        maximum=2.0,
-        step=0.05,
-        interactive=True,
-        info="Penalize repeated tokens",
-    ),
-
-
-]
-
-examples=[["What are the biggest news stories today?", None, None, None, None, None, ],
-    ["When is the next full moon?", None, None, None, None, None, ],
-    ["I'm planning a vacation to Japan. Can you suggest a one-week itinerary including must-visit places and local cuisines to try?", None, None, None, None, None, ],
-    ["Can you write a short story about a time-traveling detective who solves historical mysteries?", None, None, None, None, None,],
-    ["I'm trying to learn French. Can you provide some common phrases that would be useful for a beginner, along with their pronunciations?", None, None, None, None, None,],
-    ["I have chicken, rice, and bell peppers in my kitchen. Can you suggest an easy recipe I can make with these ingredients?", None, None, None, None, None,],
-    ["Can you explain how the QuickSort algorithm works and provide a Python implementation?", None, None, None, None, None,],
-    ["What are some unique features of Rust that make it stand out compared to other systems programming languages like C++?", None, None, None, None, None,],
-]
-
-'''
-gr.ChatInterface(
-    fn=run,
-    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    title="Mixtral 46.7B\nMicro-Agent\nInternet Search <br> development test",
-    examples=examples,
-    concurrency_limit=20,
-with gr.Blocks() as ifacea:
-    gr.HTML("""TEST""")
-ifacea.launch()
-).launch()
-with gr.Blocks() as iface:
-    #chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
-    chatbot=gr.Chatbot()
-    msg = gr.Textbox()
-    with gr.Row():
-        submit_b = gr.Button()
-        clear = gr.ClearButton([msg, chatbot])
-    submit_b.click(run, [msg,chatbot],[msg,chatbot])
-    msg.submit(run, [msg, chatbot], [msg, chatbot])
-iface.launch()
-'''
-with gr.Blocks() as iface:
-    chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, layout="panel")
-    msg = gr.Textbox()
-    with gr.Row():
-        submit_b = gr.Button()
-        clear = gr.ClearButton([msg, chatbot])
-    submit_b.click(run, [msg,chatbot],[msg,chatbot])
-    msg.submit(run, [msg, chatbot], [msg, chatbot])
-iface.launch()
 import os
 import subprocess
 import random
+import json
+from datetime import datetime
+
+from huggingface_hub import InferenceClient, cached_download, hf_hub_url
 import gradio as gr
+
 from safe_search import safe_search
+from i_search import google, i_search as i_s
+
 from agent import (
     ACTION_PROMPT,
     ADD_PROMPT,

     TASK_PROMPT,
     UNDERSTAND_TEST_RESULTS_PROMPT,
 )

+from utils import parse_action, parse_file_content, read_python_module_structure


+class App:
+    def __init__(self):
+        self.app_state = {"components": []}
+        self.terminal_history = ""
+        self.components_registry = {
+            "Button": {
+                "properties": {
+                    "label": "Click Me",
+                    "onclick": ""
+                },
+                "description": "A clickable button",
+                "code_snippet": "gr.Button(value='{{label}}', variant='primary')"
+            },
+            "Text Input": {
+                "properties": {
+                    "value": "",
+                    "placeholder": "Enter text"
+                },
+                "description": "A field for entering text",
+                "code_snippet": "gr.Textbox(label='{{placeholder}}')"
+            },
+            "Image": {
+                "properties": {
+                    "src": "#",
+                    "alt": "Image"
+                },
+                "description": "Displays an image",
+                "code_snippet": "gr.Image(label='{{alt}}')"
+            },
+            "Dropdown": {
+                "properties": {
+                    "choices": ["Option 1", "Option 2"],
+                    "value": ""
+                },
+                "description": "A dropdown menu for selecting options",
+                "code_snippet": "gr.Dropdown(choices={{choices}}, label='Dropdown')"
+            }
+        }
+        self.nlp_model_names = [
+            "google/flan-t5-small",
+            "Qwen/CodeQwen1.5-7B-Chat-GGUF",
+            "bartowski/Codestral-22B-v0.1-GGUF",
+            "bartowski/AutoCoder-GGUF"
+        ]
+        self.nlp_models = []
+        self.initialize_nlp_models()
+
+    def initialize_nlp_models(self):
+        for nlp_model_name in self.nlp_model_names:
+            try:
+                cached_download(hf_hub_url(nlp_model_name, revision="main"))
+                self.nlp_models.append(InferenceClient(nlp_model_name))
+            except:
+                self.nlp_models.append(None)
+
+    def get_nlp_response(self, input_text, model_index):
+        if self.nlp_models[model_index]:
+            response = self.nlp_models[model_index].text_generation(input_text)
+            return response.generated_text
         else:
+            return "NLP model not available."
+
+    class Component:
+        def __init__(self, type, properties=None, id=None):
+            self.id = id or random.randint(1000, 9999)
+            self.type = type
+            self.properties = properties or self.components_registry[type]["properties"].copy()
+
+        def to_dict(self):
+            return {
+                "id": self.id,
+                "type": self.type,
+                "properties": self.properties,
+            }
+
+        def render(self):
+            if self.type == "Dropdown":
+                self.properties["choices"] = str(self.properties["choices"]).replace("[", "").replace("]", "").replace("'", "")
+            return self.components_registry[self.type]["code_snippet"].format(**self.properties)
+
+    def update_app_canvas(self):
+        components_html = "".join([f"<div>Component ID: {component['id']}, Type: {component['type']}, Properties: {component['properties']}</div>" for component in self.app_state["components"]])
+        return components_html
+
+    def add_component(self, component_type):
+        if component_type in self.components_registry:
+            new_component = self.Component(component_type)
+            self.app_state["components"].append(new_component.to_dict())
+            return (
+                self.update_app_canvas(),
+                f"System: Added component: {component_type}\n",
+            )
+        else:
+            return None, f"Error: Invalid component type: {component_type}\n"
+
+    def run_terminal_command(self, command, history):
+        output = ""
+        try:
+            if command.startswith("add "):
+                component_type = command.split("add ")[1]
+                return self.add_component(component_type)
+            elif command.startswith("search "):
+                query = command.split("search ")[1]
+                return google(query)
+            elif command.startswith("i search "):
+                query = command.split("i search ")[1]
+                return i_s(query)
+            elif command.startswith("safe search "):
+                query = command.split("safesearch ")[1]
+                return safe_search(query)
+            elif command.startswith("read "):
+                file_path = command.split("read ")[1]
+                return parse_file_content(file_path)
+            elif command == "task":
+                return TASK_PROMPT
+            elif command == "modify":
+                return MODIFY_PROMPT
+            elif command == "log":
+                return LOG_PROMPT
+            elif command.startswith("understand test results "):
+                test_results = command.split("understand test results ")[1]
+                return self.understand_test_results(test_results)
+            elif command.startswith("compress history"):
+                return self.compress_history(history)
+            elif command == "help":
+                return self.get_help_message()
+            elif command == "exit":
+                exit()
             else:
+                output = subprocess.check_output(command, shell=True).decode("utf-8")
+        except Exception as e:
+            output = str(e)
+        return output or "No output\n"
+
+    def compress_history(self, history):
+        compressed_history = ""
+        lines = history.strip().split("\n")
+        for line in lines:
+            if not line.strip().startswith("#"):
+                compressed_history += line + "\n"
+        return compressed_history
+
+    def understand_test_results(self, test_results):
+        # Logic to understand test results
+        return UNDERSTAND_TEST_RESULTS_PROMPT
+
+    def get_help_message(self):
+        return """
+        Available commands:
+        - add [component_type]: Add a component to the app canvas
+        - search [query]: Perform a Google search
+        - i search [query]: Perform an intelligent search
+        - safe search [query]: Perform a safe search
+        - read [file_path]: Read and parse the content of a Python module
+        - task: Prompt for a task to perform
+        - modify: Prompt to modify a component property
+        - log: Prompt to log a response
+        - understand test results [test_results]: Understand test results
+        - compress history: Compress the terminal history by removing comments
+        - help: Show this help message
+        - exit: Exit the program
+        """
+
+    def process_input(self, input_text):
+        if input_text.strip().startswith("/"):
+            command = input_text.strip().lstrip("/")
+            output = self.run_terminal_command(command, self.terminal_history)
+            self.terminal_history += f"{input_text}\n{output}\n"
+            return output
         else:
+            model_index = random.randint(0, len(self.nlp_models)-1)
+            response = self.get_nlp_response(input_text, model_index)
+            component_id, action, property_name, property_value = parse_action(response)
+            if component_id:
+                component = next((comp for comp in self.app_state["components"] if comp["id"] == component_id), None)
+                if component:
+                    if action == "update":
+                        component["properties"][property_name] = property_value
+                        return (
+                            self.update_app_canvas(),
+                            f"System: Updated property '{property_name}' of component with ID {component_id}\n",
+                        )
+                    elif action == "remove":
+                        self.app_state["components"].remove(component)
+                        return (
+                            self.update_app_canvas(),
+                            f"System: Removed component with ID {component_id}\n",
+                        )
+                    else:
+                        return None, f"Error: Invalid action: {action}\n"
+                else:
+                    return None, f"Error: Component with ID {component_id} not found\n"
+            else:
+                return None, f"Error: Failed to parse action from NLP response\n"

+    def run(self):
+        print("Welcome to the Python App Builder!")
+        print("Type 'help' to see the available commands.")
+        print("-" * 50)
         try:
+            while True:
+                try:
+                    input_text = input("Enter input: ")
+                except EOFError:
+                    print("Error: Input reading interrupted. Please provide valid input.")
+                    continue
+
+                output, system_message = self.process_input(input_text)
+                if output:
+                    print(output)
+                if system_message:
+                    print(system_message)
+        except KeyboardInterrupt:
+            print("\nApplication stopped by user.")
+
+
+if __name__ == "__main__":
+    app = App()
+    app.run()
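
For reference, here is a minimal standalone sketch of the slash-command dispatch pattern that the new App.process_input follows: input starting with "/" is routed to a terminal-command handler, anything else goes to an NLP model. This is illustrative only and not part of the commit; handle_terminal_command and handle_nlp_request are hypothetical stand-ins for run_terminal_command and get_nlp_response.

# Illustrative sketch of the "/" dispatch in App.process_input (not from the commit).
def handle_terminal_command(command: str) -> str:
    # Hypothetical stand-in for App.run_terminal_command.
    return f"terminal output for: {command}"

def handle_nlp_request(text: str) -> str:
    # Hypothetical stand-in for App.get_nlp_response.
    return f"nlp response for: {text}"

def process_input(input_text: str) -> str:
    stripped = input_text.strip()
    if stripped.startswith("/"):
        # "/add Button" is handled as the terminal command "add Button".
        return handle_terminal_command(stripped.lstrip("/"))
    # Anything else is treated as a natural-language request.
    return handle_nlp_request(stripped)

print(process_input("/add Button"))
print(process_input("make the button red"))

The real method additionally records each command and its output in terminal_history and, on the NLP path, applies parse_action to the model response before mutating app_state.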