acecalisto3 committed
Commit 7d3b433
1 Parent(s): 09466d2

Update app.py

Files changed (1)
  1. app.py +280 -118
app.py CHANGED
@@ -3,80 +3,25 @@ import gradio as gr
  import random
  import os
  import subprocess
+ import threading
+ import time
+ import shutil
+ from typing import Dict, Tuple

+ # Constants
  API_URL = "https://api-inference.huggingface.co/models/"
+ MODEL_NAME = "mistralai/Mixtral-8x7B-Instruct-v0.1"  # Replace with your desired model
+ DEFAULT_TEMPERATURE = 0.9
+ DEFAULT_MAX_NEW_TOKENS = 2048
+ DEFAULT_TOP_P = 0.95
+ DEFAULT_REPETITION_PENALTY = 1.2
+ LOCAL_HOST_PORT = 7860

- client = InferenceClient(
-     "mistralai/Mixtral-8x7B-Instruct-v0.1"
- )
+ # Initialize the InferenceClient
+ client = InferenceClient(MODEL_NAME)

- def format_prompt(message, history, agent_roles):
-     """Formats the prompt with the selected agent roles and conversation history."""
-     prompt = f"""
- You are an expert agent cluster, consisting of {', '.join(agent_roles)}.
- Respond with complete program coding to client requests.
- Using available tools, please explain the researched information.
- Please don't answer based solely on what you already know. Always perform a search before providing a response.
- In special cases, such as when the user specifies a page to read, there's no need to search.
- Please read the provided page and answer the user's question accordingly.
- If you find that there's not much information just by looking at the search results page, consider these two options and try them out:
- - Try clicking on the links of the search results to access and read the content of each page.
- - Change your search query and perform a new search.
- Users are extremely busy and not as free as you are.
- Therefore, to save the user's effort, please provide direct answers.
- BAD ANSWER EXAMPLE
- - Please refer to these pages.
- - You can write code referring these pages.
- - Following page will be helpful.
- GOOD ANSWER EXAMPLE
- - This is the complete code: -- complete code here --
- - The answer of you question is -- answer here --
- Please make sure to list the URLs of the pages you referenced at the end of your answer. (This will allow users to verify your response.)
- Please make sure to answer in the language used by the user. If the user asks in Japanese, please answer in Japanese. If the user asks in Spanish, please answer in Spanish.
- But, you can go ahead and search in English, especially for programming-related questions. PLEASE MAKE SURE TO ALWAYS SEARCH IN ENGLISH FOR THOSE.
- """
-
-     for user_prompt, bot_response in history:
-         prompt += f"[INST] {user_prompt} [/INST]"
-         prompt += f" {bot_response}</s> "
-
-     prompt += f"[INST] {message} [/INST]"
-     return prompt
-
- def generate(prompt, history, agent_roles, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
-     """Generates a response using the selected agent roles and parameters."""
-     temperature = float(temperature)
-     if temperature < 1e-2:
-         temperature = 1e-2
-     top_p = float(top_p)
-
-     generate_kwargs = dict(
-         temperature=temperature,
-         max_new_tokens=max_new_tokens,
-         top_p=top_p,
-         repetition_penalty=repetition_penalty,
-         do_sample=True,
-         seed=random.randint(0, 10**7),
-     )
-
-     formatted_prompt = format_prompt(prompt, history, agent_roles)
-
-     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-     output = ""
-
-     for response in stream:
-         output += response.token.text
-         yield output
-     return output
-
- def change_agent(agent_name):
-     """Updates the selected agent role."""
-     global selected_agent
-     selected_agent = agent_name
-     return f"Agent switched to: {agent_name}"
-
- # Define the available agent roles
- agent_roles = {
+ # Define agent roles and their initial states
+ agent_roles: Dict[str, Dict[str, bool]] = {
      "Web Developer": {"description": "A master of front-end and back-end web development.", "active": False},
      "Prompt Engineer": {"description": "An expert in crafting effective prompts for AI models.", "active": False},
      "Python Code Developer": {"description": "A skilled Python programmer who can write clean and efficient code.", "active": False},
@@ -87,7 +32,7 @@ agent_roles = {
  # Initialize the selected agent
  selected_agent = list(agent_roles.keys())[0]

- # Define the initial prompt for the selected agent
+ # Initial prompt for the selected agent
  initial_prompt = f"""
  You are an expert {selected_agent} who responds with complete program coding to client requests.
  Using available tools, please explain the researched information.
@@ -111,6 +56,7 @@ Please make sure to answer in the language used by the user. If the user asks in
  But, you can go ahead and search in English, especially for programming-related questions. PLEASE MAKE SURE TO ALWAYS SEARCH IN ENGLISH FOR THOSE.
  """

+ # Custom CSS for the chat interface
  customCSS = """
  #component-7 { # this is the default element ID of the chat component
      height: 1600px; # adjust the height as needed
@@ -118,17 +64,20 @@ customCSS = """
  }
  """

- def toggle_agent(agent_name):
+ # Function to toggle the active state of an agent
+ def toggle_agent(agent_name: str) -> str:
      """Toggles the active state of an agent."""
      global agent_roles
      agent_roles[agent_name]["active"] = not agent_roles[agent_name]["active"]
      return f"{agent_name} is now {'active' if agent_roles[agent_name]['active'] else 'inactive'}"

- def get_agent_cluster():
+ # Function to get the active agent cluster
+ def get_agent_cluster() -> Dict[str, bool]:
      """Returns a dictionary of active agents."""
      return {agent: agent_roles[agent]["active"] for agent in agent_roles}

- def run_code(code):
+ # Function to execute code
+ def run_code(code: str) -> str:
      """Executes the provided code and returns the output."""
      try:
          output = subprocess.check_output(
@@ -140,7 +89,69 @@ def run_code(code):
      except subprocess.CalledProcessError as e:
          return f"Error: {e.output}"

- def chat_interface(message, history, agent_cluster, temperature=0.9, max_new_tokens=2048, top_p=0.95, repetition_penalty=1.0):
+ # Function to format the prompt
+ def format_prompt(message: str, history: list[Tuple[str, str]], agent_roles: list[str]) -> str:
+     """Formats the prompt with the selected agent roles and conversation history."""
+     prompt = f"""
+ You are an expert agent cluster, consisting of {', '.join(agent_roles)}.
+ Respond with complete program coding to client requests.
+ Using available tools, please explain the researched information.
+ Please don't answer based solely on what you already know. Always perform a search before providing a response.
+ In special cases, such as when the user specifies a page to read, there's no need to search.
+ Please read the provided page and answer the user's question accordingly.
+ If you find that there's not much information just by looking at the search results page, consider these two options and try them out:
+ - Try clicking on the links of the search results to access and read the content of each page.
+ - Change your search query and perform a new search.
+ Users are extremely busy and not as free as you are.
+ Therefore, to save the user's effort, please provide direct answers.
+ BAD ANSWER EXAMPLE
+ - Please refer to these pages.
+ - You can write code referring these pages.
+ - Following page will be helpful.
+ GOOD ANSWER EXAMPLE
+ - This is the complete code: -- complete code here --
+ - The answer of you question is -- answer here --
+ Please make sure to list the URLs of the pages you referenced at the end of your answer. (This will allow users to verify your response.)
+ Please make sure to answer in the language used by the user. If the user asks in Japanese, please answer in Japanese. If the user asks in Spanish, please answer in Spanish.
+ But, you can go ahead and search in English, especially for programming-related questions. PLEASE MAKE SURE TO ALWAYS SEARCH IN ENGLISH FOR THOSE.
+ """
+
+     for user_prompt, bot_response in history:
+         prompt += f"[INST] {user_prompt} [/INST]"
+         prompt += f" {bot_response}</s> "
+
+     prompt += f"[INST] {message} [/INST]"
+     return prompt
+
+ # Function to generate a response
+ def generate(prompt: str, history: list[Tuple[str, str]], agent_roles: list[str], temperature: float = DEFAULT_TEMPERATURE, max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS, top_p: float = DEFAULT_TOP_P, repetition_penalty: float = DEFAULT_REPETITION_PENALTY) -> str:
+     """Generates a response using the selected agent roles and parameters."""
+     temperature = float(temperature)
+     if temperature < 1e-2:
+         temperature = 1e-2
+     top_p = float(top_p)
+
+     generate_kwargs = dict(
+         temperature=temperature,
+         max_new_tokens=max_new_tokens,
+         top_p=top_p,
+         repetition_penalty=repetition_penalty,
+         do_sample=True,
+         seed=random.randint(0, 10**7),
+     )
+
+     formatted_prompt = format_prompt(prompt, history, agent_roles)
+
+     stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
+     output = ""
+
+     for response in stream:
+         output += response.token.text
+         yield output
+     return output
+
+ # Function to handle user input and generate responses
+ def chat_interface(message: str, history: list[Tuple[str, str]], agent_cluster: Dict[str, bool], temperature: float = DEFAULT_TEMPERATURE, max_new_tokens: int = DEFAULT_MAX_NEW_TOKENS, top_p: float = DEFAULT_TOP_P, repetition_penalty: float = DEFAULT_REPETITION_PENALTY) -> Tuple[str, str]:
      """Handles user input and generates responses."""
      if message.startswith("python"):
          # User entered code, execute it
@@ -153,54 +164,205 @@ def chat_interface(message, history, agent_cluster, temperature=0.9, max_new_tok
          response = generate(message, history, active_agents, temperature, max_new_tokens, top_p, repetition_penalty)
          return (message, response)

- with gr.Blocks(theme='ParityError/Interstellar') as demo:
+ # Function to create a new web app instance
+ def create_web_app(app_name: str, code: str) -> None:
+     """Creates a new web app instance with the given name and code."""
+     # Create a new directory for the app
+     os.makedirs(app_name, exist_ok=True)
+
+     # Create the app.py file
+     with open(os.path.join(app_name, 'app.py'), 'w') as f:
+         f.write(code)
+
+     # Create the requirements.txt file
+     with open(os.path.join(app_name, 'requirements.txt'), 'w') as f:
+         f.write("gradio\nhuggingface_hub")
+
+     # Print a success message
+     print(f"Web app '{app_name}' created successfully!")
+
+ # Function to handle the "Create Web App" button click
+ def create_web_app_button_click(code: str) -> str:
+     """Handles the "Create Web App" button click."""
+     # Get the app name from the user
+     app_name = gr.Textbox.get().strip()
+
+     # Validate the app name
+     if not app_name:
+         return "Please enter a valid app name."
+
+     # Create the web app instance
+     create_web_app(app_name, code)
+
+     # Return a success message
+     return f"Web app '{app_name}' created successfully!"
+
+ # Function to handle the "Deploy" button click
+ def deploy_button_click(app_name: str, code: str) -> str:
+     """Handles the "Deploy" button click."""
+     # Get the app name from the user
+     app_name = gr.Textbox.get().strip()
+
+     # Validate the app name
+     if not app_name:
+         return "Please enter a valid app name."
+
+     # Deploy the web app instance
+     # ... (Implement deployment logic here)
+
+     # Return a success message
+     return f"Web app '{app_name}' deployed successfully!"
+
+ # Function to handle the "Local Host" button click
+ def local_host_button_click(app_name: str, code: str) -> str:
+     """Handles the "Local Host" button click."""
+     # Get the app name from the user
+     app_name = gr.Textbox.get().strip()
+
+     # Validate the app name
+     if not app_name:
+         return "Please enter a valid app name."
+
+     # Start the local server
+     os.chdir(app_name)
+     subprocess.Popen(['gradio', 'run', 'app.py', '--share', '--server_port', str(LOCAL_HOST_PORT)])
+
+     # Return a success message
+     return f"Web app '{app_name}' running locally on port {LOCAL_HOST_PORT}!"
+
+ # Function to handle the "Ship" button click
+ def ship_button_click(app_name: str, code: str) -> str:
+     """Handles the "Ship" button click."""
+     # Get the app name from the user
+     app_name = gr.Textbox.get().strip()
+
+     # Validate the app name
+     if not app_name:
+         return "Please enter a valid app name."
+
+     # Ship the web app instance
+     # ... (Implement shipping logic here)
+
+     # Return a success message
+     return f"Web app '{app_name}' shipped successfully!"
+
+ # Create the Gradio interface
+ with gr.Blocks(theme='ParityError/Interstellar') as demo:
+     # Agent selection area
      with gr.Row():
          for agent_name, agent_data in agent_roles.items():
              button = gr.Button(agent_name, variant="secondary")
              textbox = gr.Textbox(agent_data["description"], interactive=False)
              button.click(toggle_agent, inputs=[button], outputs=[textbox])

+     # Chat interface area
+     with gr.Row():
+         chatbot = gr.Chatbot()
+         chat_interface_input = gr.Textbox(label="Enter your message", placeholder="Ask me anything!")
+         chat_interface_output = gr.Textbox(label="Response", interactive=False)
+
+     # Parameters for the chat interface
+     temperature_slider = gr.Slider(
+         label="Temperature",
+         value=DEFAULT_TEMPERATURE,
+         minimum=0.0,
+         maximum=1.0,
+         step=0.05,
+         interactive=True,
+         info="Higher values generate more diverse outputs",
+     )
+     max_new_tokens_slider = gr.Slider(
+         label="Maximum New Tokens",
+         value=DEFAULT_MAX_NEW_TOKENS,
+         minimum=64,
+         maximum=4096,
+         step=64,
+         interactive=True,
+         info="The maximum number of new tokens",
+     )
+     top_p_slider = gr.Slider(
+         label="Top-p (Nucleus Sampling)",
+         value=DEFAULT_TOP_P,
+         minimum=0.0,
+         maximum=1,
+         step=0.05,
+         interactive=True,
+         info="Higher values sample more low-probability tokens",
+     )
+     repetition_penalty_slider = gr.Slider(
+         label="Repetition Penalty",
+         value=DEFAULT_REPETITION_PENALTY,
+         minimum=1.0,
+         maximum=2.0,
+         step=0.05,
+         interactive=True,
+         info="Penalize repeated tokens",
+     )
+
+     # Submit button for the chat interface
+     submit_button = gr.Button("Submit")
+
+     # Create the chat interface
+     submit_button.click(
+         chat_interface,
+         inputs=[
+             chat_interface_input,
+             chatbot,
+             get_agent_cluster,
+             temperature_slider,
+             max_new_tokens_slider,
+             top_p_slider,
+             repetition_penalty_slider,
+         ],
+         outputs=[
+             chatbot,
+             chat_interface_output,
+         ],
+     )
+
+     # Web app creation area
      with gr.Row():
-         gr.ChatInterface(
-             fn=chat_interface,
-             chatbot=gr.Chatbot(),  # Make sure to create a Chatbot instance if necessary
-             additional_inputs=[
-                 gr.Slider(
-                     label="Temperature",
-                     value=0.9,
-                     minimum=0.0,
-                     maximum=1.0,
-                     step=0.05,
-                     interactive=True,
-                     info="Higher values generate more diverse outputs",
-                 ),
-                 gr.Slider(
-                     label="Maximum New Tokens",
-                     value=2048,
-                     minimum=64,
-                     maximum=4096,
-                     step=64,
-                     interactive=True,
-                     info="The maximum number of new tokens",
-                 ),
-                 gr.Slider(
-                     label="Top-p (Nucleus Sampling)",
-                     value=0.90,
-                     minimum=0.0,
-                     maximum=1,
-                     step=0.05,
-                     interactive=True,
-                     info="Higher values sample more low-probability tokens",
-                 ),
-                 gr.Slider(
-                     label="Repetition Penalty",
-                     value=1.2,
-                     minimum=1.0,
-                     maximum=2.0,
-                     step=0.05,
-                     interactive=True,
-                     info="Penalize repeated tokens",
-                 )
-             ]
+         app_name_input = gr.Textbox(label="App Name", placeholder="Enter your app name")
+         code_output = gr.Textbox(label="Code", interactive=False)
+         create_web_app_button = gr.Button("Create Web App")
+         deploy_button = gr.Button("Deploy")
+         local_host_button = gr.Button("Local Host")
+         ship_button = gr.Button("Ship")
+
+     # Create the web app creation interface
+     create_web_app_button.click(
+         create_web_app_button_click,
+         inputs=[code_output],
+         outputs=[gr.Textbox(label="Status", interactive=False)],
      )
-     demo.queue().launch(debug=True)
+
+     # Deploy the web app
+     deploy_button.click(
+         deploy_button_click,
+         inputs=[app_name_input, code_output],
+         outputs=[gr.Textbox(label="Status", interactive=False)],
+     )
+
+     # Local host the web app
+     local_host_button.click(
+         local_host_button_click,
+         inputs=[app_name_input, code_output],
+         outputs=[gr.Textbox(label="Status", interactive=False)],
+     )
+
+     # Ship the web app
+     ship_button.click(
+         ship_button_click,
+         inputs=[app_name_input, code_output],
+         outputs=[gr.Textbox(label="Status", interactive=False)],
+     )
+
+     # Connect the chat interface output to the code output
+     chat_interface_output.change(
+         lambda x: x,
+         inputs=[chat_interface_output],
+         outputs=[code_output],
+     )
+
+ # Launch the Gradio interface
+ demo.queue().launch(debug=True)