acecalisto3 committed
Commit 8af33a6 · verified · 1 Parent(s): 96ebd64

Update app.py

Files changed (1):
  1. app.py +315 -338
app.py CHANGED
@@ -1,7 +1,7 @@
 import os
 import subprocess
 import random
-from huggingface_hub import InferenceClient
 import gradio as gr
 from safe_search import safe_search
 from i_search import google
@@ -21,364 +21,341 @@ from agent import (
 )
 from utils import parse_action, parse_file_content, read_python_module_structure
 from datetime import datetime
-now = datetime.now()
-date_time_str = now.strftime("%Y-%m-%d %H:%M:%S")

-client = InferenceClient(
-    "mistralai/Mixtral-8x7B-Instruct-v0.1",
-)


-############################################


-VERBOSE = True
-MAX_HISTORY = 125


-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt



-def run_gpt(
-    prompt_template,
-    stop_tokens,
-    max_tokens,
-    purpose,
-    **prompt_kwargs,
-):
-    seed = random.randint(1,1111111111111111)
-    print (seed)
-    generate_kwargs = dict(
-        temperature=1.0,
-        max_new_tokens=2096,
-        top_p=0.99,
-        repetition_penalty=1.7,
-        do_sample=True,
-        seed=seed,
-    )


-    content = PREFIX.format(
-        date_time_str=date_time_str,
-        purpose=purpose,
-        safe_search=safe_search,
-    ) + prompt_template.format(**prompt_kwargs)
-    if VERBOSE:
-        print(LOG_PROMPT.format(content))


-    #formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    #formatted_prompt = format_prompt(f'{content}', history)

-    stream = client.text_generation(content, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    resp = ""
-    for response in stream:
-        resp += response.token.text

-    if VERBOSE:
-        print(LOG_RESPONSE.format(resp))
-    return resp


-def compress_history(purpose, task, history, directory):
-    resp = run_gpt(
-        COMPRESS_HISTORY_PROMPT,
-        stop_tokens=["observation:", "task:", "action:", "thought:"],
-        max_tokens=5096,
-        purpose=purpose,
-        task=task,
-        history=history,
-    )
-    history = "observation: {}\n".format(resp)
-    return history

-def call_search(purpose, task, history, directory, action_input):
-    print("CALLING SEARCH")
     try:

-        if "http" in action_input:
-            if "<" in action_input:
-                action_input = action_input.strip("<")
-            if ">" in action_input:
-                action_input = action_input.strip(">")

-            response = i_s(action_input)
-            #response = google(search_return)
-            print(response)
-            history += "observation: search result is: {}\n".format(response)
-        else:
-            history += "observation: I need to provide a valid URL to 'action: SEARCH action_input=https://URL'\n"
-    except Exception as e:
-        history += "observation: {}'\n".format(e)
-    return "MAIN", None, history, task

-def call_main(purpose, task, history, directory, action_input):
-    resp = run_gpt(
-        ACTION_PROMPT,
-        stop_tokens=["observation:", "task:", "action:","though:"],
-        max_tokens=5096,
-        purpose=purpose,
-        task=task,
-        history=history,
-    )
-    lines = resp.strip().strip("\n").split("\n")
-    for line in lines:
-        if line == "":
-            continue
-        if line.startswith("thought: "):
-            history += "{}\n".format(line)
-        elif line.startswith("action: "):

-            action_name, action_input = parse_action(line)
-            print (f'ACTION_NAME :: {action_name}')
-            print (f'ACTION_INPUT :: {action_input}')

-            history += "{}\n".format(line)
-            if "COMPLETE" in action_name or "COMPLETE" in action_input:
-                task = "END"
-                return action_name, action_input, history, task
-            else:
-                return action_name, action_input, history, task
-        else:
-            history += "{}\n".format(line)
-            #history += "observation: the following command did not produce any useful output: '{}', I need to check the commands syntax, or use a different command\n".format(line)

-            #return action_name, action_input, history, task
-            #assert False, "unknown action: {}".format(line)
-    return "MAIN", None, history, task


-def call_set_task(purpose, task, history, directory, action_input):
-    task = run_gpt(
-        TASK_PROMPT,
-        stop_tokens=[],
-        max_tokens=2048,
-        purpose=purpose,
-        task=task,
-        history=history,
-    ).strip("\n")
-    history += "observation: task has been updated to: {}\n".format(task)
-    return "MAIN", None, history, task

-def end_fn(purpose, task, history, directory, action_input):
-    task = "END"
-    return "COMPLETE", "COMPLETE", history, task

-NAME_TO_FUNC = {
-    "MAIN": call_main,
-    "UPDATE-TASK": call_set_task,
-    "SEARCH": call_search,
-    "COMPLETE": end_fn,

-}

-def run_action(purpose, task, history, directory, action_name, action_input):
-    print(f'action_name::{action_name}')
     try:
-        if "RESPONSE" in action_name or "COMPLETE" in action_name:
-            action_name="COMPLETE"
-            task="END"
-            return action_name, "COMPLETE", history, task

-        # compress the history when it is long
-        if len(history.split("\n")) > MAX_HISTORY:
-            if VERBOSE:
-                print("COMPRESSING HISTORY")
-            history = compress_history(purpose, task, history, directory)
-        if not action_name in NAME_TO_FUNC:
-            action_name="MAIN"
-        if action_name == "" or action_name == None:
-            action_name="MAIN"
-        assert action_name in NAME_TO_FUNC

-        print("RUN: ", action_name, action_input)
-        return NAME_TO_FUNC[action_name](purpose, task, history, directory, action_input)
     except Exception as e:
-        history += "observation: the previous command did not produce any useful output, I need to check the commands syntax, or use a different command\n"

-        return "MAIN", None, history, task

-def run(purpose,history):

-    #print(purpose)
-    #print(hist)
-    task=None
-    directory="./"
-    if history:
-        history=str(history).strip("[]")
-    if not history:
-        history = ""

-    action_name = "UPDATE-TASK" if task is None else "MAIN"
-    action_input = None
-    while True:
-        print("")
-        print("")
-        print("---")
-        print("purpose:", purpose)
-        print("task:", task)
-        print("---")
-        print(history)
-        print("---")

-        action_name, action_input, history, task = run_action(
-            purpose,
-            task,
-            history,
-            directory,
-            action_name,
-            action_input,
         )
-        yield (history)
-        #yield ("",[(purpose,history)])
-        if task == "END":
-            return (history)
-            #return ("", [(purpose,history)])



-################################################

-def format_prompt(message, history):
-    prompt = "<s>"
-    for user_prompt, bot_response in history:
-        prompt += f"[INST] {user_prompt} [/INST]"
-        prompt += f" {bot_response}</s> "
-    prompt += f"[INST] {message} [/INST]"
-    return prompt
-agents =[
-    "WEB_DEV",
-    "AI_SYSTEM_PROMPT",
-    "PYTHON_CODE_DEV"
-]
-def generate(
-    prompt, history, agent_name=agents[0], sys_prompt="", temperature=0.9, max_new_tokens=256, top_p=0.95, repetition_penalty=1.7,
-):
-    seed = random.randint(1,1111111111111111)

-    agent=prompts.WEB_DEV
-    if agent_name == "WEB_DEV":
-        agent = prompts.WEB_DEV
-    if agent_name == "AI_SYSTEM_PROMPT":
-        agent = prompts.AI_SYSTEM_PROMPT
-    if agent_name == "PYTHON_CODE_DEV":
-        agent = prompts.PYTHON_CODE_DEV
-    system_prompt=agent
-    temperature = float(temperature)
-    if temperature < 1e-2:
-        temperature = 1e-2
-    top_p = float(top_p)

-    generate_kwargs = dict(
-        temperature=temperature,
-        max_new_tokens=max_new_tokens,
-        top_p=top_p,
-        repetition_penalty=repetition_penalty,
-        do_sample=True,
-        seed=seed,
-    )

-    formatted_prompt = format_prompt(f"{system_prompt}, {prompt}", history)
-    stream = client.text_generation(formatted_prompt, **generate_kwargs, stream=True, details=True, return_full_text=False)
-    output = ""

-    for response in stream:
-        output += response.token.text
-        yield output
-    return output


-additional_inputs=[
-    gr.Dropdown(
-        label="Agents",
-        choices=[s for s in agents],
-        value=agents[0],
-        interactive=True,
-    ),
-    gr.Textbox(
-        label="System Prompt",
-        max_lines=1,
-        interactive=True,
-    ),
-    gr.Slider(
-        label="Temperature",
-        value=0.9,
-        minimum=0.0,
-        maximum=1.0,
-        step=0.05,
-        interactive=True,
-        info="Higher values produce more diverse outputs",
-    ),

-    gr.Slider(
-        label="Max new tokens",
-        value=1048*10,
-        minimum=0,
-        maximum=1048*10,
-        step=64,
-        interactive=True,
-        info="The maximum numbers of new tokens",
-    ),
-    gr.Slider(
-        label="Top-p (nucleus sampling)",
-        value=0.90,
-        minimum=0.0,
-        maximum=1,
-        step=0.05,
-        interactive=True,
-        info="Higher values sample more low-probability tokens",
-    ),
-    gr.Slider(
-        label="Repetition penalty",
-        value=1.2,
-        minimum=1.0,
-        maximum=2.0,
-        step=0.05,
-        interactive=True,
-        info="Penalize repeated tokens",
-    ),


-]

-examples = [
-    ["What is the latest version of Python and what are the new features in this version?", None, None, None, None, None, ],
-    ["Can you explain the difference between a class and an object in object-oriented programming?", None, None, None, None, None, ],
-    ["What are the benefits of using a version control system like Git, and how can I get started with it?", None, None, None, None, None, ],
-    ["I'm trying to build a web application using a microservices architecture. Can you suggest some tools and frameworks that would be suitable for this project?", None, None, None, None, None, ],
-    ["Can you explain the concept of functional programming and how it can be used in modern programming languages?", None, None, None, None, None, ],
-    ["What are the best practices for writing clean and maintainable code, and how can I ensure that my code adheres to these standards?", None, None, None, None, None, ],
-    ["I'm working on a machine learning project and I need to preprocess my data. Can you suggest some libraries and tools that would be useful for this task?", None, None, None, None, None, ],
-    ["What are the key differences between SQL and NoSQL databases, and when should I use each type of database?", None, None, None, None, None, ],
-]


-# Define the main Gradio interface
-with gr.Blocks() as iface:
-    # Chat Interface
-    chat_interface = gr.ChatInterface(
-        fn=run,
-        title="""Fragmixt: Autonomous Agents With Agents...Surf With a Purpose!""",
-        examples=examples,
-        concurrency_limit=20,
-    )

-    # Other components within the same interface
     with gr.Row():
-        msg = gr.Textbox()
-        submit_b = gr.Button()
-        clear = gr.ClearButton([msg, chat_interface])

-    # Event handling
-    submit_b.click(run, [msg, chat_interface], [msg, chat_interface])
-    msg.submit(run, [msg, chat_interface], [msg, chat_interface])

-# Launch the interface
 iface.launch()
 
 import os
 import subprocess
 import random
+from huggingface_hub import InferenceClient, cached_download, hf_hub_url
 import gradio as gr
 from safe_search import safe_search
 from i_search import google
 )
 from utils import parse_action, parse_file_content, read_python_module_structure
 from datetime import datetime
+import json

+# --- Global Variables for App State ---
+app_state = {"components": []}
+terminal_history = ""

+# --- Component Library ---
+components_registry = {
+    "Button": {
+        "properties": {"label": "Click Me", "onclick": ""},
+        "description": "A clickable button",
+        "code_snippet": 'gr.Button(value="{label}", variant="primary")',
+    },
+    "Text Input": {
+        "properties": {"value": "", "placeholder": "Enter text"},
+        "description": "A field for entering text",
+        "code_snippet": 'gr.Textbox(label="{placeholder}")',
+    },
+    "Image": {
+        "properties": {"src": "#", "alt": "Image"},
+        "description": "Displays an image",
+        "code_snippet": 'gr.Image(label="{alt}")',
+    },
+    "Dropdown": {
+        "properties": {"choices": ["Option 1", "Option 2"], "value": ""},
+        "description": "A dropdown menu for selecting options",
+        "code_snippet": 'gr.Dropdown(choices={choices}, label="Dropdown")',
+    },
+    # Add more components here...
+}

+# --- NLP Model (Example using Hugging Face) ---
+nlp_model_name = "google/flan-t5-small"
+# Check if the model exists in the cache
+try:
+    cached_download(hf_hub_url(nlp_model_name, revision="main"))
+    nlp_model = InferenceClient(nlp_model_name)
+except:
+    nlp_model = None

+# --- Function to get NLP model response ---
+def get_nlp_response(input_text):
+    if nlp_model:
+        response = nlp_model.text_generation(input_text)
+        return response.generated_text
+    else:
+        return "NLP model not available."

+# --- Component Class ---
+class Component:
+    def __init__(self, type, properties=None, id=None):
+        self.id = id or random.randint(1000, 9999)
+        self.type = type
+        self.properties = properties or components_registry[type]["properties"].copy()

+    def to_dict(self):
+        return {
+            "id": self.id,
+            "type": self.type,
+            "properties": self.properties,
+        }

+    def render(self):
+        # Properly format choices for Dropdown
+        if self.type == "Dropdown":
+            self.properties["choices"] = (
+                str(self.properties["choices"])
+                .replace("[", "")
+                .replace("]", "")
+                .replace("'", "")
+            )
+        return components_registry[self.type]["code_snippet"].format(
+            **self.properties
+        )


+# --- Function to update the app canvas (for preview) ---
+def update_app_canvas():
+    components_html = "".join(
+        [
+            f"<div>Component ID: {component['id']}, Type: {component['type']}, Properties: {component['properties']}</div>"
+            for component in app_state["components"]
+        ]
+    )
+    return components_html


+# --- Function to handle component addition ---
+def add_component(component_type):
+    if component_type in components_registry:
+        new_component = Component(component_type)
+        app_state["components"].append(new_component.to_dict())
+        return (
+            update_app_canvas(),
+            f"System: Added component: {component_type}\n",
+        )
+    else:
+        return None, f"Error: Invalid component type: {component_type}\n"


+# --- Function to handle terminal input ---
+def run_terminal_command(command, history):
+    global terminal_history
+    output = ""
+    try:
+        # Basic command parsing (expand with NLP)
+        if command.startswith("add "):
+            component_type = command.split("add ", 1)[1].strip()
+            _, output = add_component(component_type)
+        elif command.startswith("set "):
+            _, output = set_component_property(command)
+        elif command.startswith("search "):
+            search_query = command.split("search ", 1)[1].strip()
+            output = i_s(search_query)
+        else:
+            # Attempt to execute command as Python code
+            try:
+                result = subprocess.check_output(
+                    command, shell=True, stderr=subprocess.STDOUT, text=True
+                )
+                output = result
+            except Exception as e:
+                output = f"Error executing Python code: {str(e)}"
+    except Exception as e:
+        output = f"Error: {str(e)}"
+    finally:
+        terminal_history += f"User: {command}\n"
+        terminal_history += f"{output}\n"
+        return terminal_history


+def set_component_property(command):
+    try:
+        # Improved 'set' command parsing
+        set_parts = command.split(" ", 2)[1:]
+        if len(set_parts) != 2:
+            raise ValueError("Invalid 'set' command format.")

+        component_id = int(set_parts[0])  # Use component ID
+        property_name, property_value = set_parts[1].split("=", 1)

+        # Find component by ID
+        component_found = False
+        for component in app_state["components"]:
+            if component["id"] == component_id:
+                if property_name in component["properties"]:
+                    component["properties"][
+                        property_name.strip()
+                    ] = property_value.strip()
+                    component_found = True
+                    return (
+                        update_app_canvas(),
+                        f"System: Property '{property_name}' set to '{property_value}' for component {component_id}\n",
+                    )
+                else:
+                    return (
+                        None,
+                        f"Error: Property '{property_name}' not found in component {component_id}\n",
+                    )
+        if not component_found:
+            return (
+                None,
+                f"Error: Component with ID {component_id} not found.\n",
+            )

+    except Exception as e:
+        return None, f"Error: Invalid 'set' command format or error setting property: {str(e)}\n"


+# --- Function to handle chat interaction ---
+def run_chat(message, history):
+    global terminal_history
+    if message.startswith("!"):
+        command = message[1:]
+        terminal_history = run_terminal_command(command, history)
+        return history, terminal_history
+    else:
+        # ... (Your regular chat response generation)
+        return history, terminal_history


+# --- Code Generation ---
+def generate_python_code(app_name):
+    code = f"""
+import gradio as gr

+# Define your Gradio components here
+with gr.Blocks() as {app_name}:
+"""
+    for component in app_state["components"]:
+        code += " " + Component(**component).render() + "\n"

+    code += f"""
+{app_name}.launch()
+"""
+    return code


+# --- Save/Load App State ---
+def save_app_state(filename="app_state.json"):
+    with open(filename, "w") as f:
+        json.dump(app_state, f)


+def load_app_state(filename="app_state.json"):
+    global app_state
     try:
+        with open(filename, "r") as f:
+            app_state = json.load(f)
+    except FileNotFoundError:
+        print("App state file not found. Starting with a blank slate.")


+# --- Hugging Face Deployment ---
+def deploy_to_huggingface(app_name):
+    # Generate Python code
+    code = generate_python_code(app_name)

+    # Create requirements.txt
+    with open("requirements.txt", "w") as f:
+        f.write("gradio==3.32.0\n")

+    # Create the app.py file
+    with open("app.py", "w") as f:
+        f.write(code)

+    # Execute the deployment command
     try:
+        subprocess.run(
+            [
+                "huggingface-cli",
+                "repo",
+                "create",
+                "--type",
+                "space",
+                "--space_sdk",
+                "gradio",
+                app_name,
+            ],
+            check=True,
+        )
+        subprocess.run(
+            ["git", "init"], cwd=f"./{app_name}", check=True
+        )
+        subprocess.run(
+            ["git", "add", "."], cwd=f"./{app_name}", check=True
+        )
+        subprocess.run(
+            ['git', 'commit', '-m', '"Initial commit"'], cwd=f"./{app_name}", check=True
+        )
+        subprocess.run(
+            ["git", "push", "https://huggingface.co/spaces/" + app_name, "main"], cwd=f"./{app_name}", check=True
+        )
+        return (
+            f"Successfully deployed to Hugging Face Spaces: https://huggingface.co/spaces/{app_name}"
+        )
     except Exception as e:
+        return f"Error deploying to Hugging Face Spaces: {e}"


+# --- Gradio Interface ---
+with gr.Blocks() as iface:
+    with gr.Row():
+        # --- Chat Interface ---
+        chat_history = gr.Chatbot(label="Chat with Agent")
+        chat_input = gr.Textbox(label="Your Message")
+        chat_button = gr.Button("Send")

+        chat_button.click(
+            run_chat,
+            inputs=[chat_input, chat_history],
+            outputs=[chat_history, terminal_output],
         )

+    with gr.Row():
+        # --- App Builder Section ---
+        app_canvas = gr.HTML(
+            "<div>App Canvas Preview:</div>", label="App Canvas"
+        )
+        with gr.Column():
+            component_list = gr.Dropdown(
+                choices=list(components_registry.keys()), label="Components"
+            )
+            add_button = gr.Button("Add Component")

+            add_button.click(
+                add_component,
+                inputs=component_list,
+                outputs=[app_canvas, terminal_output],
+            )

+    with gr.Row():
+        # --- Terminal ---
+        terminal_output = gr.Textbox(
+            lines=8, label="Terminal", value=terminal_history
+        )
+        terminal_input = gr.Textbox(label="Enter Command")
+        terminal_button = gr.Button("Run")

+        terminal_button.click(
+            run_terminal_command,
+            inputs=[terminal_input, terminal_output],
+            outputs=terminal_output,
+        )

     with gr.Row():
+        # --- Code Generation ---
+        code_output = gr.Code(
+            generate_python_code("app_name"),
+            language="python",
+            label="Generated Code",
+        )
+        app_name_input = gr.Textbox(label="App Name")
+        generate_code_button = gr.Button("Generate Code")
+        generate_code_button.click(
+            generate_python_code,
+            inputs=[app_name_input],
+            outputs=code_output,
+        )

+    with gr.Row():
+        # --- Save/Load Buttons ---
+        save_button = gr.Button("Save App State")
+        load_button = gr.Button("Load App State")

+        save_button.click(save_app_state)
+        load_button.click(load_app_state)

+    with gr.Row():
+        # --- Deploy Button ---
+        deploy_button = gr.Button("Deploy to Hugging Face")
+        deploy_output = gr.Textbox(label="Deployment Output")
+        deploy_button.click(
+            deploy_to_huggingface,
+            inputs=[app_name_input],
+            outputs=[deploy_output],
+        )
 iface.launch()
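
Below is a minimal, standalone sketch (not part of the commit) of the registry-to-code templating that the new components_registry and Component.render() in this diff rely on: each registry entry pairs default properties with a code_snippet template that str.format fills in. The "Button" entry mirrors the registry in the diff; the render() helper and the label override are illustrative only.

# Standalone sketch of the registry -> code_snippet templating used in app.py above.
# "Button" mirrors components_registry from the diff; render() is an illustrative
# helper, not a function defined in the commit.
components_registry = {
    "Button": {
        "properties": {"label": "Click Me", "onclick": ""},
        "code_snippet": 'gr.Button(value="{label}", variant="primary")',
    },
}

def render(component_type, **overrides):
    # Merge registry defaults with caller overrides, then fill the template.
    props = {**components_registry[component_type]["properties"], **overrides}
    return components_registry[component_type]["code_snippet"].format(**props)

print(render("Button", label="Submit"))
# -> gr.Button(value="Submit", variant="primary")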