Spaces: Runtime error
Commit: Update app.py
app.py (CHANGED)
@@ -1,3 +1,13 @@
 import gradio as gr
 import logging
 import json
@@ -5,16 +15,27 @@ import re
 import torch
 import tempfile
 import subprocess
 from pathlib import Path
-from typing import Dict, List, Tuple, Optional, Any
-from dataclasses import dataclass
 from enum import Enum
-from transformers import

 # Configure logging
 logging.basicConfig(
     level=logging.INFO,
-    format='%(asctime)s - %(levelname)s - %(message)s',
     handlers=[
         logging.StreamHandler(),
         logging.FileHandler('gradio_builder.log')
@@ -22,1349 +43,1094 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)

-# Constants
-COMPONENT_CATEGORIES = {
-    "Basic": {
-        "Textbox": {
-            "properties": {
-                "label": "Text Input",
-                "placeholder": "",
-                "lines": 1,
-                "type": "text"
-            },
-            "code_snippet": 'gr.Textbox(label="{label}", placeholder="{placeholder}", lines={lines}, type="{type}")'
-        },
-        "Number": {
-            "description": "Numeric input component",
-            "properties": {
-                "label": "Number Input",
-                "value": 0,
-                "minimum": None,
-                "maximum": None
-            },
-            "code_snippet": 'gr.Number(label="{label}", value={value}, minimum={minimum}, maximum={maximum})'
-        },
-        "Button": {
-            "description": "Clickable button component",
-            "properties": {
-                "text": "Button",
-                "variant": "primary"
-            },
-            "code_snippet": 'gr.Button(value="{text}", variant="{variant}")'
-        }
-    },
-    "Media": {
-        "Image": {
-            "description": "Image display/upload component",
-            "properties": {
-                "label": "Image",
-                "shape": None,
-                "image_mode": "RGB",
-                "source": "upload",
-                "type": "numpy"
-            },
-            "code_snippet": 'gr.Image(label="{label}", shape={shape}, image_mode="{image_mode}", source="{source}", type="{type}")'
-        },
-        "Audio": {
-            "description": "Audio player/recorder component",
-            "properties": {
-                "label": "Audio",
-                "source": "upload",
-                "type": "numpy"
-            },
-            "code_snippet": 'gr.Audio(label="{label}", source="{source}", type="{type}")'
-        },
-        "Video": {
-            "description": "Video player component",
-            "properties": {
-                "label": "Video",
-                "source": "upload"
-            },
-            "code_snippet": 'gr.Video(label="{label}", source="{source}")'
-        }
-    },
-    "Selection": {
-        "Dropdown": {
-            "description": "Dropdown selection component",
-            "properties": {
-                "label": "Dropdown",
-                "choices": [],
-                "multiselect": False
-            },
-            "code_snippet": 'gr.Dropdown(label="{label}", choices={choices}, multiselect={multiselect})'
-        },
-        "Radio": {
-            "description": "Radio button group component",
-            "properties": {
-                "label": "Radio Group",
-                "choices": [],
-                "type": "value"
-            },
-            "code_snippet": 'gr.Radio(label="{label}", choices={choices}, type="{type}")'
-        },
-        "Checkbox": {
-            "description": "Checkbox component",
-            "properties": {
-                "label": "Checkbox",
-                "value": False
-            },
-            "code_snippet": 'gr.Checkbox(label="{label}", value={value})'
-        }
-    }
-}
-
-TEMPLATE_CATEGORIES = {
-    "Basic": {
-        "Empty": {
-            "description": "Empty application template",
-            "components": [],
-            "layout": "vertical",
-            "css": "",
-            "dependencies": []
-        },
-        "Text Input/Output": {
-            "description": "Simple text input/output template",
-            "components": [
-                {
-                    "type": "Textbox",
-                    "properties": {
-                        "label": "Input",
-                        "lines": 3
-                    }
-                },
-                {
-                    "type": "Button",
-                    "properties": {
-                        "text": "Process",
-                        "variant": "primary"
-                    }
-                },
-                {
-                    "type": "Textbox",
-                    "properties": {
-                        "label": "Output",
-                        "lines": 3
-                    }
-                }
-            ],
-            "layout": "vertical",
-            "css": "",
-            "dependencies": []
-        }
-    },
-    "Media": {
-        "Image Processing": {
-            "description": "Image processing template",
-            "components": [
-                {
-                    "type": "Image",
-                    "properties": {
-                        "label": "Input Image",
-                        "source": "upload"
-                    }
-                },
-                {
-                    "type": "Button",
-                    "properties": {
-                        "text": "Process",
-                        "variant": "primary"
-                    }
-                },
-                {
-                    "type": "Image",
-                    "properties": {
-                        "label": "Output Image",
-                        "source": "upload"
-                    }
-                }
-            ],
-            "layout": "vertical",
-            "css": "",
-            "dependencies": ["PIL"]
-        }
-    }
-}
-
-DEFAULT_THEMES = {
-    "Light": """
-        /* Light theme CSS */
-        .gradio-container {
-            font-family: 'Arial', sans-serif;
-            background-color: #ffffff;
-        }
-        .gradio-button {
-            background-color: #2196F3;
-            color: white;
-        }
-    """,
-    "Dark": """
-        /* Dark theme CSS */
-        .gradio-container {
-            font-family: 'Arial', sans-serif;
-            background-color: #2c2c2c;
-            color: #ffffff;
-        }
-        .gradio-button {
-            background-color: #bb86fc;
-            color: black;
-        }
-    """
-}
 
-ASSISTANT = "assistant"

 @dataclass
-class

-            self.chat_pipeline = pipeline(
-                "text-generation",
-                model=self.model,
-                tokenizer=self.tokenizer,
-                max_length=2048,
-                do_sample=True,
-                temperature=0.7,
-                top_p=0.95,
-                pad_token_id=self.tokenizer.eos_token_id
-            )
-
-            self.chat_history: List[ChatMessage] = []
-            self.current_app: Dict = {}
-            self.error_log: List[str] = []
-
-            # Initialize system prompt
-            self.initialize_system_prompt()
-
-        except Exception as e:
-            logger.error(f"Error initializing AI Builder: {str(e)}")
-            raise

-        1. Understand user requirements and convert them into Gradio components
-        2. Generate and modify Python code for Gradio applications
-        3. Help users customize their applications through natural language instructions
-        4. Provide clear explanations of changes and suggestions for improvements
-
-        When generating code:
-        1. Use proper Python and Gradio syntax
-        2. Include necessary imports
-        3. Structure the code clearly
-        4. Add comments for clarity
-        5. Handle errors appropriately
-
-        When responding to modifications:
-        1. Explain what changes will be made
-        2. Show the modified code
-        3. Highlight any potential issues
-        4. Suggest improvements if applicable
-
-        Available Components:
-        {components}
-
-        Available Templates:
-        {templates}
-
-        Response Format:
-        1. Brief explanation of understanding
-        2. Proposed solution
-        3. Code block:
-        ```python
-        # Code here
-        ```
-        4. Additional explanations or suggestions
-        """
-
-        # Format prompt with available components and templates
-        components_str = self._format_components_for_prompt()
-        templates_str = self._format_templates_for_prompt()
-
-        self.system_prompt = system_prompt.format(
-            components=components_str,
-            templates=templates_str
-        )
-        self.chat_history.append(ChatMessage(ChatRole.SYSTEM, self.system_prompt))
-
-    def _format_components_for_prompt(self) -> str:
-        """Format component information for the system prompt"""
-        components_info = []
-        for category, components in COMPONENT_CATEGORIES.items():
-            category_info = [f"\n{category}:"]
-            for name, info in components.items():
-                props = ", ".join(info["properties"].keys())
-                category_info.append(f"  - {name}: {info['description']} (Properties: {props})")
-            components_info.extend(category_info)
-        return "\n".join(components_info)
-
-    def _format_templates_for_prompt(self) -> str:
-        """Format template information for the system prompt"""
-        templates_info = []
-        for category, templates in TEMPLATE_CATEGORIES.items():
-            category_info = [f"\n{category}:"]
-            for name, info in templates.items():
-                category_info.append(f"  - {name}: {info['description']}")
-            templates_info.extend(category_info)
-        return "\n".join(templates_info)
-
-    async def generate_response(self, user_input: str) -> str:
-        """Generate AI response using HuggingFace model"""
-        try:
-            # Format conversation history
-            conversation = self._format_conversation_history()
-            conversation += f"\nuser: {user_input}\nassistant:"
-
-            # Generate response
-            response = self.chat_pipeline(
-                conversation,
-                max_new_tokens=1024,
-                do_sample=True,
-                temperature=0.7,
-                top_p=0.95
-            )[0]['generated_text']
-
-            # Extract assistant's response
-            response = response.split("assistant:")[-1].strip()
-
-            # Update chat history
-            self.chat_history.append(ChatMessage(ChatRole.USER, user_input))
-            self.chat_history.append(ChatMessage(ChatRole.ASSISTANT, response))
-
-            return response

-            self.error_log.append(error_msg)
-            raise
 
 try:

-    def
-        """
-        try:
-            if components:
-                self.current_app["components"] = components
-
-            # Update layout if present
-            layout_match = re.search(r"with gr\.(Row|Column|Tabs)\(\):", code)
-            if layout_match:
-                layout_type = layout_match.group(1).lower()
-                self.current_app["layout"] = layout_type
-
-            # Extract CSS if present
-            css_match = re.search(r'css = """(.*?)"""', code, re.DOTALL)
-            if css_match:
-                self.current_app["css"] = css_match.group(1)

-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            raise

-        self.components: List[Dict] = []
-        self.error_log: List[str] = []
-
-    def add_component(self, category: str, component_type: str, properties: Dict = None) -> Optional[Dict]:
-        """Add a new component with specified properties"""
-        try:
-            if category not in COMPONENT_CATEGORIES or component_type not in COMPONENT_CATEGORIES[category]:
-                raise ValueError(f"Invalid component type: {category}/{component_type}")
-
-            component_info = COMPONENT_CATEGORIES[category][component_type].copy()
-            if properties:
-                component_info["properties"].update(properties)
-
-            component = {
-                "id": len(self.components),
-                "category": category,
-                "type": component_type,
-                "properties": component_info["properties"]
-            }
-
-            self.components.append(component)
-            return component

-            return True

 except Exception as e:
-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            return False

-    def
-        """
-            self.components.pop(component_id)
-            # Update remaining component IDs
-            for i, component in enumerate(self.components):
-                component["id"] = i
-            return True

 except Exception as e:
-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            return False

-    def
-        """
-            raise ValueError(f"Invalid component ID: {component_id}")
-        return self.components[component_id]

 except Exception as e:
-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            return None

-    def
-        """
 try:
 except Exception as e:
-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            return "[]"

-    def
-        """
 try:
-            return

 except Exception as e:
-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            return False
 
|
525 |
-
|
526 |
-
|
527 |
-
self.templates = TEMPLATE_CATEGORIES
|
528 |
-
self.error_log: List[str] = []
|
529 |
-
|
530 |
-
def get_template(self, category: str, template_name: str) -> Optional[Dict]:
|
531 |
-
"""Get a specific template by category and name"""
|
532 |
try:
|
533 |
-
|
534 |
-
raise ValueError(f"Invalid template: {category}/{template_name}")
|
535 |
-
return self.templates[category][template_name]
|
536 |
except Exception as e:
|
537 |
-
|
538 |
-
logger.error(error_msg)
|
539 |
-
self.error_log.append(error_msg)
|
540 |
-
return None
|
541 |
|
542 |
-
def
|
543 |
-
"""
|
544 |
try:
|
545 |
-
|
546 |
-
raise ValueError("Invalid template data structure")
|
547 |
-
|
548 |
-
if category not in self.templates:
|
549 |
-
self.templates[category] = {}
|
550 |
|
551 |
-
|
552 |
-
|
553 |
-
|
554 |
-
|
555 |
-
|
556 |
-
|
557 |
-
|
|
|
558 |
|
559 |
-
|
560 |
-
|
561 |
-
template_list = []
|
562 |
-
for category, templates in self.templates.items():
|
563 |
-
for name, data in templates.items():
|
564 |
-
template_list.append({
|
565 |
-
"category": category,
|
566 |
-
"name": name,
|
567 |
-
"description": data["description"]
|
568 |
-
})
|
569 |
-
return template_list
|
570 |
-
|
571 |
-
class CodeGenerator:
|
572 |
-
def __init__(self, template_manager: TemplateManager):
|
573 |
-
self.template_manager = template_manager
|
574 |
-
self.error_log: List[str] = []
|
575 |
-
|
576 |
-
def generate_app_code(self, components: List[Dict], layout: str = "vertical",
|
577 |
-
theme: str = "Light", css: str = "") -> str:
|
578 |
-
"""Generate complete Gradio application code"""
|
579 |
-
try:
|
580 |
-
code_parts = []
|
581 |
|
582 |
-
|
583 |
-
code_parts.append(self._generate_imports(components))
|
584 |
|
585 |
-
|
586 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
587 |
|
588 |
-
#
|
589 |
-
|
|
|
590 |
|
591 |
-
return "\n\n".join(code_parts)
|
592 |
except Exception as e:
|
593 |
-
|
594 |
-
|
595 |
-
|
596 |
-
|
597 |
-
|
598 |
-
|
599 |
-
|
600 |
-
|
601 |
-
|
602 |
-
# Add additional imports based on components
|
603 |
-
additional_imports = set()
|
604 |
-
for component in components:
|
605 |
-
if component["type"] == "Image":
|
606 |
-
additional_imports.add("from PIL import Image")
|
607 |
-
elif component["type"] in ["Audio", "Video"]:
|
608 |
-
additional_imports.add("import numpy as np")
|
609 |
-
|
610 |
-
return "\n".join(imports + sorted(list(additional_imports)))
|
611 |
-
|
612 |
-
def _generate_helper_functions(self, components: List[Dict]) -> str:
|
613 |
-
"""Generate helper functions needed by components"""
|
614 |
-
helper_functions = []
|
615 |
|
616 |
-
|
617 |
-
|
618 |
-
|
619 |
-
|
620 |
-
|
621 |
-
|
622 |
-
|
623 |
-
|
624 |
-
|
625 |
-
|
626 |
-
|
627 |
-
|
628 |
-
def process_audio(audio):
|
629 |
-
\"\"\"Process the input audio\"\"\"
|
630 |
-
# Add your audio processing logic here
|
631 |
-
return audio
|
632 |
-
""")
|
633 |
-
|
634 |
-
return "\n".join(helper_functions + sorted(list(processing_functions)))
|
635 |
-
|
636 |
-
def _generate_main_app_code(self, components: List[Dict], layout: str,
|
637 |
-
theme: str, css: str) -> str:
|
638 |
-
"""Generate the main application code"""
|
639 |
-
code_lines = [
|
640 |
-
"# Create Gradio interface",
|
641 |
-
"with gr.Blocks(theme=gr.themes.Default(), css=css) as demo:",
|
642 |
-
" gr.Markdown(\"# Gradio Application\")\n"
|
643 |
-
]
|
644 |
-
|
645 |
-
# Add CSS
|
646 |
-
if css:
|
647 |
-
code_lines.insert(0, f'css = """{css}"""\n')
|
648 |
-
|
649 |
-
# Generate layout
|
650 |
-
if layout == "horizontal":
|
651 |
-
code_lines.append(" with gr.Row():")
|
652 |
-
indent = " "
|
653 |
-
else:
|
654 |
-
code_lines.append(" with gr.Column():")
|
655 |
-
indent = " "
|
656 |
-
|
657 |
-
# Add components
|
658 |
-
component_vars = []
|
659 |
-
for i, component in enumerate(components):
|
660 |
-
var_name = f"{component['type'].lower()}_{i}"
|
661 |
-
component_vars.append(var_name)
|
662 |
|
663 |
-
|
664 |
-
|
665 |
-
|
666 |
-
|
667 |
-
|
668 |
-
|
669 |
-
|
670 |
-
|
671 |
-
|
672 |
-
|
673 |
-
|
674 |
-
" demo.launch()"
|
675 |
-
])
|
676 |
-
|
677 |
-
return "\n".join(code_lines)
|
678 |
-
|
679 |
-
def _generate_event_handlers(self, component_vars: List[str]) -> List[str]:
|
680 |
-
"""Generate event handlers for components"""
|
681 |
-
handlers = []
|
682 |
-
|
683 |
-
# Look for input/output pairs
|
684 |
-
for i in range(len(component_vars) - 1):
|
685 |
-
if "button" in component_vars[i]:
|
686 |
-
handlers.extend([
|
687 |
-
f"\n def process_{i}({component_vars[i-1]}):",
|
688 |
-
f" # Add your processing logic here",
|
689 |
-
f" return {component_vars[i-1]}",
|
690 |
-
f"\n {component_vars[i]}.click(",
|
691 |
-
f" fn=process_{i},",
|
692 |
-
f" inputs=[{component_vars[i-1]}],",
|
693 |
-
f" outputs=[{component_vars[i+1]}]",
|
694 |
-
f" )"
|
695 |
])
|
696 |
-
|
697 |
-
return handlers
|
698 |
-
|
-class AppBuilder:
-    def __init__(self):
-        """Initialize the AppBuilder with necessary managers"""
-        self.component_manager = ComponentManager()
-        self.template_manager = TemplateManager()
-        self.code_generator = CodeGenerator(self.template_manager)
-        self.ai_builder = None  # Will be initialized later if needed
-        self.current_app = {
-            "components": [],
-            "layout": "vertical",
-            "theme": "Light",
-            "css": DEFAULT_THEMES["Light"]
-        }
-        self.error_log: List[str] = []
-
-    def create_app_from_template(self, category: str, template_name: str) -> bool:
-        """Create a new app from a template"""
-        try:
-            template = self.template_manager.get_template(category, template_name)
-            if not template:
-                raise ValueError(f"Template not found: {category}/{template_name}")
-
-            self.current_app = {
-                "components": template["components"],
-                "layout": template["layout"],
-                "theme": "Light",
-                "css": template.get("css", DEFAULT_THEMES["Light"])
-            }

-            self.current_app["layout"] = layout
-            return True
 except Exception as e:

 try:

-            self.current_app["theme"] = theme
-            self.current_app["css"] = DEFAULT_THEMES[theme]
-            return True
 except Exception as e:
-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            return False

-    def
-        """
 try:
-            )
 except Exception as e:
-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            return ""

 try:
-                raise ValueError("No code generated")
-
-            # Create temporary file
-            with tempfile.NamedTemporaryFile(mode='w', suffix='.py', delete=False) as f:
-                f.write(code)
-                temp_file = f.name
-
-            # Execute the code in a separate process
-            result = subprocess.run(['python', temp_file], capture_output=True, text=True)

 except Exception as e:
-            logger.error(error_msg)
-            self.error_log.append(error_msg)
-            return None

-    def _create_builder_tab(self):
-        """Create the component builder tab"""
-        with gr.Row():
-            # Left Column - Component Selection
-            with gr.Column(scale=1):
-                gr.Markdown("### 📦 Components")
-                category = gr.Dropdown(
-                    choices=list(COMPONENT_CATEGORIES.keys()),
-                    label="Category",
-                    value="Basic"
-                )
-                component_type = gr.Dropdown(
-                    choices=list(COMPONENT_CATEGORIES["Basic"].keys()),
-                    label="Component Type"
-                )
-                add_component = gr.Button("Add Component", variant="primary")
-
-            # Middle Column - Component Properties
-            with gr.Column(scale=2):
-                gr.Markdown("### ⚙️ Properties")
-                properties_json = gr.JSON(label="Component Properties")
-                update_properties = gr.Button("Update Properties")
-
-            # Right Column - Component List
-            with gr.Column(scale=1):
-                gr.Markdown("### 📋 Current Components")
-                component_list = gr.JSON(label="Components")
-                remove_component = gr.Button("Remove Selected", variant="stop")
-
-        # Template Selection
-        with gr.Row():
-            gr.Markdown("### 📑 Templates")
-            template_category = gr.Dropdown(
-                choices=list(TEMPLATE_CATEGORIES.keys()),
-                label="Template Category"
-            )
-            template_name = gr.Dropdown(
-                choices=list(TEMPLATE_CATEGORIES["Basic"].keys()),
-                label="Template"
 )

 try:
-            )
-            update_component_properties,
-            inputs=[properties_json],
-            outputs=[component_list]
-        )
-
-    def _create_code_tab(self):
-        """Create the code editor tab"""
-        with gr.Row():
-            with gr.Column():
-                code_editor = gr.Code(
-                    label="Application Code",
-                    language="python",
-                    value="# Your Gradio app code will appear here"
 )
-                update_code = gr.Button("Update Code")

 except Exception as e:
-            return

 try:
-            if success:
-                return "✅ Preview generated successfully"
-            return "❌ Error generating preview"
 except Exception as e:
 
-
|
972 |
-
|
973 |
-
outputs=[preview_status]
|
974 |
-
)
|
975 |
-
def create_gradio_interface():
|
976 |
-
"""Create Gradio interface for the code executor"""
|
977 |
|
978 |
-
def
|
979 |
-
|
980 |
-
|
981 |
-
|
982 |
-
|
983 |
-
|
984 |
-
|
985 |
-
|
986 |
-
|
987 |
-
|
|
|
|
|
|
|
988 |
|
989 |
-
|
990 |
-
|
991 |
-
|
992 |
-
|
993 |
-
code_input = gr.Code(
|
994 |
-
label="Code Input",
|
995 |
-
language="python",
|
996 |
-
lines=10,
|
997 |
-
placeholder="Enter your Python code here..."
|
998 |
-
)
|
999 |
|
1000 |
-
|
1001 |
-
|
1002 |
-
|
1003 |
-
|
1004 |
-
|
1005 |
-
|
1006 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1007 |
)
|
1008 |
|
1009 |
-
|
1010 |
-
|
1011 |
-
|
1012 |
-
|
1013 |
-
execute_btn.click(
|
1014 |
-
fn=process_code,
|
1015 |
-
inputs=[code_input, autonomy_slider],
|
1016 |
-
outputs=output_json
|
1017 |
)
|
1018 |
|
1019 |
-
|
1020 |
-
|
1021 |
-
|
1022 |
-
|
1023 |
-
- Embedding Model: sentence-transformers/all-mpnet-base-v2
|
1024 |
-
- Temperature: 0.1
|
1025 |
-
- Max Length: 2048
|
1026 |
-
""")
|
1027 |
-
|
1028 |
-
with gr.Tab("Help"):
|
1029 |
-
gr.Markdown("""
|
1030 |
-
### How to Use
|
1031 |
-
1. Enter your Python code in the Code Input area
|
1032 |
-
2. Adjust the Autonomy Level:
|
1033 |
-
- 0: No automatic fixes
|
1034 |
-
- 5: Balanced approach
|
1035 |
-
- 10: Fully autonomous operation
|
1036 |
-
3. Click "Execute Code" to run
|
1037 |
-
|
1038 |
-
### Features
|
1039 |
-
- Automatic code analysis
|
1040 |
-
- Error detection and fixing
|
1041 |
-
- Code formatting
|
1042 |
-
- Syntax validation
|
1043 |
-
""")
|
1044 |
-
|
1045 |
-
return interface
|
1046 |
-
|
1047 |
-
def create_gradio_interface():
|
1048 |
-
"""Create Gradio interface for the code executor"""
|
1049 |
-
|
1050 |
-
def process_code(code: str, autonomy_level: int) -> Dict:
|
1051 |
-
executor.set_autonomy_level(autonomy_level)
|
1052 |
-
result = executor.execute_code(code)
|
1053 |
-
return {
|
1054 |
-
"Success": result["success"],
|
1055 |
-
"Output": result["output"],
|
1056 |
-
"Error": result["error"] or "None",
|
1057 |
-
"Fixed Code": result["fixed_code"] or code,
|
1058 |
-
"Iterations": result["iterations"]
|
1059 |
-
}
|
1060 |
-
|
1061 |
-
with gr.Blocks() as interface:
|
1062 |
-
gr.Markdown("# 🤖 Autonomous Code Executor")
|
1063 |
-
|
1064 |
-
with gr.Tab("Code Execution"):
|
1065 |
-
# Corrected Code component initialization
|
1066 |
-
code_input = gr.Code(
|
1067 |
-
label="Code Input",
|
1068 |
-
language="python",
|
1069 |
-
lines=10,
|
1070 |
-
value="# Enter your Python code here..." # Using value instead of placeholder
|
1071 |
)
|
1072 |
|
1073 |
-
|
1074 |
-
|
1075 |
-
|
1076 |
-
|
1077 |
-
value=5,
|
1078 |
-
label="Autonomy Level",
|
1079 |
-
info="0: No fixes, 5: Balanced, 10: Fully autonomous"
|
1080 |
)
|
1081 |
|
1082 |
-
|
1083 |
-
|
1084 |
-
|
1085 |
-
|
1086 |
-
gr.Examples(
|
1087 |
-
examples=[
|
1088 |
-
["def hello():\n print('Hello, World!')", 5],
|
1089 |
-
["for i in range(5):\n print(i)", 5]
|
1090 |
-
],
|
1091 |
-
inputs=[code_input, autonomy_slider],
|
1092 |
-
outputs=output_json,
|
1093 |
-
label="Example Code"
|
1094 |
)
|
1095 |
|
1096 |
-
|
1097 |
-
fn=
|
1098 |
-
inputs=[
|
1099 |
-
outputs=
|
1100 |
)
|
1101 |
|
1102 |
-
|
1103 |
-
|
1104 |
-
|
1105 |
-
|
1106 |
-
- Embedding Model: sentence-transformers/all-mpnet-base-v2
|
1107 |
-
- Temperature: 0.1
|
1108 |
-
- Max Length: 2048
|
1109 |
-
""")
|
1110 |
|
1111 |
-
|
1112 |
-
|
1113 |
-
|
1114 |
-
|
1115 |
-
|
1116 |
-
|
1117 |
-
|
1118 |
-
|
1119 |
-
|
|
|
1120 |
|
1121 |
-
|
1122 |
-
|
1123 |
-
|
1124 |
-
|
1125 |
-
|
1126 |
-
|
1127 |
-
|
1128 |
-
return interface
|
1129 |
|
1130 |
-
|
1131 |
-
interface = create_gradio_interface()
|
1132 |
|
1133 |
-
|
1134 |
-
|
1135 |
-
|
1136 |
-
|
1137 |
-
|
1138 |
-
|
1139 |
-
|
1140 |
-
|
1141 |
-
|
1142 |
-
|
1143 |
-
|
1144 |
-
|
1145 |
-
|
1146 |
-
|
1147 |
-
|
1148 |
-
|
1149 |
-
|
1150 |
-
|
1151 |
-
|
|
|
|
|
1152 |
|
1153 |
-
|
1154 |
-
|
1155 |
-
|
1156 |
-
|
1157 |
-
|
1158 |
-
|
1159 |
-
|
1160 |
-
def get_next_question(command, step, previous_answer=None):
|
1161 |
-
"""Generate next question based on command and current step"""
|
1162 |
-
questions = {
|
1163 |
-
"create": [
|
1164 |
-
"Do you need user input components?",
|
1165 |
-
"Do you need data processing functionality?",
|
1166 |
-
"Would you like to add styling/themes?"
|
1167 |
-
],
|
1168 |
-
"component": [
|
1169 |
-
"Is this for user input?",
|
1170 |
-
"Do you need media handling (image/audio/video)?",
|
1171 |
-
"Should the component have real-time updates?"
|
1172 |
-
],
|
1173 |
-
"layout": [
|
1174 |
-
"Do you want a multi-tab layout?",
|
1175 |
-
"Do you need responsive design?",
|
1176 |
-
"Should components be arranged horizontally?"
|
1177 |
-
],
|
1178 |
-
"style": [
|
1179 |
-
"Do you want a dark theme?",
|
1180 |
-
"Do you need custom CSS?",
|
1181 |
-
"Should components have rounded corners?"
|
1182 |
-
],
|
1183 |
-
"data": [
|
1184 |
-
"Will you be handling file uploads?",
|
1185 |
-
"Do you need data visualization?",
|
1186 |
-
"Should data be stored persistently?"
|
1187 |
-
],
|
1188 |
-
"api": [
|
1189 |
-
"Do you need authentication for API?",
|
1190 |
-
"Will you be handling JSON data?",
|
1191 |
-
"Do you need error handling?"
|
1192 |
-
],
|
1193 |
-
"auth": [
|
1194 |
-
"Do you need user registration?",
|
1195 |
-
"Should sessions be persistent?",
|
1196 |
-
"Do you need role-based access?"
|
1197 |
-
],
|
1198 |
-
"file": [
|
1199 |
-
"Will you handle multiple file types?",
|
1200 |
-
"Do you need file preprocessing?",
|
1201 |
-
"Should files be stored locally?"
|
1202 |
-
],
|
1203 |
-
"viz": [
|
1204 |
-
"Do you need interactive plots?",
|
1205 |
-
"Will you use real-time data?",
|
1206 |
-
"Do you need multiple chart types?"
|
1207 |
-
],
|
1208 |
-
"db": [
|
1209 |
-
"Do you need real-time updates?",
|
1210 |
-
"Will you use SQL database?",
|
1211 |
-
"Do you need data caching?"
|
1212 |
-
]
|
1213 |
-
}
|
1214 |
-
|
1215 |
-
if step < len(questions[command]):
|
1216 |
-
return questions[command][step]
|
1217 |
-
return None
|
1218 |
|
1219 |
-
def
|
1220 |
-
"""
|
1221 |
-
|
1222 |
-
|
1223 |
-
```python
|
1224 |
-
import gradio as gr
|
1225 |
|
1226 |
-
def
|
1227 |
-
|
1228 |
-
|
|
|
1229 |
|
1230 |
-
|
1231 |
-
|
1232 |
-
|
1233 |
-
|
|
|
|
|
|
|
1234 |
|
1235 |
-
|
1236 |
-
|
1237 |
-
|
1238 |
-
|
1239 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1240 |
|
1241 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
1242 |
|
1243 |
-
|
1244 |
-
|
1245 |
-
|
1246 |
-
|
1247 |
|
1248 |
-
|
1249 |
-
|
1250 |
-
|
1251 |
-
|
1252 |
-
|
1253 |
-
|
1254 |
-
|
1255 |
-
|
1256 |
-
|
1257 |
-
return base_templates[command].format(
|
1258 |
-
input_params=input_params,
|
1259 |
-
processing_logic=processing_logic,
|
1260 |
-
style_params=style_params,
|
1261 |
-
components=components,
|
1262 |
-
layout=layout,
|
1263 |
-
event_handlers=event_handlers
|
1264 |
-
)
|
1265 |
|
1266 |
-
def
|
1267 |
-
"""
|
1268 |
try:
|
1269 |
-
|
1270 |
-
|
1271 |
-
|
1272 |
-
|
1273 |
-
|
1274 |
-
|
1275 |
-
|
1276 |
-
|
1277 |
-
|
1278 |
-
|
1279 |
-
|
1280 |
-
|
1281 |
-
|
1282 |
-
|
1283 |
-
if next_question:
|
1284 |
-
# More questions to ask
|
1285 |
-
return history + [[answer, next_question]], True, next_question
|
1286 |
-
else:
|
1287 |
-
# Generate final code
|
1288 |
-
code = generate_code(chat_state.command, chat_state.context)
|
1289 |
-
return history + [[answer, f"Here's your code:\n{code}"]], False, None
|
1290 |
-
|
1291 |
-
return history, True, None
|
1292 |
-
|
1293 |
except Exception as e:
|
1294 |
-
|
1295 |
-
|
1296 |
-
return history + [[None, error_msg]], False, None
|
1297 |
-
|
1298 |
-
def reset_chat():
|
1299 |
-
"""Reset chat state and history"""
|
1300 |
-
chat_state.reset()
|
1301 |
-
return None, None, False, None
|
1302 |
-
|
1303 |
-
# Set up event handlers
|
1304 |
-
send_btn.click(
|
1305 |
-
process_chat,
|
1306 |
-
inputs=[command_input, user_input, chat_history],
|
1307 |
-
outputs=[chat_history, user_input, user_input]
|
1308 |
-
)
|
1309 |
-
|
1310 |
-
restart_btn.click(
|
1311 |
-
reset_chat,
|
1312 |
-
outputs=[chat_history, command_input, user_input, user_input]
|
1313 |
-
)
|
1314 |
-
def _create_settings_tab(self):
|
1315 |
-
"""Create the settings tab"""
|
1316 |
-
with gr.Row():
|
1317 |
-
with gr.Column():
|
1318 |
-
gr.Markdown("### 🎨 Appearance")
|
1319 |
-
theme_dropdown = gr.Dropdown(
|
1320 |
-
choices=list(DEFAULT_THEMES.keys()),
|
1321 |
-
label="Theme",
|
1322 |
-
value="Light"
|
1323 |
-
)
|
1324 |
-
layout_dropdown = gr.Dropdown(
|
1325 |
-
choices=["vertical", "horizontal", "tabs"],
|
1326 |
-
label="Layout",
|
1327 |
-
value="vertical"
|
1328 |
-
)
|
1329 |
-
|
1330 |
-
with gr.Column():
|
1331 |
-
gr.Markdown("### 📝 Custom CSS")
|
1332 |
-
css_editor = gr.Code(
|
1333 |
-
label="Custom CSS",
|
1334 |
-
language="css",
|
1335 |
-
value=DEFAULT_THEMES["Light"]
|
1336 |
-
)
|
1337 |
|
1338 |
-
|
1339 |
-
|
1340 |
-
|
1341 |
-
|
1342 |
-
|
1343 |
-
|
1344 |
-
self.app_builder.current_app["css"] = css
|
1345 |
-
return "✅ Settings updated successfully"
|
1346 |
-
except Exception as e:
|
1347 |
-
return f"❌ Error updating settings: {str(e)}"
|
1348 |
|
1349 |
-
|
1350 |
-
|
1351 |
-
|
1352 |
-
|
1353 |
-
|
|
|
|
|
1354 |
|
1355 |
def main():
|
1356 |
-
"""Main
|
1357 |
try:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1358 |
interface = GradioInterface()
|
1359 |
-
|
1360 |
-
demo.launch(
|
1361 |
-
server_name="0.0.0.0",
|
1362 |
-
server_port=7860,
|
1363 |
share=True,
|
1364 |
-
debug=True
|
|
|
1365 |
)
|
|
|
1366 |
except Exception as e:
|
1367 |
-
logger.error(f"Application
|
1368 |
raise
|
1369 |
|
1370 |
if __name__ == "__main__":
|
|
|
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+
+"""
+Gradio Interface Builder
+A tool for building and managing Gradio interfaces using AI-powered code generation
+"""
+
+import threading
+import time
 import gradio as gr
 import logging
 import json
 import re
 import torch
 import tempfile
 import subprocess
+import ast
 from pathlib import Path
+from typing import Dict, List, Tuple, Optional, Any, Union
+from dataclasses import dataclass, field
 from enum import Enum
+from transformers import (
+    AutoTokenizer,
+    AutoModelForCausalLM,
+    pipeline,
+    AutoProcessor,
+    AutoModel
+)
+from sentence_transformers import SentenceTransformer
+import faiss
+import numpy as np
+from PIL import Image

 # Configure logging
 logging.basicConfig(
     level=logging.INFO,
+    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
     handlers=[
         logging.StreamHandler(),
         logging.FileHandler('gradio_builder.log')
     ]
 )
 logger = logging.getLogger(__name__)

+# Constants
+DEFAULT_PORT = 7860
+MODEL_CACHE_DIR = Path("model_cache")
+TEMPLATE_DIR = Path("templates")
+TEMP_DIR = Path("temp")

+# Ensure directories exist
+for directory in [MODEL_CACHE_DIR, TEMPLATE_DIR, TEMP_DIR]:
+    directory.mkdir(exist_ok=True)
 
 @dataclass
+class Template:
+    """Template data structure"""
+    code: str
+    description: str
+    components: List[str]
+    metadata: Dict[str, Any] = field(default_factory=dict)
+    version: str = "1.0"
+
+class ComponentType(Enum):
+    """Supported Gradio component types"""
+    IMAGE = "Image"
+    TEXTBOX = "Textbox"
+    BUTTON = "Button"
+    NUMBER = "Number"
+    MARKDOWN = "Markdown"
+    JSON = "JSON"
+    HTML = "HTML"
+    CODE = "Code"
+    DROPDOWN = "Dropdown"
+    SLIDER = "Slider"
+    CHECKBOX = "Checkbox"
+    RADIO = "Radio"
+    AUDIO = "Audio"
+    VIDEO = "Video"
+    FILE = "File"
+    DATAFRAME = "DataFrame"
+    LABEL = "Label"
+    PLOT = "Plot"

+@dataclass
+class ComponentConfig:
+    """Configuration for Gradio components"""
+    type: ComponentType
+    label: str
+    properties: Dict[str, Any] = field(default_factory=dict)
+    events: List[str] = field(default_factory=list)
+
+
|
94 |
+
class BuilderError(Exception):
|
95 |
+
"""Base exception for Gradio Builder errors"""
|
96 |
+
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
97 |
|
98 |
+
class ValidationError(BuilderError):
|
99 |
+
"""Raised when validation fails"""
|
100 |
+
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
101 |
|
102 |
+
class GenerationError(BuilderError):
|
103 |
+
"""Raised when code generation fails"""
|
104 |
+
pass
|
|
|
|
|
105 |
|
106 |
+
class ModelError(BuilderError):
|
107 |
+
"""Raised when model operations fail"""
|
108 |
+
pass
|
109 |
+
|
110 |
+
def setup_gpu_memory():
|
111 |
+
"""Configure GPU memory usage"""
|
112 |
+
try:
|
113 |
+
if torch.cuda.is_available():
|
114 |
+
# Enable memory growth
|
115 |
+
torch.cuda.empty_cache()
|
116 |
+
# Set memory fraction
|
117 |
+
torch.cuda.set_per_process_memory_fraction(0.8)
|
118 |
+
logger.info("GPU memory configured successfully")
|
119 |
+
else:
|
120 |
+
logger.info("No GPU available, using CPU")
|
121 |
+
except Exception as e:
|
122 |
+
logger.warning(f"Error configuring GPU memory: {e}")
|
123 |
+
|
124 |
+
def validate_code(code: str) -> Tuple[bool, str]:
|
125 |
+
"""Validate Python code syntax"""
|
126 |
+
try:
|
127 |
+
ast.parse(code)
|
128 |
+
return True, "Code is valid"
|
129 |
+
except SyntaxError as e:
|
130 |
+
line_no = e.lineno
|
131 |
+
offset = e.offset
|
132 |
+
line = e.text
|
133 |
+
if line:
|
134 |
+
pointer = " " * (offset - 1) + "^"
|
135 |
+
error_detail = f"\nLine {line_no}:\n{line}\n{pointer}"
|
136 |
+
else:
|
137 |
+
error_detail = f" at line {line_no}"
|
138 |
+
return False, f"Syntax error: {str(e)}{error_detail}"
|
139 |
+
except Exception as e:
|
140 |
+
return False, f"Validation error: {str(e)}"
|
141 |
|
142 |
+
class CodeFormatter:
|
143 |
+
"""Handles code formatting and cleanup"""
|
144 |
+
|
145 |
+
@staticmethod
|
146 |
+
def format_code(code: str) -> str:
|
147 |
+
"""Format code using black"""
|
148 |
try:
|
149 |
+
import black
|
150 |
+
return black.format_str(code, mode=black.FileMode())
|
151 |
+
except ImportError:
|
152 |
+
logger.warning("black not installed, returning unformatted code")
|
153 |
+
return code
|
154 |
+
except Exception as e:
|
155 |
+
logger.error(f"Error formatting code: {e}")
|
156 |
+
return code
|
157 |
+
|
158 |
+
@staticmethod
|
159 |
+
def cleanup_code(code: str) -> str:
|
160 |
+
"""Clean up generated code"""
|
161 |
+
# Remove any potential unsafe imports
|
162 |
+
unsafe_imports = ['os', 'subprocess', 'sys']
|
163 |
+
lines = code.split('\n')
|
164 |
+
cleaned_lines = []
|
165 |
+
|
166 |
+
for line in lines:
|
167 |
+
skip = False
|
168 |
+
for unsafe in unsafe_imports:
|
169 |
+
if f"import {unsafe}" in line or f"from {unsafe}" in line:
|
170 |
+
skip = True
|
171 |
+
break
|
172 |
+
if not skip:
|
173 |
+
cleaned_lines.append(line)
|
174 |
+
|
175 |
+
return '\n'.join(cleaned_lines)
|
176 |
|
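Illustrative use of `CodeFormatter` (assumption: `black` may or may not be installed; `format_code` falls back to returning the input unchanged when it is not).

```python
raw = "import os\nimport gradio as gr\nx=1\n"

safe = CodeFormatter.cleanup_code(raw)    # drops the "import os" line
pretty = CodeFormatter.format_code(safe)  # "x = 1" if black is available, else unchanged
print(pretty)
```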
+def create_temp_module(code: str) -> str:
+    """Create a temporary module from code"""
+    try:
+        temp_file = TEMP_DIR / f"temp_module_{int(time.time())}.py"
+        with open(temp_file, "w", encoding="utf-8") as f:
+            f.write(code)
+        return str(temp_file)
+    except Exception as e:
+        raise BuilderError(f"Failed to create temporary module: {e}")

+# Initialize GPU configuration
+setup_gpu_memory()
 
+class ModelManager:
+    """Manages AI models and their configurations"""
+
+    def __init__(self, cache_dir: Path = MODEL_CACHE_DIR):
+        self.cache_dir = cache_dir
+        self.cache_dir.mkdir(exist_ok=True)
+        self.loaded_models = {}
+        self.model_configs = {
+            "code_generator": {
+                "model_id": "bigcode/starcoder",
+                "tokenizer": AutoTokenizer,
+                "model": AutoModelForCausalLM,
+                "kwargs": {
+                    "torch_dtype": torch.float16,
+                    "device_map": "auto",
+                    "cache_dir": str(cache_dir)
+                }
+            },
+            "image_processor": {
+                "model_id": "Salesforce/blip-image-captioning-base",
+                "processor": AutoProcessor,
+                "model": AutoModel,
+                "kwargs": {
+                    "cache_dir": str(cache_dir)
+                }
             }
+        }

+    def load_model(self, model_type: str):
+        """Load a model by type"""
         try:
+            if model_type not in self.model_configs:
+                raise ModelError(f"Unknown model type: {model_type}")

+            if model_type in self.loaded_models:
+                return self.loaded_models[model_type]

+            config = self.model_configs[model_type]
+            logger.info(f"Loading {model_type} model...")

+            if model_type == "code_generator":
+                tokenizer = config["tokenizer"].from_pretrained(
+                    config["model_id"],
+                    **config["kwargs"]
+                )
+                model = config["model"].from_pretrained(
+                    config["model_id"],
+                    **config["kwargs"]
+                )
+                self.loaded_models[model_type] = (model, tokenizer)

+            elif model_type == "image_processor":
+                processor = config["processor"].from_pretrained(
+                    config["model_id"],
+                    **config["kwargs"]
+                )
+                model = config["model"].from_pretrained(
+                    config["model_id"],
+                    **config["kwargs"]
+                )
+                self.loaded_models[model_type] = (model, processor)

+            logger.info(f"{model_type} model loaded successfully")
+            return self.loaded_models[model_type]

         except Exception as e:
+            raise ModelError(f"Error loading {model_type} model: {str(e)}")

+    def unload_model(self, model_type: str):
+        """Unload a model to free memory"""
+        if model_type in self.loaded_models:
+            del self.loaded_models[model_type]
+            torch.cuda.empty_cache()
+            logger.info(f"{model_type} model unloaded")
 
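A hedged sketch of the intended `ModelManager` flow (the configured checkpoints are large downloads, so this is illustrative rather than something to run as-is).

```python
manager = ModelManager()

# Loading returns a (model, tokenizer) or (model, processor) pair and caches it.
model, tokenizer = manager.load_model("code_generator")

# Repeated calls reuse the cached pair instead of reloading from disk.
assert manager.load_model("code_generator") is manager.loaded_models["code_generator"]

# Free GPU memory when done.
manager.unload_model("code_generator")
```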
+class MultimodalRAG:
+    """Multimodal Retrieval-Augmented Generation system"""
+
+    def __init__(self):
+        """Initialize the multimodal RAG system"""
+        try:
+            self.model_manager = ModelManager()
+
+            # Load text encoder
+            self.text_encoder = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')
+
+            # Initialize vector store
+            self.vector_store = self._initialize_vector_store()
+
+            # Load template database
+            self.template_embeddings = {}
+            self._initialize_template_embeddings()
+
         except Exception as e:
+            raise ModelError(f"Error initializing MultimodalRAG: {str(e)}")

+    def _initialize_vector_store(self) -> faiss.IndexFlatL2:
+        """Initialize FAISS vector store"""
+        combined_dim = 768 + 384  # BLIP (768) + text (384)
+        return faiss.IndexFlatL2(combined_dim)

+    def _initialize_template_embeddings(self):
+        """Initialize template embeddings"""
+        try:
+            template_path = TEMPLATE_DIR / "template_embeddings.npz"
+            if template_path.exists():
+                data = np.load(template_path)
+                self.template_embeddings = {
+                    name: embedding for name, embedding in data.items()
+                }
         except Exception as e:
+            logger.error(f"Error loading template embeddings: {e}")

+    def save_template_embeddings(self):
+        """Save template embeddings to disk"""
         try:
+            template_path = TEMPLATE_DIR / "template_embeddings.npz"
+            np.savez(
+                template_path,
+                **self.template_embeddings
+            )
         except Exception as e:
+            logger.error(f"Error saving template embeddings: {e}")

+    def encode_image(self, image: Image.Image) -> np.ndarray:
+        """Encode image using BLIP"""
         try:
+            model, processor = self.model_manager.load_model("image_processor")
+
+            inputs = processor(images=image, return_tensors="pt")
+            with torch.no_grad():
+                image_features = model.get_image_features(**inputs)
+
+            return image_features.detach().numpy()
+
         except Exception as e:
+            raise ModelError(f"Error encoding image: {str(e)}")

+    def encode_text(self, text: str) -> np.ndarray:
+        """Encode text using sentence-transformers"""
         try:
+            return self.text_encoder.encode(text)
         except Exception as e:
+            raise ModelError(f"Error encoding text: {str(e)}")
 
+    def generate_code(self, description: str, template_code: str) -> str:
+        """Generate code using StarCoder"""
         try:
+            model, tokenizer = self.model_manager.load_model("code_generator")

+            prompt = f"""
+            # Task: Generate a Gradio interface based on the description
+            # Description: {description}
+            # Base template:
+            {template_code}
+
+            # Generate a customized version of the template that implements the description.
+            # Only output the Python code, no explanations.

+            ```python
+            """

+            inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

+            with torch.no_grad():
+                outputs = model.generate(
+                    inputs.input_ids,
+                    max_length=2048,
+                    temperature=0.2,
+                    top_p=0.95,
+                    do_sample=True,
+                    pad_token_id=tokenizer.eos_token_id
+                )
+
+            generated_code = tokenizer.decode(outputs[0], skip_special_tokens=True)

+            # Clean and format the generated code
+            generated_code = self._clean_generated_code(generated_code)
+            return CodeFormatter.format_code(generated_code)

         except Exception as e:
+            raise GenerationError(f"Error generating code: {str(e)}")
+
+    def _clean_generated_code(self, code: str) -> str:
+        """Clean and format generated code"""
+        # Extract code between triple backticks if present
+        if "```python" in code:
+            code = code.split("```python")[1].split("```")[0]
+        elif "```" in code:
+            code = code.split("```")[1].split("```")[0]

+        code = code.strip()
+        return CodeFormatter.cleanup_code(code)
+
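The cleanup step above strips the Markdown fence the model is prompted to emit; a small illustration follows (the sample output string is made up, and the method is called unbound purely to show the string handling, since it does not touch `self`).

```python
fence = "`" * 3   # build the fence dynamically to avoid nesting literal fences here
sample_output = (
    "Some explanation from the model\n"
    f"{fence}python\n"
    "import gradio as gr\n"
    'demo = gr.Interface(fn=str, inputs="text", outputs="text")\n'
    f"{fence}\n"
)

# Keeps only the fenced body, then runs CodeFormatter.cleanup_code on it.
cleaned = MultimodalRAG._clean_generated_code(None, sample_output)
print(cleaned.splitlines()[0])   # "import gradio as gr"
```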
+    def find_similar_template(
+        self,
+        screenshot: Optional[Image.Image],
+        description: str
+    ) -> Tuple[str, Template]:
+        """Find most similar template based on image and description"""
+        try:
+            # Get embeddings
+            text_embedding = self.encode_text(description)

+            if screenshot:
+                img_embedding = self.encode_image(screenshot)
+                query_embedding = np.concatenate([
+                    img_embedding.flatten(),
+                    text_embedding
+                ])
+            else:
+                # If no image, duplicate text embedding to match dimensions
+                query_embedding = np.concatenate([
+                    text_embedding,
+                    text_embedding
 ])

+            # Search in vector store
+            D, I = self.vector_store.search(
+                np.array([query_embedding]),
+                k=1
+            )
+
+            # Get template name from index
+            template_names = list(self.template_embeddings.keys())
+            template_name = template_names[I[0][0]]
+
+            # Load template
+            template_path = TEMPLATE_DIR / f"{template_name}.json"
+            with open(template_path, 'r') as f:
+                template_data = json.load(f)
+            template = Template(**template_data)
+
+            return template_name, template

         except Exception as e:
+            raise ModelError(f"Error finding similar template: {str(e)}")
+
+    def generate_interface(
+        self,
+        screenshot: Optional[Image.Image],
+        description: str
+    ) -> str:
+        """Generate complete interface based on input"""
         try:
+            # Find similar template
+            template_name, template = self.find_similar_template(
+                screenshot,
+                description
+            )
+
+            # Generate customized code
+            custom_code = self.generate_code(
+                description,
+                template.code
+            )
+
+            return custom_code

         except Exception as e:
+            raise GenerationError(f"Error generating interface: {str(e)}")
 
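An end-to-end sketch of the retrieval-plus-generation path (hedged: it assumes template embeddings have already been indexed and that the code-generation weights are available locally; the input filename is hypothetical).

```python
from PIL import Image

rag = MultimodalRAG()
try:
    screenshot = Image.open("reference_ui.png")   # hypothetical reference screenshot
    code = rag.generate_interface(
        screenshot=screenshot,
        description="An image captioning demo with an upload box and a caption textbox",
    )
    print(code)
finally:
    rag.cleanup()
```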
+    def cleanup(self):
+        """Cleanup resources"""
         try:
+            # Save template embeddings
+            self.save_template_embeddings()
+
+            # Unload models
+            self.model_manager.unload_model("code_generator")
+            self.model_manager.unload_model("image_processor")
+
+            # Clear CUDA cache
+            torch.cuda.empty_cache()
+
         except Exception as e:
+            logger.error(f"Error during cleanup: {e}")
 
+class TemplateManager:
+    """Manages Gradio interface templates"""
+
+    def __init__(self, template_dir: Path = TEMPLATE_DIR):
+        self.template_dir = template_dir
+        self.template_dir.mkdir(exist_ok=True)
+        self.templates: Dict[str, Template] = {}
+        self.load_templates()
+
+    def load_templates(self):
+        """Load all templates from directory"""
         try:
+            # Load built-in templates
+            self.templates.update(self._get_builtin_templates())

+            # Load custom templates
+            for template_file in self.template_dir.glob("*.json"):
+                try:
+                    with open(template_file, 'r', encoding='utf-8') as f:
+                        template_data = json.load(f)
+                        name = template_file.stem
+                        self.templates[name] = Template(**template_data)
+                except Exception as e:
+                    logger.error(f"Error loading template {template_file}: {e}")
+
         except Exception as e:
+            logger.error(f"Error loading templates: {e}")
 
    def _get_builtin_templates(self) -> Dict[str, Template]:
        """Get built-in templates"""
        return {
            "image_classifier": Template(
                code="""
import gradio as gr
import numpy as np
from PIL import Image

def classify_image(image):
    if image is None:
        return {"error": 1.0}
    # Add classification logic here
    return {"class1": 0.8, "class2": 0.2}

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Image Classifier")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(type="pil")
            classify_btn = gr.Button("Classify")
        with gr.Column():
            output_labels = gr.Label()

    classify_btn.click(
        fn=classify_image,
        inputs=input_image,
        outputs=output_labels
    )

if __name__ == "__main__":
    demo.launch()
""",
                description="Basic image classification interface",
                components=["Image", "Button", "Label"],
                metadata={"category": "computer_vision"}
            ),

            "text_analyzer": Template(
                code="""
import gradio as gr
import numpy as np

def analyze_text(text, options):
    if not text:
        return "Please enter some text"

    results = []
    if "word_count" in options:
        results.append(f"Word count: {len(text.split())}")
    if "char_count" in options:
        results.append(f"Character count: {len(text)}")
    if "sentiment" in options:
        # Add sentiment analysis logic here
        results.append("Sentiment: Neutral")

    return "\\n".join(results)

with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("# Text Analysis Tool")
    with gr.Row():
        with gr.Column():
            input_text = gr.Textbox(
                label="Input Text",
                placeholder="Enter text to analyze...",
                lines=5
            )
            options = gr.CheckboxGroup(
                choices=["word_count", "char_count", "sentiment"],
                label="Analysis Options",
                value=["word_count"]
            )
            analyze_btn = gr.Button("Analyze")
        with gr.Column():
            output_text = gr.Textbox(
                label="Analysis Results",
                lines=5
            )

    analyze_btn.click(
        fn=analyze_text,
        inputs=[input_text, options],
        outputs=output_text
    )

if __name__ == "__main__":
    demo.launch()
""",
                description="Text analysis interface with multiple options",
                components=["Textbox", "CheckboxGroup", "Button"],
                metadata={"category": "nlp"}
            )
        }

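    # Custom templates are persisted as JSON so load_templates() can pick them
    # up again from TEMPLATE_DIR on the next run.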
    def save_template(self, name: str, template: Template) -> bool:
        """Save new template"""
        try:
            template_path = self.template_dir / f"{name}.json"
            template_dict = {
                "code": template.code,
                "description": template.description,
                "components": template.components,
                "metadata": template.metadata,
                "version": template.version
            }

            with open(template_path, 'w', encoding='utf-8') as f:
                json.dump(template_dict, f, indent=4)

            self.templates[name] = template
            return True

        except Exception as e:
            logger.error(f"Error saving template {name}: {e}")
            return False

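    # The category filter in list_templates() matches the "category" key in
    # template metadata (e.g. "computer_vision", "nlp", "custom");
    # _get_template_choices() uses the unfiltered list for the template dropdown.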
    def get_template(self, name: str) -> Optional[Template]:
        """Get template by name"""
        return self.templates.get(name)

    def list_templates(self, category: Optional[str] = None) -> List[Dict[str, Any]]:
        """List all available templates with optional category filter"""
        templates_list = []
        for name, template in self.templates.items():
            if category and template.metadata.get("category") != category:
                continue
            templates_list.append({
                "name": name,
                "description": template.description,
                "components": template.components,
                "category": template.metadata.get("category", "general")
            })
        return templates_list

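# The analyzer below inspects generated code with Python's ast module; the code
# is only parsed, never executed, so analysis is safe for unreviewed snippets.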
class InterfaceAnalyzer:
    """Analyzes Gradio interfaces"""

    @staticmethod
    def extract_components(code: str) -> List[ComponentConfig]:
        """Extract components from code"""
        components = []
        try:
            tree = ast.parse(code)
            for node in ast.walk(tree):
                if isinstance(node, ast.Call):
                    if isinstance(node.func, ast.Attribute):
                        if hasattr(node.func.value, 'id') and node.func.value.id == 'gr':
                            component_type = node.func.attr
                            if hasattr(ComponentType, component_type.upper()):
                                # Extract component properties
                                properties = {}
                                label = None
                                events = []

                                # Get properties from keywords
                                for keyword in node.keywords:
                                    if keyword.arg == 'label':
                                        try:
                                            label = ast.literal_eval(keyword.value)
                                        except (ValueError, TypeError, SyntaxError):
                                            label = None
                                    else:
                                        try:
                                            properties[keyword.arg] = ast.literal_eval(keyword.value)
                                        except (ValueError, TypeError, SyntaxError):
                                            properties[keyword.arg] = None

                                # Look for event handlers
                                parent = InterfaceAnalyzer._find_parent_assign(tree, node)
                                if parent:
                                    events = InterfaceAnalyzer._find_component_events(tree, parent)

                                components.append(ComponentConfig(
                                    type=ComponentType[component_type.upper()],
                                    label=label or component_type,
                                    properties=properties,
                                    events=events
                                ))

        except Exception as e:
            logger.error(f"Error extracting components: {e}")

        return components

+
@staticmethod
|
681 |
+
def _find_parent_assign(tree: ast.AST, node: ast.Call) -> Optional[ast.AST]:
|
682 |
+
"""Find the assignment node for a component"""
|
683 |
+
for potential_parent in ast.walk(tree):
|
684 |
+
if isinstance(potential_parent, ast.Assign):
|
685 |
+
for child in ast.walk(potential_parent.value):
|
686 |
+
if child == node:
|
687 |
+
return potential_parent
|
688 |
+
return None
|
689 |
|
690 |
+
@staticmethod
|
691 |
+
def _find_component_events(tree: ast.AST, assign_node: ast.Assign) -> List[str]:
|
692 |
+
"""Find events attached to a component"""
|
693 |
+
events = []
|
694 |
+
component_name = assign_node.targets[0].id
|
695 |
+
|
696 |
+
for node in ast.walk(tree):
|
697 |
+
if isinstance(node, ast.Call):
|
698 |
+
if isinstance(node.func, ast.Attribute):
|
699 |
+
if hasattr(node.func.value, 'id') and node.func.value.id == component_name:
|
700 |
+
events.append(node.func.attr)
|
701 |
+
|
702 |
+
return events
|
703 |
|
    @staticmethod
    def analyze_interface_structure(code: str) -> Dict[str, Any]:
        """Analyze interface structure"""
        try:
            # Extract components
            components = InterfaceAnalyzer.extract_components(code)

            # Analyze functions
            functions = []
            tree = ast.parse(code)
            for node in ast.walk(tree):
                if isinstance(node, ast.FunctionDef):
                    functions.append({
                        "name": node.name,
                        "args": [arg.arg for arg in node.args.args],
                        "returns": InterfaceAnalyzer._get_return_type(node)
                    })

            # Analyze dependencies
            dependencies = set()
            for node in ast.walk(tree):
                if isinstance(node, ast.Import):
                    for name in node.names:
                        dependencies.add(name.name)
                elif isinstance(node, ast.ImportFrom):
                    if node.module:
                        dependencies.add(node.module)

            return {
                "components": [
                    {
                        "type": comp.type.value,
                        "label": comp.label,
                        "properties": comp.properties,
                        "events": comp.events
                    }
                    for comp in components
                ],
                "functions": functions,
                "dependencies": list(dependencies)
            }

        except Exception as e:
            logger.error(f"Error analyzing interface: {e}")
            return {}

    @staticmethod
    def _get_return_type(node: ast.FunctionDef) -> str:
        """Get function return type if specified"""
        if node.returns:
            return ast.unparse(node.returns)
        return "Any"

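# PreviewManager launches the generated app in a separate Python process.
# create_temp_module() and DEFAULT_PORT are assumed to be the helper and
# constant defined earlier in this file; the preview URL returned below assumes
# the child app listens on that default port.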
class PreviewManager:
    """Manages interface previews"""

    def __init__(self):
        self.current_process: Optional[subprocess.Popen] = None
        self.preview_port = DEFAULT_PORT
        self._lock = threading.Lock()

    def start_preview(self, code: str) -> Tuple[bool, str]:
        """Start preview in a separate process"""
        with self._lock:
            try:
                self.stop_preview()

                # Create temporary module
                module_path = create_temp_module(code)

                # Start new process
                self.current_process = subprocess.Popen(
                    ['python', module_path],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE
                )

                # Wait for server to start
                time.sleep(2)

                # Check if process is still running
                if self.current_process.poll() is not None:
                    stdout, stderr = self.current_process.communicate()
                    error_msg = stderr.decode('utf-8')
                    raise RuntimeError(f"Preview failed to start: {error_msg}")

                return True, f"http://localhost:{self.preview_port}"

            except Exception as e:
                return False, str(e)

    def stop_preview(self):
        """Stop current preview process"""
        if self.current_process:
            self.current_process.terminate()
            try:
                self.current_process.wait(timeout=5)
            except subprocess.TimeoutExpired:
                self.current_process.kill()
            self.current_process = None

    def cleanup(self):
        """Cleanup resources"""
        self.stop_preview()
        # Clean up temporary files
        for temp_file in TEMP_DIR.glob("*.py"):
            try:
                temp_file.unlink()
            except Exception as e:
                logger.error(f"Error deleting temporary file {temp_file}: {e}")

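# GradioInterface assembles the builder UI itself. MultimodalRAG is assumed to
# be the retrieval/generation backend defined earlier in this file.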
class GradioInterface:
    """Main Gradio interface builder class"""

    def __init__(self):
        """Initialize the Gradio interface builder"""
        try:
            self.rag_system = MultimodalRAG()
            self.template_manager = TemplateManager()
            self.preview_manager = PreviewManager()
            self.current_code = ""
            self.error_log = []
            self.interface = self._create_interface()

        except Exception as e:
            logger.error(f"Error initializing GradioInterface: {str(e)}")
            raise

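    # The builder UI has three tabs: Design (description + template + code
    # editor), Preview (runs the code and embeds it in an iframe), and Analysis
    # (components, functions, dependencies, requirements.txt).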
    def _create_interface(self) -> gr.Blocks:
        """Create the main Gradio interface"""
        with gr.Blocks(theme=gr.themes.Soft()) as interface:
            gr.Markdown("# 🚀 Gradio Interface Builder")

            with gr.Tabs():
                # Design Tab
                with gr.Tab("Design"):
                    with gr.Row():
                        with gr.Column(scale=2):
                            # Input Section
                            gr.Markdown("## 📝 Design Your Interface")
                            description = gr.Textbox(
                                label="Description",
                                placeholder="Describe the interface you want to create...",
                                lines=3
                            )
                            screenshot = gr.Image(
                                label="Screenshot (optional)",
                                type="pil"
                            )

                            with gr.Row():
                                generate_btn = gr.Button("🎨 Generate Interface", variant="primary")
                                clear_btn = gr.Button("🗑️ Clear")

                            # Template Selection
                            gr.Markdown("### 📚 Templates")
                            template_dropdown = gr.Dropdown(
                                choices=self._get_template_choices(),
                                label="Base Template",
                                interactive=True
                            )

                        with gr.Column(scale=3):
                            # Code Editor
                            code_editor = gr.Code(
                                label="Generated Code",
                                language="python",
                                interactive=True
                            )

                            with gr.Row():
                                validate_btn = gr.Button("✅ Validate")
                                format_btn = gr.Button("📋 Format")
                                save_template_btn = gr.Button("💾 Save as Template")

                            validation_output = gr.Markdown()

                # Preview Tab
                with gr.Tab("Preview"):
                    with gr.Row():
                        preview_btn = gr.Button("▶️ Start Preview", variant="primary")
                        stop_preview_btn = gr.Button("⏹️ Stop Preview")

                    preview_frame = gr.HTML(
                        label="Preview",
                        value="<p>Click 'Start Preview' to see your interface</p>"
                    )
                    preview_status = gr.Markdown()

                # Analysis Tab
                with gr.Tab("Analysis"):
                    analyze_btn = gr.Button("🔍 Analyze Interface")

                    with gr.Row():
                        with gr.Column():
                            gr.Markdown("### 🧩 Components")
                            components_json = gr.JSON(label="Detected Components")

                        with gr.Column():
                            gr.Markdown("### 🔄 Functions")
                            functions_json = gr.JSON(label="Interface Functions")

                    with gr.Row():
                        with gr.Column():
                            gr.Markdown("### 📦 Dependencies")
                            dependencies_json = gr.JSON(label="Required Dependencies")

                        with gr.Column():
                            gr.Markdown("### 📄 Requirements")
                            requirements_text = gr.Textbox(
                                label="requirements.txt",
                                lines=10
                            )

+
# Event handlers
|
919 |
+
generate_btn.click(
|
920 |
+
fn=self._generate_interface,
|
921 |
+
inputs=[description, screenshot, template_dropdown],
|
922 |
+
outputs=[code_editor, validation_output]
|
923 |
)
|
924 |
|
925 |
+
clear_btn.click(
|
926 |
+
fn=self._clear_interface,
|
927 |
+
outputs=[description, screenshot, code_editor, validation_output]
|
|
|
|
|
|
|
|
|
|
|
928 |
)
|
929 |
|
930 |
+
validate_btn.click(
|
931 |
+
fn=self._validate_code,
|
932 |
+
inputs=[code_editor],
|
933 |
+
outputs=[validation_output]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
934 |
)
|
935 |
|
936 |
+
format_btn.click(
|
937 |
+
fn=self._format_code,
|
938 |
+
inputs=[code_editor],
|
939 |
+
outputs=[code_editor]
|
|
|
|
|
|
|
940 |
)
|
941 |
|
942 |
+
save_template_btn.click(
|
943 |
+
fn=self._save_as_template,
|
944 |
+
inputs=[code_editor, description],
|
945 |
+
outputs=[template_dropdown, validation_output]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
946 |
)
|
947 |
|
948 |
+
preview_btn.click(
|
949 |
+
fn=self._start_preview,
|
950 |
+
inputs=[code_editor],
|
951 |
+
outputs=[preview_frame, preview_status]
|
952 |
)
|
953 |
|
954 |
+
stop_preview_btn.click(
|
955 |
+
fn=self._stop_preview,
|
956 |
+
outputs=[preview_frame, preview_status]
|
957 |
+
)
|
|
|
|
|
|
|
|
|
958 |
|
959 |
+
analyze_btn.click(
|
960 |
+
fn=self._analyze_interface,
|
961 |
+
inputs=[code_editor],
|
962 |
+
outputs=[
|
963 |
+
components_json,
|
964 |
+
functions_json,
|
965 |
+
dependencies_json,
|
966 |
+
requirements_text
|
967 |
+
]
|
968 |
+
)
|
969 |
|
970 |
+
# Update template dropdown when templates change
|
971 |
+
template_dropdown.change(
|
972 |
+
fn=self._load_template,
|
973 |
+
inputs=[template_dropdown],
|
974 |
+
outputs=[code_editor]
|
975 |
+
)
|
|
|
|
|
976 |
|
977 |
+
return interface
|
|
|
978 |
|
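    # The empty string prepended to the dropdown choices stands for "no
    # template"; _generate_interface treats a falsy template name as a request
    # to generate directly from the description (and optional screenshot).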
    def _get_template_choices(self) -> List[str]:
        """Get list of available templates"""
        templates = self.template_manager.list_templates()
        return [""] + [t["name"] for t in templates]

    def _generate_interface(
        self,
        description: str,
        screenshot: Optional[Image.Image],
        template_name: str
    ) -> Tuple[str, str]:
        """Generate interface code"""
        try:
            if template_name:
                template = self.template_manager.get_template(template_name)
                if template:
                    code = self.rag_system.generate_code(description, template.code)
                else:
                    raise ValueError(f"Template {template_name} not found")
            else:
                code = self.rag_system.generate_interface(screenshot, description)

            self.current_code = code
            return code, "✅ Code generated successfully"

        except Exception as e:
            error_msg = f"❌ Error generating interface: {str(e)}"
            logger.error(error_msg)
            return "", error_msg

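    # validate_code() and CodeFormatter, used by the helpers below, are assumed
    # to be the syntax-check and code-formatting utilities defined earlier in
    # this file.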
    def _clear_interface(self) -> Tuple[str, None, str, str]:
        """Clear all inputs and outputs"""
        self.current_code = ""
        return "", None, "", ""

    def _validate_code(self, code: str) -> str:
        """Validate code syntax"""
        is_valid, message = validate_code(code)
        return f"{'✅' if is_valid else '❌'} {message}"

    def _format_code(self, code: str) -> str:
        """Format code"""
        try:
            return CodeFormatter.format_code(code)
        except Exception as e:
            logger.error(f"Error formatting code: {e}")
            return code

    def _save_as_template(self, code: str, description: str) -> Tuple[List[str], str]:
        """Save current code as template"""
        try:
            # Generate template name
            base_name = "custom_template"
            counter = 1
            name = base_name
            while self.template_manager.get_template(name):
                name = f"{base_name}_{counter}"
                counter += 1

            # Create template (store component names so the template stays JSON-serializable)
            template = Template(
                code=code,
                description=description,
                components=[comp.type.value for comp in InterfaceAnalyzer.extract_components(code)],
                metadata={"category": "custom"}
            )

            # Save template
            if self.template_manager.save_template(name, template):
                return self._get_template_choices(), f"✅ Template saved as {name}"
            else:
                raise Exception("Failed to save template")

        except Exception as e:
            error_msg = f"❌ Error saving template: {str(e)}"
            logger.error(error_msg)
            return self._get_template_choices(), error_msg

    def _start_preview(self, code: str) -> Tuple[str, str]:
        """Start interface preview"""
        success, result = self.preview_manager.start_preview(code)
        if success:
            return f'<iframe src="{result}" width="100%" height="600px"></iframe>', "✅ Preview started"
        else:
            return "", f"❌ Preview failed: {result}"

    def _stop_preview(self) -> Tuple[str, str]:
        """Stop interface preview"""
        self.preview_manager.stop_preview()
        return "<p>Preview stopped</p>", "✅ Preview stopped"

    def _load_template(self, template_name: str) -> str:
        """Load selected template"""
        if not template_name:
            return ""

        template = self.template_manager.get_template(template_name)
        if template:
            return template.code
        return ""

    def _analyze_interface(self, code: str) -> Tuple[Dict, Dict, Dict, str]:
        """Analyze interface structure"""
        try:
            analysis = InterfaceAnalyzer.analyze_interface_structure(code)

            # Generate requirements.txt
            dependencies = analysis.get("dependencies", [])
            requirements = CodeGenerator.generate_requirements(dependencies)

            return (
                analysis.get("components", {}),
                analysis.get("functions", {}),
                {"dependencies": dependencies},
                requirements
            )

        except Exception as e:
            logger.error(f"Error analyzing interface: {e}")
            return {}, {}, {}, ""

    def launch(self, **kwargs):
        """Launch the interface"""
        try:
            self.interface.launch(**kwargs)
        finally:
            self.cleanup()

    def cleanup(self):
        """Cleanup resources"""
        try:
            self.preview_manager.cleanup()
            self.rag_system.cleanup()
        except Exception as e:
            logger.error(f"Error during cleanup: {e}")

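# Entry point. share=True asks Gradio for a public share link, and
# server_name="0.0.0.0" binds all interfaces so the app is reachable from
# outside the container.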
def main():
    """Main entry point"""
    try:
        # Set up logging
        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
        )

        # Create and launch interface
        interface = GradioInterface()
        interface.launch(
            share=True,
            debug=True,
            server_name="0.0.0.0"
        )

    except Exception as e:
        logger.error(f"Application error: {e}")
        raise

if __name__ == "__main__":