ginipick committed
Commit 9b3914d · verified · 1 Parent(s): 8b94da2

Update app.py

Files changed (1)
  1. app.py +30 -505
app.py CHANGED
@@ -1,510 +1,35 @@
-import types
-import random
-import spaces
-import logging
 import os
-from pathlib import Path
-from datetime import datetime
-
-import torch
-import numpy as np
-import torchaudio
-from diffusers import AutoencoderKLWan, UniPCMultistepScheduler
-from diffusers.utils import export_to_video
-from diffusers import AutoModel
-import gradio as gr
-import tempfile
-from huggingface_hub import hf_hub_download
-
-from src.pipeline_wan_nag import NAGWanPipeline
-from src.transformer_wan_nag import NagWanTransformer3DModel
-
-# MMAudio imports
-try:
-    import mmaudio
-except ImportError:
-    os.system("pip install -e .")
-    import mmaudio
-
-from mmaudio.eval_utils import (ModelConfig, all_model_cfg, generate as mmaudio_generate,
-                                load_video, make_video, setup_eval_logging)
-from mmaudio.model.flow_matching import FlowMatching
-from mmaudio.model.networks import MMAudio, get_my_mmaudio
-from mmaudio.model.sequence_config import SequenceConfig
-from mmaudio.model.utils.features_utils import FeaturesUtils
-
-# NAG Video Settings
-MOD_VALUE = 32
-DEFAULT_DURATION_SECONDS = 4
-DEFAULT_STEPS = 4
-DEFAULT_SEED = 2025
-DEFAULT_H_SLIDER_VALUE = 480
-DEFAULT_W_SLIDER_VALUE = 832
-NEW_FORMULA_MAX_AREA = 480.0 * 832.0
-
-SLIDER_MIN_H, SLIDER_MAX_H = 128, 896
-SLIDER_MIN_W, SLIDER_MAX_W = 128, 896
-MAX_SEED = np.iinfo(np.int32).max
-
-FIXED_FPS = 16
-MIN_FRAMES_MODEL = 8
-MAX_FRAMES_MODEL = 129
-
-DEFAULT_NAG_NEGATIVE_PROMPT = "Static, motionless, still, ugly, bad quality, worst quality, poorly drawn, low resolution, blurry, lack of details"
-DEFAULT_AUDIO_NEGATIVE_PROMPT = "music"
-
-# NAG Model Settings
-MODEL_ID = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
-SUB_MODEL_ID = "vrgamedevgirl84/Wan14BT2VFusioniX"
-SUB_MODEL_FILENAME = "Wan14BT2VFusioniX_fp16_.safetensors"
-LORA_REPO_ID = "Kijai/WanVideo_comfy"
-LORA_FILENAME = "Wan21_CausVid_14B_T2V_lora_rank32.safetensors"
-
-# MMAudio Settings
-torch.backends.cuda.matmul.allow_tf32 = True
-torch.backends.cudnn.allow_tf32 = True
-log = logging.getLogger()
-device = 'cuda'
-dtype = torch.bfloat16
-audio_model_config: ModelConfig = all_model_cfg['large_44k_v2']
-audio_model_config.download_if_needed()
-setup_eval_logging()
-
-# Initialize NAG Video Model
-vae = AutoencoderKLWan.from_pretrained(MODEL_ID, subfolder="vae", torch_dtype=torch.float32)
-wan_path = hf_hub_download(repo_id=SUB_MODEL_ID, filename=SUB_MODEL_FILENAME)
-transformer = NagWanTransformer3DModel.from_single_file(wan_path, torch_dtype=torch.bfloat16)
-pipe = NAGWanPipeline.from_pretrained(
-    MODEL_ID, vae=vae, transformer=transformer, torch_dtype=torch.bfloat16
-)
-pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=5.0)
-pipe.to("cuda")
-
-pipe.transformer.__class__.attn_processors = NagWanTransformer3DModel.attn_processors
-pipe.transformer.__class__.set_attn_processor = NagWanTransformer3DModel.set_attn_processor
-pipe.transformer.__class__.forward = NagWanTransformer3DModel.forward
-
-# Initialize MMAudio Model
-def get_mmaudio_model() -> tuple[MMAudio, FeaturesUtils, SequenceConfig]:
-    seq_cfg = audio_model_config.seq_cfg
-
-    net: MMAudio = get_my_mmaudio(audio_model_config.model_name).to(device, dtype).eval()
-    net.load_weights(torch.load(audio_model_config.model_path, map_location=device, weights_only=True))
-    log.info(f'Loaded MMAudio weights from {audio_model_config.model_path}')
-
-    feature_utils = FeaturesUtils(tod_vae_ckpt=audio_model_config.vae_path,
-                                  synchformer_ckpt=audio_model_config.synchformer_ckpt,
-                                  enable_conditions=True,
-                                  mode=audio_model_config.mode,
-                                  bigvgan_vocoder_ckpt=audio_model_config.bigvgan_16k_path,
-                                  need_vae_encoder=False)
-    feature_utils = feature_utils.to(device, dtype).eval()
-
-    return net, feature_utils, seq_cfg
-
-audio_net, audio_feature_utils, audio_seq_cfg = get_mmaudio_model()
-
-# Audio generation function
-@torch.inference_mode()
-def add_audio_to_video(video_path, prompt, audio_negative_prompt, audio_steps, audio_cfg_strength, duration):
-    """Generate and add audio to video using MMAudio"""
-    rng = torch.Generator(device=device)
-    rng.seed()  # Random seed for audio
-    fm = FlowMatching(min_sigma=0, inference_mode='euler', num_steps=audio_steps)
-
-    video_info = load_video(video_path, duration)
-    clip_frames = video_info.clip_frames
-    sync_frames = video_info.sync_frames
-    duration = video_info.duration_sec
-    clip_frames = clip_frames.unsqueeze(0)
-    sync_frames = sync_frames.unsqueeze(0)
-    audio_seq_cfg.duration = duration
-    audio_net.update_seq_lengths(audio_seq_cfg.latent_seq_len, audio_seq_cfg.clip_seq_len, audio_seq_cfg.sync_seq_len)
-
-    audios = mmaudio_generate(clip_frames,
-                              sync_frames, [prompt],
-                              negative_text=[audio_negative_prompt],
-                              feature_utils=audio_feature_utils,
-                              net=audio_net,
-                              fm=fm,
-                              rng=rng,
-                              cfg_strength=audio_cfg_strength)
-    audio = audios.float().cpu()[0]
-
-    # Create video with audio
-    video_with_audio_path = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4').name
-    make_video(video_info, video_with_audio_path, audio, sampling_rate=audio_seq_cfg.sampling_rate)
-
-    return video_with_audio_path
-
-# Combined generation function
-def get_duration(prompt, nag_negative_prompt, nag_scale, height, width, duration_seconds,
-                 steps, seed, randomize_seed, enable_audio, audio_negative_prompt,
-                 audio_steps, audio_cfg_strength):
-    # Calculate total duration including audio processing if enabled
-    video_duration = int(duration_seconds) * int(steps) * 2.25 + 5
-    audio_duration = 30 if enable_audio else 0  # Additional time for audio processing
-    return video_duration + audio_duration
-
-@spaces.GPU(duration=get_duration)
-def generate_video_with_audio(
-    prompt,
-    nag_negative_prompt, nag_scale,
-    height=DEFAULT_H_SLIDER_VALUE, width=DEFAULT_W_SLIDER_VALUE, duration_seconds=DEFAULT_DURATION_SECONDS,
-    steps=DEFAULT_STEPS,
-    seed=DEFAULT_SEED, randomize_seed=False,
-    enable_audio=True, audio_negative_prompt=DEFAULT_AUDIO_NEGATIVE_PROMPT,
-    audio_steps=25, audio_cfg_strength=4.5,
-):
-    # Generate video first
-    target_h = max(MOD_VALUE, (int(height) // MOD_VALUE) * MOD_VALUE)
-    target_w = max(MOD_VALUE, (int(width) // MOD_VALUE) * MOD_VALUE)
-
-    num_frames = np.clip(int(round(int(duration_seconds) * FIXED_FPS) + 1), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
-
-    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
-
-    with torch.inference_mode():
-        nag_output_frames_list = pipe(
-            prompt=prompt,
-            nag_negative_prompt=nag_negative_prompt,
-            nag_scale=nag_scale,
-            nag_tau=3.5,
-            nag_alpha=0.5,
-            height=target_h, width=target_w, num_frames=num_frames,
-            guidance_scale=0.,
-            num_inference_steps=int(steps),
-            generator=torch.Generator(device="cuda").manual_seed(current_seed)
-        ).frames[0]
-
-    # Save initial video without audio
-    with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as tmpfile:
-        temp_video_path = tmpfile.name
-    export_to_video(nag_output_frames_list, temp_video_path, fps=FIXED_FPS)
-
-    # Add audio if enabled
-    if enable_audio:
-        try:
-            final_video_path = add_audio_to_video(
-                temp_video_path,
-                prompt,  # Use the same prompt for audio generation
-                audio_negative_prompt,
-                audio_steps,
-                audio_cfg_strength,
-                duration_seconds
-            )
-            # Clean up temp video
-            if os.path.exists(temp_video_path):
-                os.remove(temp_video_path)
-        except Exception as e:
-            log.error(f"Audio generation failed: {e}")
-            final_video_path = temp_video_path
-    else:
-        final_video_path = temp_video_path
-
-    return final_video_path, current_seed
-
-# Example generation function
-def generate_with_example(prompt, nag_negative_prompt, nag_scale):
-    video_path, seed = generate_video_with_audio(
-        prompt=prompt,
-        nag_negative_prompt=nag_negative_prompt, nag_scale=nag_scale,
-        height=DEFAULT_H_SLIDER_VALUE, width=DEFAULT_W_SLIDER_VALUE,
-        duration_seconds=DEFAULT_DURATION_SECONDS,
-        steps=DEFAULT_STEPS,
-        seed=DEFAULT_SEED, randomize_seed=False,
-        enable_audio=True, audio_negative_prompt=DEFAULT_AUDIO_NEGATIVE_PROMPT,
-        audio_steps=25, audio_cfg_strength=4.5,
-    )
-    return video_path, \
-           DEFAULT_H_SLIDER_VALUE, DEFAULT_W_SLIDER_VALUE, \
-           DEFAULT_DURATION_SECONDS, DEFAULT_STEPS, seed, \
-           True, DEFAULT_AUDIO_NEGATIVE_PROMPT, 25, 4.5
-
-# Examples with audio descriptions
-examples = [
-    ["Midnight highway outside a neon-lit city. A black 1973 Porsche 911 Carrera RS speeds at 120 km/h. Inside, a stylish singer-guitarist sings while driving, vintage sunburst guitar on the passenger seat. Sodium streetlights streak over the hood; RGB panels shift magenta to blue on the driver. Camera: drone dive, Russian-arm low wheel shot, interior gimbal, FPV barrel roll, overhead spiral. Neo-noir palette, rain-slick asphalt reflections, roaring flat-six engine blended with live guitar.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
-    ["Arena rock concert packed with 20 000 fans. A flamboyant lead guitarist in leather jacket and mirrored aviators shreds a cherry-red Flying V on a thrust stage. Pyro flames shoot up on every downbeat, CO₂ jets burst behind. Moving-head spotlights swirl teal and amber, follow-spots rim-light the guitarist’s hair. Steadicam 360-orbit, crane shot rising over crowd, ultra-slow-motion pick attack at 1 000 fps. Film-grain teal-orange grade, thunderous crowd roar mixes with screaming guitar solo.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
-    ["Golden-hour countryside road winding through rolling wheat fields. A man and woman ride a vintage café-racer motorcycle, hair and scarf fluttering in the warm breeze. Drone chase shot reveals endless patchwork farmland; low slider along rear wheel captures dust trail. Sun-flare back-lights the riders, lens blooms on highlights. Soft acoustic rock underscore; engine rumble mixed at –8 dB. Warm pastel color grade, gentle film-grain for nostalgic vibe.", DEFAULT_NAG_NEGATIVE_PROMPT, 11],
-]
-
-# CSS styling
-css = """
-.container {
-    max-width: 1400px;
-    margin: auto;
-    padding: 20px;
-}
-.main-title {
-    text-align: center;
-    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-    -webkit-background-clip: text;
-    -webkit-text-fill-color: transparent;
-    font-size: 2.5em;
-    font-weight: bold;
-    margin-bottom: 10px;
-}
-.subtitle {
-    text-align: center;
-    color: #6b7280;
-    margin-bottom: 30px;
-}
-.prompt-container {
-    background: linear-gradient(135deg, #f3f4f6 0%, #e5e7eb 100%);
-    border-radius: 15px;
-    padding: 20px;
-    margin-bottom: 20px;
-    box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
-}
-.generate-btn {
-    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
-    color: white;
-    font-size: 1.2em;
-    font-weight: bold;
-    padding: 15px 30px;
-    border-radius: 10px;
-    border: none;
-    cursor: pointer;
-    transition: all 0.3s ease;
-    width: 100%;
-    margin-top: 20px;
-}
-.generate-btn:hover {
-    transform: translateY(-2px);
-    box-shadow: 0 6px 20px rgba(102, 126, 234, 0.4);
-}
-.video-output {
-    border-radius: 15px;
-    overflow: hidden;
-    box-shadow: 0 10px 30px rgba(0, 0, 0, 0.2);
-    background: #1a1a1a;
-    padding: 10px;
-}
-.settings-panel {
-    background: #f9fafb;
-    border-radius: 15px;
-    padding: 20px;
-    box-shadow: 0 2px 10px rgba(0, 0, 0, 0.05);
-}
-.slider-container {
-    background: white;
-    padding: 15px;
-    border-radius: 10px;
-    margin-bottom: 15px;
-    box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
-}
-.info-box {
-    background: linear-gradient(135deg, #e0e7ff 0%, #c7d2fe 100%);
-    border-radius: 10px;
-    padding: 15px;
-    margin: 10px 0;
-    border-left: 4px solid #667eea;
-}
-.audio-settings {
-    background: linear-gradient(135deg, #fef3c7 0%, #fde68a 100%);
-    border-radius: 10px;
-    padding: 15px;
-    margin-top: 10px;
-    border-left: 4px solid #f59e0b;
-}
-"""
-
-# Gradio interface
-with gr.Blocks(css=css, theme=gr.themes.Soft()) as demo:
-    with gr.Column(elem_classes="container"):
-        gr.HTML("""
-        <h1 class="main-title">🎬 VEO3 Free</h1>
-        <p class="subtitle">Wan2.1-T2V-14B + Fast 4-step with NAG + Automatic Audio Generation</p>
-        """)
-
-        gr.HTML("""
-        <div class="badge-container"
-             style="display:flex; gap:8px; flex-wrap:wrap; justify-content:center; align-items:center;">
-            <a href="https://huggingface.co/spaces/ginigen/VEO3-Free" target="_blank">
-                <img src="https://img.shields.io/static/v1?label=Text%20to%20Video%2BAudio&message=VEO3%20free&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
-            </a>
-            <a href="https://huggingface.co/spaces/ginigen/VEO3-Free-mirror" target="_blank">
-                <img src="https://img.shields.io/static/v1?label=Text%20to%20Video%2BAudio&message=VEO3%20free%28mirror%29&color=%230000ff&labelColor=%23800080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
-            </a>
-            <a href="https://huggingface.co/spaces/ginigen/VEO3-Directors" target="_blank">
-                <img src="https://img.shields.io/static/v1?label=DIRECTORS&message=VEO3&color=%23ffd700&labelColor=%23000080&logo=huggingface&logoColor=%23ffa500&style=for-the-badge" alt="badge">
-            </a>
-            <a href="https://discord.gg/openfreeai" target="_blank">
-                <img src="https://img.shields.io/static/v1?label=Discord&message=Openfree%20AI&color=%230000ff&labelColor=%23800080&logo=discord&logoColor=%23ffa500&style=for-the-badge" alt="badge">
-            </a>
-        </div>
-        """)
-        with gr.Row():
-            with gr.Column(scale=1):
-                with gr.Group(elem_classes="prompt-container"):
-                    prompt = gr.Textbox(
-                        label="✨ Video Prompt (also used for audio generation)",
-                        placeholder="Describe your video scene in detail...",
-                        lines=3,
-                        elem_classes="prompt-input"
-                    )
-
-                    with gr.Accordion("🎨 Advanced Video Settings", open=False):
-                        nag_negative_prompt = gr.Textbox(
-                            label="Video Negative Prompt",
-                            value=DEFAULT_NAG_NEGATIVE_PROMPT,
-                            lines=2,
-                        )
-                        nag_scale = gr.Slider(
-                            label="NAG Scale",
-                            minimum=1.0,
-                            maximum=20.0,
-                            step=0.25,
-                            value=11.0,
-                            info="Higher values = stronger guidance"
-                        )
-
-                with gr.Group(elem_classes="settings-panel"):
-                    gr.Markdown("### ⚙️ Video Settings")
-
-                    with gr.Row():
-                        duration_seconds_input = gr.Slider(
-                            minimum=1,
-                            maximum=8,
-                            step=1,
-                            value=DEFAULT_DURATION_SECONDS,
-                            label="📱 Duration (seconds)",
-                            elem_classes="slider-container"
-                        )
-                        steps_slider = gr.Slider(
-                            minimum=1,
-                            maximum=8,
-                            step=1,
-                            value=DEFAULT_STEPS,
-                            label="🔄 Inference Steps",
-                            elem_classes="slider-container"
-                        )
-
-                    with gr.Row():
-                        height_input = gr.Slider(
-                            minimum=SLIDER_MIN_H,
-                            maximum=SLIDER_MAX_H,
-                            step=MOD_VALUE,
-                            value=DEFAULT_H_SLIDER_VALUE,
-                            label=f"📐 Height (×{MOD_VALUE})",
-                            elem_classes="slider-container"
-                        )
-                        width_input = gr.Slider(
-                            minimum=SLIDER_MIN_W,
-                            maximum=SLIDER_MAX_W,
-                            step=MOD_VALUE,
-                            value=DEFAULT_W_SLIDER_VALUE,
-                            label=f"📐 Width (×{MOD_VALUE})",
-                            elem_classes="slider-container"
-                        )
-
-                    with gr.Row():
-                        seed_input = gr.Slider(
-                            label="🌱 Seed",
-                            minimum=0,
-                            maximum=MAX_SEED,
-                            step=1,
-                            value=DEFAULT_SEED,
-                            interactive=True
-                        )
-                        randomize_seed_checkbox = gr.Checkbox(
-                            label="🎲 Random Seed",
-                            value=True,
-                            interactive=True
-                        )
-
-                with gr.Group(elem_classes="audio-settings"):
-                    gr.Markdown("### 🎵 Audio Generation Settings")
-
-                    enable_audio = gr.Checkbox(
-                        label="🔊 Enable Automatic Audio Generation",
-                        value=True,
-                        interactive=True
-                    )
-
-                    with gr.Column(visible=True) as audio_settings_group:
-                        audio_negative_prompt = gr.Textbox(
-                            label="Audio Negative Prompt",
-                            value=DEFAULT_AUDIO_NEGATIVE_PROMPT,
-                            placeholder="Elements to avoid in audio (e.g., music, speech)",
-                        )
-
-                        with gr.Row():
-                            audio_steps = gr.Slider(
-                                minimum=10,
-                                maximum=50,
-                                step=5,
-                                value=25,
-                                label="🎚️ Audio Steps",
-                                info="More steps = better quality"
-                            )
-                            audio_cfg_strength = gr.Slider(
-                                minimum=1.0,
-                                maximum=10.0,
-                                step=0.5,
-                                value=4.5,
-                                label="🎛️ Audio Guidance",
-                                info="Strength of prompt guidance"
-                            )
-
-                    # Toggle audio settings visibility
-                    enable_audio.change(
-                        fn=lambda x: gr.update(visible=x),
-                        inputs=[enable_audio],
-                        outputs=[audio_settings_group]
-                    )
-
-                generate_button = gr.Button(
-                    "🎬 Generate Video with Audio",
-                    variant="primary",
-                    elem_classes="generate-btn"
-                )
-
-            with gr.Column(scale=1):
-                video_output = gr.Video(
-                    label="Generated Video with Audio",
-                    autoplay=True,
-                    interactive=False,
-                    elem_classes="video-output"
-                )
-
-                gr.HTML("""
-                <div style="text-align: center; margin-top: 20px; color: #6b7280;">
-                    <p>💡 Tip: The same prompt is used for both video and audio generation!</p>
-                    <p>🎧 Audio is automatically matched to the visual content</p>
-                </div>
-                """)
-
-        gr.Markdown("### 🎯 Example Prompts")
-        gr.Examples(
-            examples=examples,
-            fn=generate_with_example,
-            inputs=[prompt, nag_negative_prompt, nag_scale],
-            outputs=[
-                video_output,
-                height_input, width_input, duration_seconds_input,
-                steps_slider, seed_input,
-                enable_audio, audio_negative_prompt, audio_steps, audio_cfg_strength
-            ],
-            cache_examples="lazy"
-        )
-
-        # Connect UI elements
-        ui_inputs = [
-            prompt,
-            nag_negative_prompt, nag_scale,
-            height_input, width_input, duration_seconds_input,
-            steps_slider,
-            seed_input, randomize_seed_checkbox,
-            enable_audio, audio_negative_prompt, audio_steps, audio_cfg_strength,
-        ]
-
-        generate_button.click(
-            fn=generate_video_with_audio,
-            inputs=ui_inputs,
-            outputs=[video_output, seed_input],
-        )
+import sys
+import streamlit as st
+from tempfile import NamedTemporaryFile
+
+def main():
+    try:
+        # Get the code from secrets
+        code = os.environ.get("MAIN_CODE")
+
+        if not code:
+            st.error("⚠️ The application code wasn't found in secrets. Please add the MAIN_CODE secret.")
+            return
+
+        # Create a temporary Python file
+        with NamedTemporaryFile(suffix='.py', delete=False, mode='w') as tmp:
+            tmp.write(code)
+            tmp_path = tmp.name
+
+        # Execute the code
+        exec(compile(code, tmp_path, 'exec'), globals())
+
+        # Clean up the temporary file
+        try:
+            os.unlink(tmp_path)
+        except:
+            pass
+
+    except Exception as e:
+        st.error(f"⚠️ Error loading or executing the application: {str(e)}")
+        import traceback
+        st.code(traceback.format_exc())
 
 if __name__ == "__main__":
-    demo.queue().launch()
+    main()