jbilcke-hf HF Staff committed
Commit 37d5d37 · 1 Parent(s): b55bb25
Files changed (4)
  1. .claude/settings.local.json +10 -0
  2. app.py +6 -5
  3. docs/lora.md +0 -0
  4. example_of_using_lora.py +571 -0
.claude/settings.local.json ADDED
@@ -0,0 +1,10 @@
+ {
+   "permissions": {
+     "allow": [
+       "Bash(rg:*)",
+       "Bash(find:*)",
+       "Bash(python3:*)"
+     ],
+     "deny": []
+   }
+ }
app.py CHANGED
@@ -536,11 +536,12 @@ with gr.Blocks(title="Wan2.1 1.3B LoRA Self-Forcing streaming demo") as demo:
 
         gr.Markdown("### ⚙️ Settings")
         with gr.Row():
-            seed = gr.Number(
-                label="Seed",
-                value=-1,
-                info="Use -1 for random seed",
-                precision=0
+            seed = gr.Slider(
+                label="Generation Seed (-1 for random)",
+                minimum=-1,
+                maximum=2147483647,  # 2^31 - 1
+                step=1,
+                value=-1
            )
            fps = gr.Slider(
                label="Playback FPS",
docs/lora.md ADDED
The diff for this file is too large to render. See raw diff
 
example_of_using_lora.py ADDED
@@ -0,0 +1,571 @@
+ import gradio as gr
+ import tempfile
+ import random
+ import json
+ import os
+ import shutil
+ import hashlib
+ import uuid
+ from pathlib import Path
+ import time
+ import logging
+ import torch
+ import numpy as np
+ from typing import Dict, Any, List, Optional, Tuple, Union
+ from diffusers import AutoencoderKLWan, WanPipeline
+ from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
+ from diffusers.utils import export_to_video
+
+ # Configure logging
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ logger = logging.getLogger(__name__)
+
+ # Constants
+ STORAGE_PATH = Path(os.getenv('STORAGE_PATH', './data'))
+ LORA_PATH = STORAGE_PATH / "loras"
+ OUTPUT_PATH = STORAGE_PATH / "output"
+
+ MODEL_VERSION = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
+ DEFAULT_PROMPT_PREFIX = ""
+
+ # Create necessary directories
+ STORAGE_PATH.mkdir(parents=True, exist_ok=True)
+ LORA_PATH.mkdir(parents=True, exist_ok=True)
+ OUTPUT_PATH.mkdir(parents=True, exist_ok=True)
+
+ # Global variables to track model state
+ pipe = None
+ current_lora_id = None
+
+ def format_time(seconds: float) -> str:
+     """Format a time duration in seconds as a human-readable string"""
+     hours = int(seconds // 3600)
+     minutes = int((seconds % 3600) // 60)
+     secs = int(seconds % 60)
+
+     parts = []
+     if hours > 0:
+         parts.append(f"{hours}h")
+     if minutes > 0:
+         parts.append(f"{minutes}m")
+     if secs > 0 or not parts:
+         parts.append(f"{secs}s")
+
+     return " ".join(parts)
+
+ def upload_lora_file(file: tempfile._TemporaryFileWrapper) -> Tuple[str, str]:
+     """Upload a LoRA file and return a hash-based ID for future reference
+
+     Args:
+         file: Uploaded file object from Gradio
+
+     Returns:
+         Tuple[str, str]: Hash-based ID for the stored file (returned twice, once for each output)
+     """
+     if file is None:
+         return "", ""
+
+     try:
+         # Calculate the SHA256 hash of the file
+         sha256_hash = hashlib.sha256()
+         with open(file.name, "rb") as f:
+             for chunk in iter(lambda: f.read(4096), b""):
+                 sha256_hash.update(chunk)
+         file_hash = sha256_hash.hexdigest()
+
+         # Create the destination path using the hash
+         dest_path = LORA_PATH / f"{file_hash}.safetensors"
+
+         # Check if the file already exists
+         if dest_path.exists():
+             logger.info("LoRA file already exists")
+             return file_hash, file_hash
+
+         # Copy the file to the destination
+         shutil.copy(file.name, dest_path)
+
+         logger.info("A new LoRA file has been uploaded")
+         return file_hash, file_hash
+     except Exception as e:
+         logger.error(f"Error uploading LoRA file: {e}")
+         raise gr.Error(f"Failed to upload LoRA file: {str(e)}")
+
+ def get_lora_file_path(lora_id: Optional[str]) -> Optional[Path]:
+     """Get the path to a LoRA file from its hash-based ID
+
+     Args:
+         lora_id: Hash-based ID of the stored LoRA file
+
+     Returns:
+         Path: Path to the LoRA file if found, None otherwise
+     """
+     if not lora_id:
+         return None
+
+     # Check if the file exists
+     lora_path = LORA_PATH / f"{lora_id}.safetensors"
+     if lora_path.exists():
+         return lora_path
+
+     return None
+
+ def get_or_create_pipeline(
+     enable_cpu_offload: bool = True,
+     flow_shift: float = 3.0
+ ) -> WanPipeline:
+     """Get the existing pipeline or create a new one if necessary
+
+     Args:
+         enable_cpu_offload: Whether to enable CPU offload
+         flow_shift: Flow shift parameter for the scheduler
+
+     Returns:
+         WanPipeline: The pipeline for generation
+     """
+     global pipe
+
+     if pipe is None:
+         # Create a new pipeline
+         logger.info("Creating new pipeline")
+
+         # Load VAE
+         vae = AutoencoderKLWan.from_pretrained(MODEL_VERSION, subfolder="vae", torch_dtype=torch.float32)
+
+         # Load transformer
+         pipe = WanPipeline.from_pretrained(MODEL_VERSION, vae=vae, torch_dtype=torch.bfloat16)
+
+         # Configure scheduler
+         pipe.scheduler = UniPCMultistepScheduler.from_config(
+             pipe.scheduler.config,
+             flow_shift=flow_shift
+         )
+
+         # Move to GPU
+         pipe.to("cuda")
+
+         # Enable CPU offload if requested
+         if enable_cpu_offload:
+             logger.info("Enabling CPU offload")
+             pipe.enable_model_cpu_offload()
+     else:
+         # Update the existing pipeline's scheduler if needed
+         if pipe.scheduler.config.flow_shift != flow_shift:
+             logger.info(f"Updating scheduler flow_shift from {pipe.scheduler.config.flow_shift} to {flow_shift}")
+             pipe.scheduler = UniPCMultistepScheduler.from_config(
+                 pipe.scheduler.config,
+                 flow_shift=flow_shift
+             )
+
+     return pipe
+
+ def manage_lora_weights(pipe: WanPipeline, lora_id: Optional[str], lora_weight: float) -> Tuple[bool, Optional[Path]]:
+     """Manage LoRA weights, loading/unloading only when necessary
+
+     Args:
+         pipe: The pipeline to manage LoRA weights for
+         lora_id: Hash-based ID of the LoRA file to use
+         lora_weight: Weight of the LoRA contribution
+
+     Returns:
+         Tuple[bool, Optional[Path]]: (Is using LoRA, Path to LoRA file)
+     """
+     global current_lora_id
+
+     # Determine if we should use LoRA
+     using_lora = lora_id is not None and lora_id.strip() != "" and lora_weight > 0
+
+     # If not using LoRA but we have one loaded, unload it
+     if not using_lora and current_lora_id is not None:
+         logger.info("Unloading current LoRA")
+         try:
+             # Unload current LoRA weights
+             pipe.unload_lora_weights()
+             current_lora_id = None
+         except Exception as e:
+             logger.error(f"Error unloading LoRA weights: {e}")
+         return False, None
+
+     # If using LoRA, check if we need to change weights
+     if using_lora:
+         lora_path = get_lora_file_path(lora_id)
+
+         if not lora_path:
+             # Log the event but continue with the base model
+             logger.warning(f"LoRA file with ID {lora_id} not found. Using base model instead.")
+
+             # If we had a LoRA loaded, unload it
+             if current_lora_id is not None:
+                 logger.info("Unloading current LoRA")
+                 try:
+                     pipe.unload_lora_weights()
+                 except Exception as e:
+                     logger.error(f"Error unloading LoRA weights: {e}")
+                 current_lora_id = None
+
+             return False, None
+
+         # If the LoRA ID changed, update the weights
+         if lora_id != current_lora_id:
+             # If we had a LoRA loaded, unload it first
+             if current_lora_id is not None:
+                 logger.info("Unloading current LoRA")
+                 try:
+                     pipe.unload_lora_weights()
+                 except Exception as e:
+                     logger.error(f"Error unloading LoRA weights: {e}")
+
+             # Load the new LoRA weights
+             logger.info("Loading LoRA weights")
+             try:
+                 pipe.load_lora_weights(lora_path, weight_name=str(lora_path), adapter_name="default")
+                 current_lora_id = lora_id
+             except Exception as e:
+                 logger.error(f"Error loading LoRA weights: {e}")
+                 return False, None
+         else:
+             logger.info("Using currently loaded LoRA")
+
+         return True, lora_path
+
+     return False, None
+
+ def generate_video(
+     prompt: str,
+     negative_prompt: str,
+     prompt_prefix: str,
+     width: int,
+     height: int,
+     num_frames: int,
+     guidance_scale: float,
+     flow_shift: float,
+     lora_id: Optional[str],
+     lora_weight: float,
+     inference_steps: int,
+     fps: int = 16,
+     seed: int = -1,
+     enable_cpu_offload: bool = True,
+     conditioning_image: Optional[str] = None,
+     progress=gr.Progress()
+ ) -> str:
+     """Generate a video using the Wan model with optional LoRA weights
+
+     Args:
+         prompt: Text prompt for generation
+         negative_prompt: Negative text prompt
+         prompt_prefix: Prefix to add to all prompts
+         width: Output video width
+         height: Output video height
+         num_frames: Number of frames to generate
+         guidance_scale: Classifier-free guidance scale
+         flow_shift: Flow shift parameter for the scheduler
+         lora_id: Hash-based ID of the LoRA file to use
+         lora_weight: Weight of the LoRA contribution
+         inference_steps: Number of inference steps
+         fps: Frames per second for the output video
+         seed: Random seed (-1 for random)
+         enable_cpu_offload: Whether to enable CPU offload for VRAM optimization
+         conditioning_image: Path to conditioning image for image-to-video (not used in this app)
+         progress: Gradio progress callback
+
+     Returns:
+         str: Video path
+     """
+     global pipe, current_lora_id  # Declared here so the error handler below can reset them
+
+     try:
+         # Progress 0-5%: Initialize and check inputs
+         progress(0.00, desc="Initializing generation")
+
+         # Add prefix to prompt
+         progress(0.02, desc="Processing prompt")
+         if prompt_prefix and not prompt.startswith(prompt_prefix):
+             full_prompt = f"{prompt_prefix}{prompt}"
+         else:
+             full_prompt = prompt
+
+         # Adjust num_frames to the nearest valid value (must be 8*k + 1)
+         adjusted_num_frames = ((num_frames - 1) // 8) * 8 + 1
+         if adjusted_num_frames != num_frames:
+             logger.info(f"Adjusted number of frames from {num_frames} to {adjusted_num_frames} to match model requirements")
+             num_frames = adjusted_num_frames
+
+         # Set up the random seed
+         progress(0.03, desc="Setting up random seed")
+         if seed == -1:
+             seed = random.randint(0, 2**32 - 1)
+             logger.info(f"Using randomly generated seed: {seed}")
+
+         # Set random seeds for reproducibility
+         random.seed(seed)
+         np.random.seed(seed)
+         torch.manual_seed(seed)
+         generator = torch.Generator(device="cuda")
+         generator = generator.manual_seed(seed)
+
+         # Progress 5-25%: Get or create the pipeline
+         progress(0.05, desc="Preparing model")
+         pipe = get_or_create_pipeline(enable_cpu_offload, flow_shift)
+
+         # Progress 25-40%: Manage LoRA weights
+         progress(0.25, desc="Managing LoRA weights")
+         using_lora, lora_path = manage_lora_weights(pipe, lora_id, lora_weight)
+
+         # Create a temporary file for the output
+         with tempfile.NamedTemporaryFile(suffix='.mp4', delete=False) as temp_file:
+             output_path = temp_file.name
+
+         # Progress 40-90%: Generate the video
+         progress(0.40, desc="Starting video generation")
+
+         # Set up timing for generation
+         start_time = torch.cuda.Event(enable_timing=True)
+         end_time = torch.cuda.Event(enable_timing=True)
+
+         start_time.record()
+         # Update progress once before generation starts
+         progress(0.45, desc="Running diffusion process")
+
+         # Generate the video without a callback
+         output = pipe(
+             prompt=full_prompt,
+             negative_prompt=negative_prompt,
+             height=height,
+             width=width,
+             num_frames=num_frames,
+             guidance_scale=guidance_scale,
+             num_inference_steps=inference_steps,
+             generator=generator,
+             # Note: don't pass the LoRA scale here;
+             # applying the lora weight / scale will be implemented later
+             #cross_attention_kwargs={"scale": lora_weight} if using_lora else None
+         ).frames[0]
+
+         # Update progress after generation completes
+         progress(0.90, desc="Generation complete")
+
+         end_time.record()
+         torch.cuda.synchronize()
+         generation_time = start_time.elapsed_time(end_time) / 1000  # Convert to seconds
+
+         logger.info(f"Video generation completed in {format_time(generation_time)}")
+
+         # Progress 90-95%: Export video
+         progress(0.90, desc="Exporting video")
+         export_to_video(output, output_path, fps=fps)
+
+         # Progress 95-100%: Save output and clean up
+         progress(0.95, desc="Saving video")
+
+         # Save a copy to our output directory with a UUID for potential future reference
+         output_id = str(uuid.uuid4())
+         saved_output_path = OUTPUT_PATH / f"{output_id}.mp4"
+         shutil.copy(output_path, saved_output_path)
+         logger.info(f"Saved video with ID: {output_id}")
+
+         # No longer clear the pipeline since we're reusing it;
+         # just clean up local variables
+         progress(0.98, desc="Cleaning up resources")
+
+         progress(1.0, desc="Generation complete")
+
+         return output_path
+
+     except Exception as e:
+         import traceback
+         error_msg = f"Error generating video: {str(e)}\n{traceback.format_exc()}"
+         logger.error(error_msg)
+
+         # Clean up CUDA memory on error
+         if pipe is not None:
+             # Try to unload any LoRA weights on error
+             if current_lora_id is not None:
+                 try:
+                     pipe.unload_lora_weights()
+                     current_lora_id = None
+                 except Exception:
+                     pass
+
+             # Release the pipeline on critical errors
+             try:
+                 pipe = None
+                 torch.cuda.empty_cache()
+             except Exception:
+                 pass
+
+         # Re-raise as a Gradio error for UI display
+         raise gr.Error(f"Error generating video: {str(e)}")
+
+ # Create the Gradio app
+ with gr.Blocks(title="Video Generation API") as app:
+
+     with gr.Tabs():
+         # LoRA Upload Tab
+         with gr.TabItem("1️⃣ Upload LoRA"):
+             gr.Markdown("## Upload LoRA Weights")
+             gr.Markdown("Upload your custom LoRA weights file to use for generation. The file will be automatically stored and you'll receive a unique hash-based ID.")
+
+             with gr.Row():
+                 lora_file = gr.File(label="LoRA File (safetensors format)")
+
+             with gr.Row():
+                 lora_id_output = gr.Textbox(label="LoRA Hash ID (use this in the generation tab)", interactive=False)
+
+             # This will be connected after all components are defined
+
+         # Video Generation Tab
+         with gr.TabItem("2️⃣ Generate Video"):
+
+             with gr.Row():
+                 with gr.Column(scale=1):
+                     # Input parameters
+                     prompt = gr.Textbox(
+                         label="Prompt",
+                         placeholder="Enter your prompt here...",
+                         lines=3
+                     )
+
+                     negative_prompt = gr.Textbox(
+                         label="Negative Prompt",
+                         placeholder="Enter negative prompt here...",
+                         lines=3,
+                         value="worst quality, low quality, blurry, jittery, distorted, ugly, deformed, disfigured, messy background"
+                     )
+
+                     prompt_prefix = gr.Textbox(
+                         label="Prompt Prefix",
+                         placeholder="Prefix to add to all prompts",
+                         value=DEFAULT_PROMPT_PREFIX
+                     )
+
+                     with gr.Row():
+                         width = gr.Slider(
+                             label="Width",
+                             minimum=256,
+                             maximum=1280,
+                             step=8,
+                             value=1280
+                         )
+
+                         height = gr.Slider(
+                             label="Height",
+                             minimum=256,
+                             maximum=720,
+                             step=8,
+                             value=720
+                         )
+
+                     with gr.Row():
+                         num_frames = gr.Slider(
+                             label="Number of Frames",
+                             minimum=9,
+                             maximum=257,
+                             step=8,
+                             value=49
+                         )
+
+                         fps = gr.Slider(
+                             label="FPS",
+                             minimum=1,
+                             maximum=60,
+                             step=1,
+                             value=16
+                         )
+
+                     with gr.Row():
+                         guidance_scale = gr.Slider(
+                             label="Guidance Scale",
+                             minimum=1.0,
+                             maximum=10.0,
+                             step=0.1,
+                             value=5.0
+                         )
+
+                         flow_shift = gr.Slider(
+                             label="Flow Shift",
+                             minimum=0.0,
+                             maximum=10.0,
+                             step=0.1,
+                             value=3.0
+                         )
+
+                     lora_id = gr.Textbox(
+                         label="LoRA ID (from upload tab)",
+                         placeholder="Enter your LoRA ID here...",
+                     )
+
+                     with gr.Row():
+                         lora_weight = gr.Slider(
+                             label="LoRA Weight",
+                             minimum=0.0,
+                             maximum=1.0,
+                             step=0.01,
+                             value=0.7
+                         )
+
+                         inference_steps = gr.Slider(
+                             label="Inference Steps",
+                             minimum=1,
+                             maximum=100,
+                             step=1,
+                             value=30
+                         )
+
+                     seed = gr.Slider(
+                         label="Generation Seed (-1 for random)",
+                         minimum=-1,
+                         maximum=2147483647,  # 2^31 - 1
+                         step=1,
+                         value=-1
+                     )
+
+                     enable_cpu_offload = gr.Checkbox(
+                         label="Enable Model CPU Offload (for low-VRAM GPUs)",
+                         value=False
+                     )
+
+                     generate_btn = gr.Button(
+                         "Generate Video",
+                         variant="primary"
+                     )
+
+                 with gr.Column(scale=1):
+                     # Output component - just the video preview
+                     preview_video = gr.Video(
+                         label="Generated Video",
+                         interactive=False
+                     )
+
+     # Connect the generate button
+     generate_btn.click(
+         fn=generate_video,
+         inputs=[
+             prompt,
+             negative_prompt,
+             prompt_prefix,
+             width,
+             height,
+             num_frames,
+             guidance_scale,
+             flow_shift,
+             lora_id,
+             lora_weight,
+             inference_steps,
+             fps,
+             seed,
+             enable_cpu_offload
+         ],
+         outputs=[
+             preview_video
+         ]
+     )
+
+     # Connect LoRA upload to both display fields
+     lora_file.change(
+         fn=upload_lora_file,
+         inputs=[lora_file],
+         outputs=[lora_id_output, lora_id]
+     )
+
+ # Launch the app
+ if __name__ == "__main__":
+     app.launch()
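
Since the LoRA ID returned by upload_lora_file is just the SHA-256 digest of the uploaded file, a client can compute it locally to check whether a LoRA has already been uploaded before sending it again. A minimal sketch under that assumption (lora_hash_id and my_lora.safetensors are illustrative names, not part of this commit):

import hashlib
from pathlib import Path

def lora_hash_id(path: Path) -> str:
    # Same streaming SHA-256 computation as upload_lora_file()
    sha256 = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            sha256.update(chunk)
    return sha256.hexdigest()

print(lora_hash_id(Path("my_lora.safetensors")))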