Duskfallcrew committed
Commit 5452465 · verified · 1 Parent(s): bcbd92e

Upload 2 files


Used Windsurf to fix a bunch of stuff

Files changed (2):
  1. app.py +398 -69
  2. requirements.txt +30 -12
app.py CHANGED
 
@@ -14,6 +14,12 @@ from urllib.parse import urlparse, unquote
  from pathlib import Path
  import tempfile
  from tqdm import tqdm
+ import psutil
+ import math
+ import shutil
+ import hashlib
+ from datetime import datetime
+ from typing import Dict, List, Optional

  # ---------------------- UTILITY FUNCTIONS ----------------------
 
@@ -94,6 +100,7 @@ def is_diffusers_model(model_path):
      return required_folders.issubset(set(os.listdir(model_path))) and os.path.isfile(os.path.join(model_path, "model_index.json"))

  # ---------------------- MODEL UTIL (From library.sdxl_model_util) ----------------------
+
  def load_models_from_sdxl_checkpoint(sdxl_base_id, checkpoint_path, device):
      """Loads SDXL model components from a checkpoint file."""
      text_encoder1 = CLIPTextModel.from_pretrained(sdxl_base_id, subfolder="text_encoder").to(device)
 
@@ -314,38 +321,300 @@ def save_sdxl_as_diffusers(args, text_encoder1, text_encoder2, vae, unet, save_d
      with output_widget:
          print(f"Model saved as {save_dtype}.")

- def convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, output_widget):
-     """Main conversion function."""
-     class Args:  # Defining Args locally within convert_model
-         def __init__(self, model_to_load, save_precision_as, epoch, global_step, reference_model, output_path, fp16):
-             self.model_to_load = model_to_load
-             self.save_precision_as = save_precision_as
-             self.epoch = epoch
-             self.global_step = global_step
-             self.reference_model = reference_model
-             self.output_path = output_path  # Using output_path even if hardcoded
-             self.fp16 = fp16
-
-     with tempfile.TemporaryDirectory() as tmpdirname:
-         args = Args(model_to_load, save_precision_as, epoch, global_step, reference_model, tmpdirname, fp16)
-         args.model_to_save = increment_filename(os.path.splitext(args.model_to_load)[0] + ".safetensors")
-
-         try:
-             load_dtype = torch.float16 if fp16 else None
-             save_dtype = get_save_dtype(save_precision_as)
-
-             is_load_checkpoint = determine_load_checkpoint(model_to_load)
-             is_save_checkpoint = not is_load_checkpoint  # reverse of load model
-
-             loaded_model_data = load_sdxl_model(args, is_load_checkpoint, load_dtype, output_widget)
-             convert_and_save_sdxl_model(args, is_save_checkpoint, loaded_model_data, save_dtype, output_widget)
-
-             with output_widget:
-                 return f"Conversion complete. Model saved to {args.model_to_save}"
-
-         except Exception as e:
-             with output_widget:
-                 return f"Conversion failed: {e}"
+ def get_save_dtype(precision):
+     """Convert a precision string to a torch dtype."""
+     if precision == "float32" or precision == "fp32":
+         return torch.float32
+     elif precision == "float16" or precision == "fp16":
+         return torch.float16
+     elif precision == "bfloat16" or precision == "bf16":
+         return torch.bfloat16
+     else:
+         raise ValueError(f"Unsupported precision: {precision}")
+
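The `if/elif` chain above is a straight table lookup; a standalone sketch of the same mapping (restating it as a dict is just an illustration, not what the diff ships):

```python
import torch

# The precision-string -> dtype mapping get_save_dtype implements,
# restated as a lookup table.
PRECISION_TO_DTYPE = {
    "float32": torch.float32, "fp32": torch.float32,
    "float16": torch.float16, "fp16": torch.float16,
    "bfloat16": torch.bfloat16, "bf16": torch.bfloat16,
}

print(PRECISION_TO_DTYPE["float16"])  # torch.float16
```

Note that the new dropdown choices below (`float32`/`float16`/`bfloat16`) all land on a branch here, whereas the old UI's `float` choice would now raise `ValueError`.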
+ def get_file_size(file_path):
+     """Get file size in GB."""
+     try:
+         size_bytes = Path(file_path).stat().st_size
+         return size_bytes / (1024 * 1024 * 1024)  # Convert to GB
+     except:
+         return None
+
+ def get_available_memory():
+     """Get available system memory in GB."""
+     return psutil.virtual_memory().available / (1024 * 1024 * 1024)
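Both helpers come down to the same bytes-to-GB division (GB here meaning GiB, 1024³ bytes); a runnable sketch of the arithmetic (the checkpoint path is hypothetical):

```python
import psutil
from pathlib import Path

def to_gb(n_bytes: int) -> float:
    # Same unit the helpers above use: GiB (1024**3 bytes).
    return n_bytes / (1024 ** 3)

print(f"Available RAM: {to_gb(psutil.virtual_memory().available):.1f} GB")

checkpoint = Path("model.safetensors")  # hypothetical local file
if checkpoint.exists():
    print(f"Checkpoint size: {to_gb(checkpoint.stat().st_size):.1f} GB")
```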
 
+ def estimate_memory_requirements(model_path, precision):
+     """Estimate memory requirements for model conversion."""
+     try:
+         # Base memory requirement for SDXL
+         base_memory = 8  # GB
+
+         # Get model size if local file
+         model_size = get_file_size(model_path) if not is_valid_url(model_path) else None
+
+         # Adjust for precision
+         memory_multiplier = 1.0 if precision in ["float16", "fp16", "bfloat16", "bf16"] else 2.0
+
+         # Calculate total required memory
+         required_memory = (base_memory + (model_size if model_size else 12)) * memory_multiplier
+
+         return required_memory
+     except:
+         return 16  # Default safe estimate
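Worked example of the estimate: a local 6.5 GB checkpoint saved at fp16 needs roughly (8 + 6.5) × 1.0 ≈ 14.5 GB, the same file at float32 doubles to 29 GB, and a URL input (size unknown, so the 12 GB placeholder applies) at float32 comes out to (8 + 12) × 2.0 = 40 GB.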
 
+ def validate_model(model_path, precision):
+     """
+     Validate the model before conversion.
+     Returns (is_valid, message)
+     """
+     try:
+         # Check if it's a URL
+         if is_valid_url(model_path):
+             try:
+                 response = requests.head(model_path)
+                 if response.status_code != 200:
+                     return False, "❌ Invalid URL or model not accessible"
+                 if 'content-length' in response.headers:
+                     size_gb = int(response.headers['content-length']) / (1024 * 1024 * 1024)
+                     if size_gb < 0.1:
+                         return False, "❌ File too small to be a valid model"
+             except:
+                 return False, "❌ Error checking URL"
+
+         # Check if it's a local file
+         elif not model_path.startswith("stabilityai/") and not Path(model_path).exists():
+             return False, "❌ Model file not found"
+
+         # Check available memory
+         available_memory = get_available_memory()
+         required_memory = estimate_memory_requirements(model_path, precision)
+
+         if available_memory < required_memory:
+             return False, f"❌ Insufficient memory. Need {math.ceil(required_memory)}GB, but only {math.ceil(available_memory)}GB available"
+
+         # Memory warning
+         memory_message = ""
+         if available_memory < required_memory * 1.5:
+             memory_message = "⚠️ Memory is tight. Consider closing other applications."
+
+         return True, f"✅ Model validated successfully. {memory_message}"
+
+     except Exception as e:
+         return False, f"❌ Validation error: {str(e)}"
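One caveat on the `requests.head(model_path)` check above: `requests.head` does not follow redirects by default, and Hugging Face `/resolve/` links typically answer with a 302, so a perfectly valid URL can fail the `status_code != 200` test. A redirect-tolerant sketch (the helper name is illustrative, not part of the diff):

```python
import requests

def url_reachable(url: str) -> bool:
    """Return True if a HEAD request ultimately lands on a 200."""
    try:
        # allow_redirects=True follows the 302 a /resolve/ link returns;
        # the timeout keeps a dead host from hanging validation.
        resp = requests.head(url, allow_redirects=True, timeout=10)
        return resp.status_code == 200
    except requests.RequestException:
        return False
```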
 
+ def cleanup_temp_files(directory=None):
+     """Clean up temporary files after conversion."""
+     try:
+         if directory:
+             shutil.rmtree(directory, ignore_errors=True)
+         # Clean up other temp files
+         temp_pattern = "*.tmp"
+         for temp_file in Path(".").glob(temp_pattern):
+             temp_file.unlink()
+     except Exception as e:
+         print(f"Warning: Error during cleanup: {e}")
+
+ def convert_model(model_to_load, save_precision_as, epoch, global_step, reference_model, fp16, output_widget):
+     """Convert the model between different formats."""
+     temp_dir = None
+     history = ConversionHistory()
+
+     try:
+         print("Starting model conversion...")
+         update_progress(output_widget, "⏳ Initializing conversion process...", 0)
+
+         # Get optimization suggestions
+         available_memory = get_available_memory()
+         auto_suggestions = get_auto_optimization_suggestions(model_to_load, save_precision_as, available_memory)
+         history_suggestions = history.get_optimization_suggestions(model_to_load)
+
+         # Display suggestions
+         if auto_suggestions or history_suggestions:
+             print("\n🔍 Optimization Suggestions:")
+             for suggestion in auto_suggestions + history_suggestions:
+                 print(suggestion)
+             print("\n")
+
+         # Validate model
+         is_valid, message = validate_model(model_to_load, save_precision_as)
+         if not is_valid:
+             raise ValueError(message)
+         print(message)
+
+         args = SimpleNamespace()
+         args.model_to_load = model_to_load
+         args.save_precision_as = save_precision_as
+         args.epoch = epoch
+         args.global_step = global_step
+         args.reference_model = reference_model
+         args.fp16 = fp16
+
+         update_progress(output_widget, "🔍 Validating input model...", 10)
+         args.model_to_save = increment_filename(os.path.splitext(args.model_to_load)[0] + ".safetensors")
+
+         save_dtype = get_save_dtype(save_precision_as)
+
+         # Create temporary directory for processing
+         temp_dir = tempfile.mkdtemp(prefix="sdxl_conversion_")
+
+         update_progress(output_widget, "📥 Loading model components...", 30)
+         is_load_checkpoint = determine_load_checkpoint(args.model_to_load)
+         if is_load_checkpoint is None:
+             raise ValueError("Invalid model format or path")
+
+         update_progress(output_widget, "🔄 Converting model...", 50)
+         loaded_model_data = load_sdxl_model(args, is_load_checkpoint, save_dtype, output_widget)
+
+         update_progress(output_widget, "💾 Saving converted model...", 80)
+         is_save_checkpoint = args.model_to_save.endswith(get_supported_extensions())
+         result = convert_and_save_sdxl_model(args, is_save_checkpoint, loaded_model_data, save_dtype, output_widget)
+
+         update_progress(output_widget, "✅ Conversion completed!", 100)
+         print(f"Model conversion completed. Saved to: {args.model_to_save}")
+
+         # Verify the converted model
+         is_valid, verify_message = verify_model_structure(args.model_to_save)
+         if not is_valid:
+             raise ValueError(verify_message)
+         print(verify_message)
+
+         # Record successful conversion
+         history.add_entry(
+             model_to_load,
+             {
+                 'precision': save_precision_as,
+                 'fp16': fp16,
+                 'epoch': epoch,
+                 'global_step': global_step
+             },
+             True,
+             "Conversion completed successfully"
+         )
+
+         cleanup_temp_files(temp_dir)
+         return result
+
+     except Exception as e:
+         if temp_dir:
+             cleanup_temp_files(temp_dir)
+
+         # Record failed conversion
+         history.add_entry(
+             model_to_load,
+             {
+                 'precision': save_precision_as,
+                 'fp16': fp16,
+                 'epoch': epoch,
+                 'global_step': global_step
+             },
+             False,
+             str(e)
+         )
+
+         error_msg = f"❌ Error during model conversion: {str(e)}"
+         print(error_msg)
+         return error_msg
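`SimpleNamespace` comes from the standard library's `types` module, which is not among the imports added at the top of this diff, so `from types import SimpleNamespace` presumably already exists elsewhere in app.py (likewise `json`, which `ConversionHistory` below depends on). Referencing `ConversionHistory` before its class definition is harmless, since Python resolves the name only when `convert_model` is actually called.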
+
+ def update_progress(output_widget, message, progress):
+     """Update the progress bar and message in the UI."""
+     progress_bar = "▓" * (progress // 5) + "░" * ((100 - progress) // 5)
+     print(f"{message}\n[{progress_bar}] {progress}%")
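The bar arithmetic yields a 20-cell track, one cell per five percentage points; a standalone sketch:

```python
# Same rendering update_progress uses: "▓" for done, "░" for remaining.
for progress in (0, 50, 100):
    bar = "▓" * (progress // 5) + "░" * ((100 - progress) // 5)
    print(f"[{bar}] {progress}%")
```

For values not divisible by 5 the two floor divisions drop a cell (33 gives 6 + 13 = 19), which never bites here because `convert_model` only passes multiples of 5.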
+
+ class ConversionHistory:
+     def __init__(self, history_file="conversion_history.json"):
+         self.history_file = history_file
+         self.history = self._load_history()
+
+     def _load_history(self) -> List[Dict]:
+         try:
+             with open(self.history_file, 'r') as f:
+                 return json.load(f)
+         except (FileNotFoundError, json.JSONDecodeError):
+             return []
+
+     def _save_history(self):
+         with open(self.history_file, 'w') as f:
+             json.dump(self.history, f, indent=2)
+
+     def add_entry(self, model_path: str, settings: Dict, success: bool, message: str):
+         entry = {
+             'timestamp': datetime.now().isoformat(),
+             'model_path': model_path,
+             'settings': settings,
+             'success': success,
+             'message': message
+         }
+         self.history.append(entry)
+         self._save_history()
+
+     def get_optimization_suggestions(self, model_path: str) -> List[str]:
+         """Analyze history and provide optimization suggestions."""
+         suggestions = []
+         similar_conversions = [h for h in self.history if h['model_path'] == model_path]
+
+         if similar_conversions:
+             success_rate = sum(1 for h in similar_conversions if h['success']) / len(similar_conversions)
+             if success_rate < 1.0:
+                 failed_attempts = [h for h in similar_conversions if not h['success']]
+                 if any('memory' in h['message'].lower() for h in failed_attempts):
+                     suggestions.append("⚠️ Previous attempts had memory issues. Consider using fp16 precision.")
+                 if any('timeout' in h['message'].lower() for h in failed_attempts):
+                     suggestions.append("⚠️ Previous attempts timed out. Try breaking down the conversion process.")
+
+         return suggestions
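A round-trip sketch of the history log (assumes the class above plus `import json`; the model path and failure message are hypothetical):

```python
history = ConversionHistory("conversion_history.json")
history.add_entry(
    "my_model.safetensors",  # hypothetical path
    {"precision": "fp16", "fp16": True, "epoch": 0, "global_step": 0},
    False,
    "Process killed: out of memory",  # hypothetical failure message
)
# A failed attempt whose message contains "memory" triggers the fp16 hint:
print(history.get_optimization_suggestions("my_model.safetensors"))
```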
+
+ def verify_model_structure(model_path: str) -> tuple[bool, str]:
+     """Verify the structure of the converted model."""
+     try:
+         if model_path.endswith('.safetensors'):
+             # Verify safetensors structure
+             with safe_open(model_path, framework="pt") as f:
+                 if not f.keys():
+                     return False, "❌ Invalid safetensors file: no tensors found"
+
+             # Check for essential components
+             required_keys = ["model.diffusion_model", "first_stage_model"]
+             missing_keys = []
+
+             # Load and check key components
+             state_dict = load_file(model_path)
+             for key in required_keys:
+                 if not any(k.startswith(key) for k in state_dict.keys()):
+                     missing_keys.append(key)
+
+             if missing_keys:
+                 return False, f"❌ Missing essential components: {', '.join(missing_keys)}"
+
+         return True, "✅ Model structure verified successfully"
+     except Exception as e:
+         return False, f"❌ Model verification failed: {str(e)}"
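Since `safe_open` already exposes tensor names without materializing any weights, the prefix check could skip the full `load_file` pass, which briefly holds the entire state dict in RAM. A sketch under that assumption (the file name is hypothetical):

```python
from safetensors import safe_open

required = ("model.diffusion_model", "first_stage_model")  # UNet and VAE prefixes

with safe_open("converted.safetensors", framework="pt") as f:  # hypothetical file
    keys = list(f.keys())  # names only; no tensors are loaded

missing = [p for p in required if not any(k.startswith(p) for k in keys)]
print(missing or "all essential prefixes present")
```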
+
+ def get_auto_optimization_suggestions(model_path: str, precision: str, available_memory: float) -> List[str]:
+     """Generate automatic optimization suggestions based on model and system characteristics."""
+     suggestions = []
+
+     # Memory-based suggestions
+     if available_memory < 16:
+         suggestions.append("💡 Limited memory detected. Consider these options:")
+         suggestions.append("   - Use fp16 precision to reduce memory usage")
+         suggestions.append("   - Close other applications before conversion")
+         suggestions.append("   - Use a machine with more RAM if available")
+
+     # Precision-based suggestions
+     if precision == "float32" and available_memory < 32:
+         suggestions.append("💡 Consider using fp16 precision for better memory efficiency")
+
+     # Model size-based suggestions
+     model_size = get_file_size(model_path) if not is_valid_url(model_path) else None
+     if model_size and model_size > 10:
+         suggestions.append("💡 Large model detected. Recommendations:")
+         suggestions.append("   - Ensure stable internet connection for URL downloads")
+         suggestions.append("   - Consider breaking down the conversion process")
+
+     return suggestions

  def upload_to_huggingface(model_path, hf_token, orgs_name, model_name, make_private, output_widget):
      """Uploads a model to the Hugging Face Hub."""
@@ -428,53 +697,113 @@ def increment_filename(filename):
      return new_name
      counter += 1

- with gr.Blocks() as demo:
-
-     # Add initial warnings (only once)
-     gr.Markdown(f"""
-     ## **⚠️ IMPORTANT WARNINGS ⚠️**
-     This App is Coded by an LLM partially, and for more information please go here: [Ktiseos Nyx](https://github.com/Ktiseos-Nyx/Sdxl-to-diffusers). The colab edition of this may indeed break AUP. This space is running on CPU and in theory SHOULD work, but may be slow. Earth and Dusk/ Ktiseos Nyx does not have the enterprise budget for ZERO GPU or any gpu sadly! Thank you to the community, John6666 especially for coming to aid when gemini would NOT fix the requirements.
-     """)
-     gr.HTML(
-         """
-         <a href='https://ko-fi.com/Z8Z8L4EO' target='_blank'>
-             <img height='36' style='border:0px;height:36px;' src='https://storage.ko-fi.com/cdn/kofi3.png?v=6' border='0' alt='Buy Me a Coffee at ko-fi.com' />
-         </a>
-         """
-     )
-     gr.Markdown(f"""**Understanding the 'Model to Load' Input:**
-
-     This field can accept any of the following:
-     * A Hugging Face model identifier (e.g., `stabilityai/stable-diffusion-xl-base-1.0`).
-     * A direct URL to a .ckpt or .safetensors model file.
-     * **Important:** Huggingface direct links need to end as /resolve/main/ and the name of the model after.""")
-     model_to_load = gr.Textbox(label="Model to Load (Checkpoint or Diffusers)", placeholder="Path to model")
-     with gr.Row():
-         save_precision_as = gr.Dropdown(
-             choices=["fp16", "bf16", "float"], value="fp16", label="Save Precision As"
-         )
-         fp16 = gr.Checkbox(label="Load as fp16 (Diffusers only)")
-     with gr.Row():
-         epoch = gr.Number(value=0, label="Epoch to Write (Checkpoint)")
-         global_step = gr.Number(value=0, label="Global Step to Write (Checkpoint)")
-
-     reference_model = gr.Textbox(label="Reference Diffusers Model",
-                                  placeholder="e.g., stabilityai/stable-diffusion-xl-base-1.0")
-
-     gr.Markdown("## Hugging Face Hub Configuration")
-     gr.Markdown("Your Access Token can be found here: [Token](https://huggingface.co/settings/tokens) **⚠️ IMPORTANT WARNINGS ⚠️** Do not share your token, and with reason you should refresh your token after use for security.")
-     hf_token = gr.Textbox(type="password", label="Hugging Face Token", placeholder="Your Hugging Face write token")  # THIS IS NEEDED
-     with gr.Row():
-         orgs_name = gr.Textbox(label="Organization Name (Optional)", placeholder="Your organization name")
-         model_name = gr.Textbox(label="Model Name", placeholder="The name of your model on Hugging Face")
-     make_private = gr.Checkbox(label="Make Repository Private", value=False)
-
-     convert_button = gr.Button("Convert and Upload")
-     output = gr.Markdown()
-
-     convert_button.click(fn=main,
-                          inputs=[model_to_load, save_precision_as, epoch, global_step, reference_model,
-                                  fp16, hf_token, orgs_name, model_name, make_private],
-                          outputs=output)
+ with gr.Blocks(theme=gr.themes.Soft()) as demo:
+     gr.Markdown("""
+     # 🎨 SDXL Model Converter
+     Convert SDXL models between different formats and precisions. Works on CPU!
+
+     ### 📥 Input Sources Supported:
+     - Local model files (.safetensors, .ckpt, etc.)
+     - Direct URLs to model files
+     - Hugging Face model repositories (e.g., 'stabilityai/stable-diffusion-xl-base-1.0')
+
+     ### ℹ️ Important Notes:
+     - This tool runs on CPU, though conversion might be slower than on GPU
+     - For Hugging Face uploads, you need a **WRITE** token (not a read token)
+     - Get your HF token here: https://huggingface.co/settings/tokens
+
+     ### 💾 Memory Usage Tips:
+     - Use FP16 precision when possible to reduce memory usage
+     - Close other applications during conversion
+     - For large models, ensure you have at least 16GB of RAM
+     """)
+
+     with gr.Row():
+         with gr.Column():
+             model_to_load = gr.Textbox(
+                 label="Model Path/URL/HF Repo",
+                 placeholder="Enter local path, URL, or Hugging Face model ID (e.g., stabilityai/stable-diffusion-xl-base-1.0)",
+                 type="text"
+             )
+
+             save_precision_as = gr.Dropdown(
+                 choices=["float32", "float16", "bfloat16"],
+                 value="float16",
+                 label="Save Precision",
+                 info="Choose model precision (float16 recommended for most cases)"
+             )
+
+             with gr.Row():
+                 epoch = gr.Number(
+                     value=0,
+                     label="Epoch",
+                     precision=0,
+                     info="Optional: Set epoch number for the saved model"
+                 )
+                 global_step = gr.Number(
+                     value=0,
+                     label="Global Step",
+                     precision=0,
+                     info="Optional: Set training step for the saved model"
+                 )
+
+             reference_model = gr.Textbox(
+                 label="Reference Model (Optional)",
+                 placeholder="Path to reference model for scheduler config",
+                 info="Optional: Used to copy scheduler configuration"
+             )
+
+             fp16 = gr.Checkbox(
+                 label="Load in FP16",
+                 value=True,
+                 info="Load model in half precision (recommended for CPU usage)"
+             )
+
+             # Hugging Face Upload Section
+             gr.Markdown("### Upload to Hugging Face (Optional)")
+
+             hf_token = gr.Textbox(
+                 label="Hugging Face Token",
+                 placeholder="Enter your WRITE token from huggingface.co/settings/tokens",
+                 type="password",
+                 info="Must be a WRITE token, not a read token!"
+             )
+
+             with gr.Row():
+                 orgs_name = gr.Textbox(
+                     label="Organization Name",
+                     placeholder="Optional: Your organization name",
+                     info="Leave empty to use your personal account"
+                 )
+                 model_name = gr.Textbox(
+                     label="Model Name",
+                     placeholder="Name for your uploaded model",
+                     info="The name your model will have on Hugging Face"
+                 )
+
+             make_private = gr.Checkbox(
+                 label="Make Private",
+                 value=True,
+                 info="Keep the uploaded model private on Hugging Face"
+             )
+
+         with gr.Column():
+             output = gr.Markdown(label="Output")
+             convert_btn = gr.Button("Convert Model", variant="primary")
+             convert_btn.click(
+                 fn=main,
+                 inputs=[
+                     model_to_load,
+                     save_precision_as,
+                     epoch,
+                     global_step,
+                     reference_model,
+                     fp16,
+                     hf_token,
+                     orgs_name,
+                     model_name,
+                     make_private
+                 ],
+                 outputs=output
+             )

  demo.launch()
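The `click` wiring follows Gradio's positional convention: each component in `inputs` feeds one parameter of `fn`, and the return value fills `outputs`. A minimal self-contained sketch of the same pattern (`fake_main` stands in for app.py's `main`, which is not shown in this diff):

```python
import gradio as gr

def fake_main(model, precision):
    # Stand-in for main(): the returned string renders into the Markdown output.
    return f"Would convert `{model}` at {precision}"

with gr.Blocks() as demo:
    model = gr.Textbox(label="Model")
    precision = gr.Dropdown(choices=["float16", "float32"], value="float16")
    out = gr.Markdown()
    gr.Button("Convert").click(fn=fake_main, inputs=[model, precision], outputs=out)

demo.launch()
```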
requirements.txt CHANGED
@@ -1,12 +1,30 @@
- numpy==1.26.4
- torch>=2.1.0
- diffusers
- transformers
- einops
- open-clip-torch
- gradio
- safetensors
- accelerate
- requests
- tqdm
- gdown
+ # Core dependencies
+ numpy>=1.26.4
+ torch>=2.0.0
+ diffusers>=0.21.4
+ transformers>=4.30.0
+ einops>=0.7.0
+ open-clip-torch>=2.23.0
+
+ # UI and interface
+ gradio>=3.50.2
+
+ # Model handling
+ safetensors>=0.3.1
+ accelerate>=0.23.0
+
+ # Utilities
+ psutil>=5.9.0
+ requests>=2.31.0
+ tqdm>=4.65.0
+ gdown>=4.7.1
+
+ # Type checking and validation
+ typing-extensions>=4.8.0
+ pydantic>=2.0.0
+
+ # File handling and compression
+ fsspec>=2023.0.0
+ filelock>=3.13.0
+
+ # Note: This app is hosted on Hugging Face Spaces, so ensure compatibility with their environment.
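Two details worth noting in the new pins: every exact pin is now a floor (`numpy==1.26.4` became `numpy>=1.26.4`) and the `torch` floor actually drops from 2.1.0 to 2.0.0. On a fresh Spaces build, a floor like `gradio>=3.50.2` resolves to whatever major version is current, so if the UI code depends on 3.x behavior, an upper bound such as `gradio>=3.50.2,<4` would be the usual safeguard.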