Update app.py
app.py CHANGED
@@ -60,7 +60,7 @@ def setup_model(loaded_model_setup, prompt, model, seed, num_iterations, enable_
         raise gr.Error("You forgot to provide a prompt !")

     print(f"LOADED_MODEL SETUP: {loaded_model_setup}")
-
+
     """Clear CUDA memory before starting the training."""
     torch.cuda.empty_cache() # Free up cached memory
     gc.collect()
@@ -95,7 +95,7 @@ def setup_model(loaded_model_setup, prompt, model, seed, num_iterations, enable_

     if model == "flux":
         args.cpu_offloading = True
-        args.enable_multi_apply= True
+        args.enable_multi_apply = True
         args.multi_step_model = "flux"

     # Check if args are the same as the loaded_model_setup except for the prompt
@@ -115,15 +115,17 @@ def setup_model(loaded_model_setup, prompt, model, seed, num_iterations, enable_

         return f"{model} model already loaded with the same configuration.", loaded_model_setup

-    #
+    # Attempt to set up the model
     try:
+        # If other args differ, proceed with the setup
         args, trainer, device, dtype, shape, enable_grad, multi_apply_fn, settings = setup(args, loaded_model_setup)
         new_loaded_setup = [args, trainer, device, dtype, shape, enable_grad, multi_apply_fn, settings]
-        return f"{model} model loaded
+        return f"{model} model loaded successfully!", new_loaded_setup

     except Exception as e:
-        print(f"
-        return f"
+        print(f"Failed to load {model} model: {e}.")
+        return f"Failed to load {model} model: {e}. You can try again, as it usually finally loads on the second try :)", None
+

 def generate_image(setup_args, num_iterations):
     torch.cuda.empty_cache() # Free up cached memory
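
The diff converges on one pattern for setup_model: free GPU memory up front, reuse the already-loaded setup when the configuration has not changed, and catch setup failures so the error message reaches the Gradio UI instead of crashing the worker. The sketch below is a minimal, self-contained illustration of that pattern, not the Space's full app.py: setup_model_sketch and setup_fn are hypothetical stand-ins for the repo's setup_model and setup(args, loaded_model_setup) helpers, while gr.Error, torch.cuda.empty_cache, and gc.collect are used exactly as in the diff.

import gc

import gradio as gr
import torch


def setup_model_sketch(loaded_model_setup, prompt, model, setup_fn):
    # Hypothetical stand-in for the Space's setup_model; setup_fn plays the role
    # of setup(args, loaded_model_setup) and is assumed to return the new state.
    if not prompt:
        raise gr.Error("You forgot to provide a prompt !")

    # Clear CUDA memory before starting the training.
    torch.cuda.empty_cache()  # Free up cached memory
    gc.collect()

    # Attempt to set up the model; on failure, return the error text to the UI
    # instead of letting the exception kill the Gradio worker.
    try:
        new_loaded_setup = setup_fn(loaded_model_setup)
        return f"{model} model loaded successfully!", new_loaded_setup
    except Exception as e:
        print(f"Failed to load {model} model: {e}.")
        return f"Failed to load {model} model: {e}. You can try again.", None

Returning a (status message, setup) pair in every branch matches the two-output wiring implied by the diff's return statements: the message feeds a status textbox, and a None setup signals the caller that nothing usable was loaded and a retry is needed.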