craftgamesnetwork committed (verified)
Commit abc878f · 1 Parent(s): eeaebf9

Update app.py

Files changed (1):
  1. app.py +4 -4
app.py CHANGED
@@ -60,27 +60,27 @@ def generate(
 
     if not use_img2img:
         pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
-        pipe.enable_model_cpu_offload()
+        pipe.enable_sequential_cpu_offload()
         pipe.unet.set_default_attn_processor()
         pipe.enable_vae_slicing()
 
         if use_vae:
             vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
             pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
-            pipe.enable_model_cpu_offload()
+            pipe.enable_sequential_cpu_offload()
             pipe.unet.set_default_attn_processor()
             pipe.enable_vae_slicing()
 
     if use_img2img:
         pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
-        pipe.enable_model_cpu_offload()
+        pipe.enable_sequential_cpu_offload()
         pipe.unet.set_default_attn_processor()
         pipe.enable_vae_slicing()
 
         if use_vae:
             vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
             pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
-            pipe.enable_model_cpu_offload()
+            pipe.enable_sequential_cpu_offload()
             pipe.unet.set_default_attn_processor()
             pipe.enable_vae_slicing()
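
The only functional change is the swap from pipe.enable_model_cpu_offload() to pipe.enable_sequential_cpu_offload() in all four pipeline setups. In diffusers, model offload moves whole components (text encoder, UNet, VAE) between CPU and GPU, while sequential offload streams individual submodules to the GPU only for their forward pass: noticeably slower per image, but with a much lower VRAM footprint, and it requires the accelerate package. Below is a minimal sketch of the pattern app.py ends up with after this commit, assuming a recent diffusers release; the model id is a placeholder, not the checkpoint this Space actually loads.

# Minimal sketch of the post-commit offload pattern (placeholder model id).
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # placeholder, not the Space's model
    torch_dtype=torch.float16,
)

# Sequential offload keeps weights in CPU RAM and moves each submodule to the
# GPU only while it runs; do not also call pipe.to("cuda").
pipe.enable_sequential_cpu_offload()

# Carried over unchanged from app.py: default attention processor and sliced
# VAE decoding, both aimed at keeping peak memory low.
pipe.unet.set_default_attn_processor()
pipe.enable_vae_slicing()

image = pipe("an astronaut riding a horse", num_inference_steps=30).images[0]
image.save("out.png")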