craftgamesnetwork committed
Commit 4dfd47c · verified · 1 parent: abc878f

Update app.py

Files changed (1): app.py (+0 -4)
app.py CHANGED
@@ -62,27 +62,23 @@ def generate(
     pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
     pipe.enable_sequential_cpu_offload()
     pipe.unet.set_default_attn_processor()
-    pipe.enable_vae_slicing()
 
     if use_vae:
         vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
         pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
         pipe.enable_sequential_cpu_offload()
         pipe.unet.set_default_attn_processor()
-        pipe.enable_vae_slicing()
 
     if use_img2img:
         pipe = AutoPipelineForImage2Image.from_pretrained(model, torch_dtype=torch.float16)
         pipe.enable_sequential_cpu_offload()
         pipe.unet.set_default_attn_processor()
-        pipe.enable_vae_slicing()
 
         if use_vae:
             vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
             pipe = AutoPipelineForImage2Image.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
             pipe.enable_sequential_cpu_offload()
             pipe.unet.set_default_attn_processor()
-            pipe.enable_vae_slicing()
 
         response = requests.get(url)
         init_image = Image.open(BytesIO(response.content)).convert("RGB")
 
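For context, a minimal, self-contained sketch of the text-to-image setup this hunk leaves in place (sequential CPU offload plus the default attention processor, with the enable_vae_slicing() calls removed), assuming the torch and diffusers imports app.py already uses. The model and VAE checkpoint ids below are illustrative placeholders, not the values generate() actually receives.

import torch
from diffusers import AutoencoderKL, DiffusionPipeline

# Placeholder ids for illustration; in app.py, `model` and `vaecall` arrive as arguments.
model = "stabilityai/stable-diffusion-xl-base-1.0"
vaecall = "madebyollin/sdxl-vae-fp16-fix"
use_vae = True

pipe = DiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16)
pipe.enable_sequential_cpu_offload()    # move submodules to GPU one at a time to keep VRAM low
pipe.unet.set_default_attn_processor()  # use the default attention processor

if use_vae:
    # Mirror the branch in generate(): reload the pipeline with an external VAE.
    vae = AutoencoderKL.from_pretrained(vaecall, torch_dtype=torch.float16)
    pipe = DiffusionPipeline.from_pretrained(model, vae=vae, torch_dtype=torch.float16)
    pipe.enable_sequential_cpu_offload()
    pipe.unet.set_default_attn_processor()

image = pipe("a photo of an astronaut riding a horse").images[0]

Note that enable_vae_slicing() only changes VAE decoding for multi-image batches (it decodes latents one image at a time), so for single-image calls like the one above its removal does not affect the output.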