Manjushri committed on
Commit ad055e2 · 1 Parent(s): 18906cd

Update app.py

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -11,12 +11,12 @@ if torch.cuda.is_available():
  pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
  pipe.enable_xformers_memory_efficient_attention()
  pipe = pipe.to(device)
- pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
+ #pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
  torch.cuda.empty_cache()
- refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
- refiner.enable_xformers_memory_efficient_attention()
- refiner.enable_sequential_cpu_offload()
- refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
+ #refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True, torch_dtype=torch.float16, variant="fp16")
+ #refiner.enable_xformers_memory_efficient_attention()
+ #refiner.enable_sequential_cpu_offload()
+ #refiner.unet = torch.compile(refiner.unet, mode="reduce-overhead", fullgraph=True)
  else:
  pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
  pipe = pipe.to(device)
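
For context, a minimal sketch of how the model-loading branch reads after this commit. The lines inside the branches come from the diff itself; the imports and the `device` assignment are assumptions, since they sit above the hunk and are not shown here.

# Sketch of the affected block of app.py after this commit.
# Imports and `device` are assumed (not part of the shown hunk).
import torch
from diffusers import DiffusionPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"  # assumed; defined earlier in app.py

if torch.cuda.is_available():
    # GPU path: fp16 weights plus xformers attention to keep VRAM usage down.
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/sdxl-turbo",
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
    # torch.compile on the UNet and the SDXL refiner pipeline are commented out by this commit.
    torch.cuda.empty_cache()
else:
    # CPU fallback: full-precision weights, no xformers.
    pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
    pipe = pipe.to(device)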