Ashoka74 committed on
Commit
d8bf62c
·
verified ·
1 Parent(s): c1ad03b

Update gradio_demo.py

Browse files
Files changed (1) hide show
  1. gradio_demo.py +4 -4
gradio_demo.py CHANGED
@@ -95,15 +95,15 @@ def enable_efficient_attention():
95
  print(f"Xformers error: {e}")
96
  print("Falling back to sliced attention")
97
  # Use sliced attention for RTX 2070
98
- unet.set_attention_slice_size(4)
99
- vae.set_attention_slice_size(4)
100
  unet.set_attn_processor(AttnProcessor2_0())
101
  vae.set_attn_processor(AttnProcessor2_0())
102
  else:
103
  # Fallback for when xformers is not available
104
  print("Using sliced attention")
105
- unet.set_attention_slice_size(4)
106
- vae.set_attention_slice_size(4)
107
  unet.set_attn_processor(AttnProcessor2_0())
108
  vae.set_attn_processor(AttnProcessor2_0())
109
 
 
95
  print(f"Xformers error: {e}")
96
  print("Falling back to sliced attention")
97
  # Use sliced attention for RTX 2070
98
+ # unet.set_attention_slice_size(4)
99
+ # vae.set_attention_slice_size(4)
100
  unet.set_attn_processor(AttnProcessor2_0())
101
  vae.set_attn_processor(AttnProcessor2_0())
102
  else:
103
  # Fallback for when xformers is not available
104
  print("Using sliced attention")
105
+ # unet.set_attention_slice_size(4)
106
+ # vae.set_attention_slice_size(4)
107
  unet.set_attn_processor(AttnProcessor2_0())
108
  vae.set_attn_processor(AttnProcessor2_0())
109