Nick088 committed on
Commit
7d81189
1 Parent(s): 75b503f

removed enable_attention_slicing

enable_attention_slicing saves memory but slightly slows inference. Since ZeroGPU currently runs on 69 GB of RAM, the optimization isn't needed, so I removed it and lowered the ZeroGPU duration from 180 to 160 seconds.
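
For context, here is a minimal sketch of the trade-off described above, assuming the standard diffusers FluxPipeline API (enable_attention_slicing() is the diffusers call this commit removes):

import torch
from diffusers import FluxPipeline

# Load the FLUX.1-dev pipeline in bfloat16, as in app.py.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
)
pipe.to("cuda")

# enable_attention_slicing() computes attention in smaller slices,
# trading some inference speed for lower peak memory. On ZeroGPU's
# large (~69 GB at the time of this commit) allocation the saving is
# unnecessary, which is why the commit drops the call:
# pipe.enable_attention_slicing()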

Files changed (1)
  1. app.py +1 -5
app.py CHANGED
@@ -26,12 +26,8 @@ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES", "0")
 pipe = FluxPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16)
 pipe.to(device)
 
-# Enable memory optimizations
-pipe.enable_attention_slicing()
-
-
 # Define the image generation function
-@spaces.GPU(duration=180)
+@spaces.GPU(duration=160)
 def generate_image(prompt, num_inference_steps, height, width, guidance_scale, seed, num_images_per_prompt, progress=gr.Progress(track_tqdm=True)):
     if seed == 0:
         seed = random.randint(1, MAX_SEED)