bryanbrunetti committed on
Commit b6b5406 · 1 Parent(s): bf4853f

adjust the spaces duration

Files changed (1)
  1. app.py +5 -5
app.py CHANGED
@@ -3,10 +3,10 @@ import numpy as np
 import random
 import spaces
 import torch
-from diffusers import DiffusionPipeline, FlowMatchEulerDiscreteScheduler
-from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
-from huggingface_hub import hf_hub_download
-import os
+from diffusers import DiffusionPipeline#, FlowMatchEulerDiscreteScheduler
+# from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
+# from huggingface_hub import hf_hub_download
+# import os
 
 dtype = torch.bfloat16
 
@@ -28,7 +28,7 @@ print(f"Device is: {device}")
 pipe = DiffusionPipeline.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to(device)
 
 
-@spaces.GPU(duration=300)
+@spaces.GPU(duration=120)
 def infer(prompt, lora_models, seed=42, randomize_seed=False, width=1024, height=1024, guidance_scale=5.0,
           num_inference_steps=28, progress=gr.Progress(track_tqdm=True)):
     global pipe
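For context, a minimal sketch of the pattern this commit touches: the @spaces.GPU(duration=...) decorator requests a ZeroGPU slot for the decorated call, with duration as the per-call GPU time budget in seconds (here lowered from 300 to 120). The spaces, DiffusionPipeline, and FLUX.1-dev pieces come from the diff above; the simplified infer signature and the generation call below are illustrative assumptions, not the Space's full app.py.

# Sketch only, assuming a standard ZeroGPU Space; simplified from the diff above.
import spaces
import torch
from diffusers import DiffusionPipeline

dtype = torch.bfloat16
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the FLUX.1-dev pipeline once at startup, as in the diff.
pipe = DiffusionPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=dtype
).to(device)

@spaces.GPU(duration=120)  # per-call GPU budget in seconds; this commit shortens it from 300
def infer(prompt, seed=42, width=1024, height=1024,
          guidance_scale=5.0, num_inference_steps=28):
    # Deterministic generation for a given seed.
    generator = torch.Generator(device=device).manual_seed(seed)
    image = pipe(
        prompt=prompt,
        width=width,
        height=height,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
    ).images[0]
    return image

A shorter duration frees the shared GPU sooner between calls; the trade-off is that a single request must finish generating within the 120-second window.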