ChenoAi committed on
Commit 4addd7e · verified · 1 Parent(s): 82213a5

Update app.py

Files changed (1)
  1. app.py +6 -5
app.py CHANGED
@@ -22,13 +22,14 @@ ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD", "0") == "1"
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
 if torch.cuda.is_available():
-    pipe = StableDiffusionXLPipeline.from_pretrained(
+    vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
+    pipe = DiffusionPipeline.from_pretrained(
         "dataautogpt3/OpenDalleV1.1",
+        vae=vae,
         torch_dtype=torch.float16,
         use_safetensors=True,
-        add_watermarker=False
+        variant="fp16",
     )
-    pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
     pipe.to("cuda")
 
 
@@ -154,9 +155,9 @@ with gr.Blocks(css=css) as demo:
         num_inference_steps = gr.Slider(
             label="Number of inference steps",
             minimum=1,
-            maximum=15,
+            maximum=50,
             step=1,
-            value=8,
+            value=30,
         )
 
         gr.Examples(
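For context, below is a minimal sketch of how the GPU branch of app.py reads after this commit, followed by a hypothetical inference call. Only the from_pretrained arguments and the widened step range (1-50, default 30) come from the diff above; the imports, prompt, and the trailing pipe(...) call are illustrative assumptions rather than code from this Space.

# Sketch of the pipeline setup after this commit (imports and the inference
# call are assumptions; only the hunk lines above are taken from app.py).
import torch
from diffusers import AutoencoderKL, DiffusionPipeline

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if torch.cuda.is_available():
    # fp16-safe SDXL VAE to avoid NaN/black outputs when decoding in half precision
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )
    pipe = DiffusionPipeline.from_pretrained(
        "dataautogpt3/OpenDalleV1.1",
        vae=vae,
        torch_dtype=torch.float16,
        use_safetensors=True,
        variant="fp16",
    )
    pipe.to("cuda")

    # Hypothetical call mirroring the new slider default of 30 steps; the actual
    # generate() wrapper used by the Gradio UI is not shown in this diff.
    image = pipe(
        prompt="a photo of an astronaut riding a horse",
        num_inference_steps=30,
    ).images[0]
    image.save("out.png")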