Spaces:
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -6,9 +6,10 @@ from diffusers import StableDiffusionXLPipeline
|
|
6 |
from diffusers import AutoencoderTiny, AutoencoderKL
|
7 |
|
8 |
dtype = torch.bfloat16
|
|
|
9 |
#taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
10 |
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
|
11 |
-
pipeline = StableDiffusionXLPipeline.from_pretrained("bobber/bigasp2", torch_dtype=dtype, vae=good_vae).to(
|
12 |
|
13 |
@spaces.GPU
|
14 |
def generate(prompt, negative_prompt, width, height, sample_steps, guidance_scale):
|
|
|
6 |
from diffusers import AutoencoderTiny, AutoencoderKL
|
7 |
|
8 |
dtype = torch.bfloat16
|
9 |
+
device = "cuda" if torch.cuda.is_available() else "cpu"
|
10 |
#taef1 = AutoencoderTiny.from_pretrained("madebyollin/taef1", torch_dtype=dtype).to(device)
|
11 |
good_vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=dtype).to(device)
|
12 |
+
pipeline = StableDiffusionXLPipeline.from_pretrained("bobber/bigasp2", torch_dtype=dtype, vae=good_vae).to(device)
|
13 |
|
14 |
@spaces.GPU
|
15 |
def generate(prompt, negative_prompt, width, height, sample_steps, guidance_scale):
|