Spaces:
Running
on
Zero
Running
on
Zero
Update app.py
Browse files
app.py
CHANGED
@@ -10,7 +10,7 @@ import numpy as np
|
|
10 |
import PIL.Image
|
11 |
import spaces
|
12 |
import torch
|
13 |
-
from diffusers import AutoencoderKL, DiffusionPipeline
|
14 |
|
15 |
MARKDOWN = """
|
16 |
This demo utilizes <a href="https://huggingface.co/dataautogpt3/OpenDalleV1.1">OpenDalle V1.1</a> by @dataautogpt3.
|
@@ -34,8 +34,8 @@ ENABLE_REFINER = os.getenv("ENABLE_REFINER", "0") == "1"
|
|
34 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
35 |
if torch.cuda.is_available():
|
36 |
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
|
37 |
-
pipe = DiffusionPipeline.from_pretrained(
|
38 |
-
"dataautogpt3/OpenDalleV1.1",
|
39 |
vae=vae,
|
40 |
torch_dtype=torch.float16,
|
41 |
use_safetensors=True,
|
|
|
10 |
import PIL.Image
|
11 |
import spaces
|
12 |
import torch
|
13 |
+
from diffusers import AutoencoderKL, DiffusionPipeline, StableDiffusionXLPipeline, KDPM2AncestralDiscreteScheduler
|
14 |
|
15 |
MARKDOWN = """
|
16 |
This demo utilizes <a href="https://huggingface.co/dataautogpt3/OpenDalleV1.1">OpenDalle V1.1</a> by @dataautogpt3.
|
|
|
34 |
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
|
35 |
if torch.cuda.is_available():
|
36 |
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
|
37 |
+
pipe = StableDiffusionXLPipeline.from_pretrained(
|
38 |
+
"dataautogpt3/ProteusV0.2",
|
39 |
vae=vae,
|
40 |
torch_dtype=torch.float16,
|
41 |
use_safetensors=True,
|