Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -32,6 +32,44 @@ from diffusers import (
     StableDiffusionPipeline
 )
 
+@torch.no_grad()
+@spaces.GPU
+def load_models():
+    pretrained_model_name_or_path = "stablediffusionapi/realistic-vision-v51"
+
+    revision = None
+    rank = 1
+    weight_dtype = torch.bfloat16
+
+    # Load scheduler, tokenizer and models.
+    pipe = StableDiffusionPipeline.from_pretrained("stablediffusionapi/realistic-vision-v51",
+                                                   torch_dtype=torch.float16, safety_checker=None,
+                                                   requires_safety_checker=False).to(device.value)
+    noise_scheduler = pipe.scheduler
+    del pipe
+    tokenizer = AutoTokenizer.from_pretrained(
+        pretrained_model_name_or_path, subfolder="tokenizer", revision=revision
+    )
+    text_encoder = CLIPTextModel.from_pretrained(
+        pretrained_model_name_or_path, subfolder="text_encoder", revision=revision
+    )
+    vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae", revision=revision)
+    unet = UNet2DConditionModel.from_pretrained(
+        pretrained_model_name_or_path, subfolder="unet", revision=revision
+    )
+
+    unet.requires_grad_(False)
+    unet.to(device, dtype=weight_dtype)
+    vae.requires_grad_(False)
+
+    text_encoder.requires_grad_(False)
+    vae.requires_grad_(False)
+    vae.to(device.value, dtype=weight_dtype)
+    text_encoder.to(device.value, dtype=weight_dtype)
+    print("")
+
+    return unet, vae, text_encoder, tokenizer, noise_scheduler
+
 
 
 device = gr.State()
@@ -101,43 +139,6 @@ thick.value = debias(thick.value, "Heavy_Makeup", df, pinverse, device.value)
 
 
 
-@torch.no_grad()
-@spaces.GPU
-def load_models():
-    pretrained_model_name_or_path = "stablediffusionapi/realistic-vision-v51"
-
-    revision = None
-    rank = 1
-    weight_dtype = torch.bfloat16
-
-    # Load scheduler, tokenizer and models.
-    pipe = StableDiffusionPipeline.from_pretrained("stablediffusionapi/realistic-vision-v51",
-                                                   torch_dtype=torch.float16, safety_checker=None,
-                                                   requires_safety_checker=False).to(device.value)
-    noise_scheduler = pipe.scheduler
-    del pipe
-    tokenizer = AutoTokenizer.from_pretrained(
-        pretrained_model_name_or_path, subfolder="tokenizer", revision=revision
-    )
-    text_encoder = CLIPTextModel.from_pretrained(
-        pretrained_model_name_or_path, subfolder="text_encoder", revision=revision
-    )
-    vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder="vae", revision=revision)
-    unet = UNet2DConditionModel.from_pretrained(
-        pretrained_model_name_or_path, subfolder="unet", revision=revision
-    )
-
-    unet.requires_grad_(False)
-    unet.to(device, dtype=weight_dtype)
-    vae.requires_grad_(False)
-
-    text_encoder.requires_grad_(False)
-    vae.requires_grad_(False)
-    vae.to(device.value, dtype=weight_dtype)
-    text_encoder.to(device.value, dtype=weight_dtype)
-    print("")
-
-    return unet, vae, text_encoder, tokenizer, noise_scheduler
 
 
 @torch.no_grad()
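Note on the decorator pattern this commit relies on (not part of the commit itself): on a ZeroGPU Space, a GPU is attached to the process only while a function decorated with @spaces.GPU is executing, which is why load_models() keeps that decorator after being moved to the top of app.py. Below is a minimal, self-contained sketch of that pattern; the demo function check_gpu and the gr.Interface wiring are illustrative assumptions, not code from this Space.

import gradio as gr
import spaces
import torch

@spaces.GPU
def check_gpu() -> str:
    # The GPU is attached only for the duration of this decorated call,
    # so CUDA-dependent work (like loading the pipeline) belongs inside it.
    return f"CUDA available inside @spaces.GPU call: {torch.cuda.is_available()}"

demo = gr.Interface(fn=check_gpu, inputs=None, outputs="text")

if __name__ == "__main__":
    demo.launch()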