amildravid4292 committed on
Commit
cfbf1bf
·
verified ·
1 Parent(s): 4ef9ffa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +58 -0
app.py CHANGED
@@ -17,6 +17,23 @@ from editing import get_direction, debias
17
  from lora_w2w import LoRAw2w
18
  from huggingface_hub import snapshot_download
19
  import spaces
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
 
21
  device = gr.State()
22
  generator = gr.State()
@@ -83,6 +100,47 @@ thick.value = debias(thick.value, "Pale_Skin", df, pinverse, device.value)
83
  thick.value = debias(thick.value, "Heavy_Makeup", df, pinverse, device.value)
84
 
85
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  @torch.no_grad()
87
  @spaces.GPU
88
  def sample_weights(unet, proj, mean, std, v, device, factor = 1.0):
 
17
  from lora_w2w import LoRAw2w
18
  from huggingface_hub import snapshot_download
19
  import spaces
20
+ from transformers import CLIPTextModel
21
+ from lora_w2w import LoRAw2w
22
+ from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel, LMSDiscreteScheduler
23
+ from transformers import AutoTokenizer, PretrainedConfig
24
+ import warnings
25
+ warnings.filterwarnings("ignore")
26
+ from diffusers import (
27
+ AutoencoderKL,
28
+ DDPMScheduler,
29
+ DiffusionPipeline,
30
+ DPMSolverMultistepScheduler,
31
+ UNet2DConditionModel,
32
+ PNDMScheduler,
33
+ StableDiffusionPipeline
34
+ )
35
+
36
+
37
 
38
  device = gr.State()
39
  generator = gr.State()
 
100
  thick.value = debias(thick.value, "Heavy_Makeup", df, pinverse, device.value)
101
 
102
 
103
+
104
+
105
@torch.no_grad()
@spaces.GPU
def load_models():
    """Load the frozen Stable Diffusion components used by this app.

    Downloads (or reads from cache) the realistic-vision-v51 checkpoint and
    returns its parts individually so the rest of the app can drive the
    diffusion loop manually.

    Returns:
        tuple: ``(unet, vae, text_encoder, tokenizer, noise_scheduler)``.
        All modules are frozen (``requires_grad_(False)``) and moved to the
        global ``device.value`` in ``torch.bfloat16``.
    """
    pretrained_model_name_or_path = "stablediffusionapi/realistic-vision-v51"
    revision = None
    weight_dtype = torch.bfloat16

    # The full pipeline is loaded only to grab its scheduler; it is deleted
    # immediately afterwards to free memory. (Reuses the model-id variable
    # instead of repeating the hard-coded literal.)
    pipe = StableDiffusionPipeline.from_pretrained(
        pretrained_model_name_or_path,
        torch_dtype=torch.float16,
        safety_checker=None,
        requires_safety_checker=False,
    ).to(device.value)
    noise_scheduler = pipe.scheduler
    del pipe

    tokenizer = AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path, subfolder="tokenizer", revision=revision
    )
    text_encoder = CLIPTextModel.from_pretrained(
        pretrained_model_name_or_path, subfolder="text_encoder", revision=revision
    )
    vae = AutoencoderKL.from_pretrained(
        pretrained_model_name_or_path, subfolder="vae", revision=revision
    )
    unet = UNet2DConditionModel.from_pretrained(
        pretrained_model_name_or_path, subfolder="unet", revision=revision
    )

    # Freeze everything — this app only runs inference.
    # BUG FIX: the original called ``unet.to(device, ...)`` with the gr.State
    # wrapper itself rather than ``device.value`` (as every other .to() call
    # here does); it also called ``vae.requires_grad_(False)`` twice.
    for module in (unet, vae, text_encoder):
        module.requires_grad_(False)
        module.to(device.value, dtype=weight_dtype)

    return unet, vae, text_encoder, tokenizer, noise_scheduler
142
+
143
+
144
  @torch.no_grad()
145
  @spaces.GPU
146
  def sample_weights(unet, proj, mean, std, v, device, factor = 1.0):