taoki committed on
Commit f268b80 · verified · 1 Parent(s): 9767672

Upload openvino_pipe.py

Files changed (1)
  openvino_pipe.py +273 -0
openvino_pipe.py ADDED
@@ -0,0 +1,273 @@
# origin: https://github.com/intel/openvino-ai-plugins-gimp/blob/ae93e7291fab6d372c958da18e497acb9d927055/gimpopenvino/tools/openvino_common/models_ov/stable_diffusion_engine.py#L748

import os
from typing import Union, Optional, Any, List, Dict

import torch
from openvino.runtime import Core
from diffusers import DiffusionPipeline, LCMScheduler, ImagePipelineOutput
from diffusers.image_processor import VaeImageProcessor
from transformers import CLIPTokenizer


class LatentConsistencyEngine(DiffusionPipeline):
    def __init__(
        self,
        model="SimianLuo/LCM_Dreamshaper_v7",
        tokenizer="openai/clip-vit-large-patch14",
        device=["CPU", "CPU", "CPU"],
    ):
        super().__init__()
        try:
            self.tokenizer = CLIPTokenizer.from_pretrained(model, local_files_only=True)
        except Exception:
            self.tokenizer = CLIPTokenizer.from_pretrained(tokenizer)
            self.tokenizer.save_pretrained(model)

        self.core = Core()
        self.core.set_property({'CACHE_DIR': os.path.join(model, 'cache')})  # enable model caching to reduce init time

        # text encoder
        print("Text Device:", device[0])
        self.text_encoder = self.core.compile_model(os.path.join(model, "text_encoder.xml"), device[0])
        self._text_encoder_output = self.text_encoder.output(0)

        # diffusion (UNet)
        print("unet Device:", device[1])
        self.unet = self.core.compile_model(os.path.join(model, "unet.xml"), device[1])
        self._unet_output = self.unet.output(0)
        self.infer_request = self.unet.create_infer_request()

        # decoder
        print("Vae Device:", device[2])

        self.vae_decoder = self.core.compile_model(os.path.join(model, "vae_decoder.xml"), device[2])
        self.infer_request_vae = self.vae_decoder.create_infer_request()
        self.safety_checker = None  # pipe.safety_checker
        self.feature_extractor = None  # pipe.feature_extractor
        self.vae_scale_factor = 2 ** 3  # the SD VAE downsamples images by a factor of 8
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.scheduler = LCMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
        )

    def _encode_prompt(
        self,
        prompt,
        num_images_per_prompt,
        prompt_embeds: Optional[torch.FloatTensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from the `prompt` input argument.
        """
        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(
                prompt, padding="longest", return_tensors="pt"
            ).input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                # the prompt exceeded the tokenizer's max length; the excess tokens are dropped
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )

            prompt_embeds = self.text_encoder(text_input_ids, share_inputs=True, share_outputs=True)
            prompt_embeds = torch.from_numpy(prompt_embeds[0])

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(
            bs_embed * num_images_per_prompt, seq_len, -1
        )

        # No unconditional prompt embedding is needed because of LCM guided distillation
        return prompt_embeds

    def run_safety_checker(self, image, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(
                    image, output_type="pil"
                )
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(
                feature_extractor_input, return_tensors="pt"
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def prepare_latents(
        self, batch_size, num_channels_latents, height, width, dtype, latents=None
    ):
        shape = (
            batch_size,
            num_channels_latents,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )
        if latents is None:
            latents = torch.randn(shape, dtype=dtype)
        # scale the initial noise by the standard deviation required by the scheduler
        return latents

    def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w: torch.Tensor: guidance scale values to embed
            embedding_dim: int: dimension of the embeddings to generate
            dtype: data type of the generated embeddings
        Returns:
            embedding vectors with shape `(len(w), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        height: Optional[int] = 512,
        width: Optional[int] = 512,
        guidance_scale: float = 7.5,
        scheduler=None,
        num_images_per_prompt: Optional[int] = 1,
        latents: Optional[torch.FloatTensor] = None,
        num_inference_steps: int = 4,
        lcm_origin_steps: int = 50,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        model: Optional[Dict[str, Any]] = None,
        seed: Optional[int] = 1234567,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        callback=None,
        callback_userdata=None,
    ):

        # 1. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if seed is not None:
            torch.manual_seed(seed)

        # print("After Step 1: batch size is ", batch_size)
        # do_classifier_free_guidance = guidance_scale > 0.0
        # In the LCM implementation: cfg_noise = noise_cond + cfg_scale * (noise_cond - noise_uncond), (cfg_scale > 0.0 using CFG)

        # 2. Encode input prompt
        prompt_embeds = self._encode_prompt(
            prompt,
            num_images_per_prompt,
            prompt_embeds=prompt_embeds,
        )
        # print("After Step 2: prompt embeds is ", prompt_embeds)
        # print("After Step 2: scheduler is ", scheduler)

        # 3. Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, original_inference_steps=lcm_origin_steps)
        timesteps = self.scheduler.timesteps
        # print("After Step 3: timesteps is ", timesteps)

        # 4. Prepare latent variables
        num_channels_latents = 4
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            latents,
        )
        latents = latents * self.scheduler.init_noise_sigma
        # print("After Step 4: ")

        bs = batch_size * num_images_per_prompt

        # 5. Get guidance scale embedding
        w = torch.tensor(guidance_scale).repeat(bs)
        w_embedding = self.get_w_embedding(w, embedding_dim=256)
        # print("After Step 5: ")

        # 6. LCM multistep sampling loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if callback:
                    callback(i + 1, callback_userdata)

                ts = torch.full((bs,), t, dtype=torch.long)

                # model prediction (v-prediction, eps, x)
                model_pred = self.unet(
                    [latents, ts, prompt_embeds, w_embedding], share_inputs=True, share_outputs=True
                )[0]

                # compute the previous noisy sample x_t -> x_t-1
                latents, denoised = self.scheduler.step(
                    torch.from_numpy(model_pred), t, latents, return_dict=False
                )
                progress_bar.update()

        # print("After Step 6: ")
        # vae_start = time.time()

        if output_type != "latent":
            # 0.18215 is the SD VAE scaling factor; latents are unscaled before decoding
            image = torch.from_numpy(self.vae_decoder(denoised / 0.18215, share_inputs=True, share_outputs=True)[0])
        else:
            image = denoised

        # print("vae decoder done", time.time() - vae_start)
        # post_start = time.time()

        # if has_nsfw_concept is None:
        do_denormalize = [True] * image.shape[0]
        # else:
        #     do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
        # print("After do_denormalize: image is ", image)

        image = self.image_processor.postprocess(
            image, output_type=output_type, do_denormalize=do_denormalize
        )

        return ImagePipelineOutput([image[0]])
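
A minimal usage sketch for this pipeline, assuming the model directory already contains the OpenVINO IR files the constructor loads (text_encoder.xml, unet.xml, vae_decoder.xml) plus the tokenizer files; the path and device list below are illustrative only, not part of the uploaded file.

# usage sketch, assuming a converted LCM model in a hypothetical local directory
from openvino_pipe import LatentConsistencyEngine

pipe = LatentConsistencyEngine(
    model="models/lcm_dreamshaper_v7",   # hypothetical path to the converted IR model
    device=["CPU", "GPU", "CPU"],        # text encoder, UNet, VAE decoder devices
)

result = pipe(
    prompt="a photo of an astronaut riding a horse on mars",
    num_inference_steps=4,
    guidance_scale=8.0,
    seed=42,
)
result.images[0].save("lcm_sample.png")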