Spaces: Running on Zero

Delete pipeline_ltx_condition.py

pipeline_ltx_condition.py  +0 -1194  (DELETED)

@@ -1,1194 +0,0 @@
# Copyright 2024 Lightricks and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import PIL.Image
import torch
from transformers import T5EncoderModel, T5TokenizerFast

from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput
from diffusers.loaders import FromSingleFileMixin, LTXVideoLoraLoaderMixin
from diffusers.models.autoencoders import AutoencoderKLLTXVideo
from diffusers.models.transformers import LTXVideoTransformer3DModel
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor
from diffusers.video_processor import VideoProcessor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.ltx.pipeline_output import LTXPipelineOutput


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers.pipelines.ltx.pipeline_ltx_condition import LTXConditionPipeline, LTXVideoCondition
        >>> from diffusers.utils import export_to_video, load_video, load_image

        >>> pipe = LTXConditionPipeline.from_pretrained("Lightricks/LTX-Video-0.9.5", torch_dtype=torch.bfloat16)
        >>> pipe.to("cuda")

        >>> # Load input image and video
        >>> video = load_video(
        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input-vid.mp4"
        ... )
        >>> image = load_image(
        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/cosmos/cosmos-video2world-input.jpg"
        ... )

        >>> # Create conditioning objects
        >>> condition1 = LTXVideoCondition(
        ...     image=image,
        ...     frame_index=0,
        ... )
        >>> condition2 = LTXVideoCondition(
        ...     video=video,
        ...     frame_index=80,
        ... )

        >>> prompt = "The video depicts a long, straight highway stretching into the distance, flanked by metal guardrails. The road is divided into multiple lanes, with a few vehicles visible in the far distance. The surrounding landscape features dry, grassy fields on one side and rolling hills on the other. The sky is mostly clear with a few scattered clouds, suggesting a bright, sunny day. And then the camera switches to a winding mountain road covered in snow, with a single vehicle traveling along it. The road is flanked by steep, rocky cliffs and sparse vegetation. The landscape is characterized by rugged terrain and a river visible in the distance. The scene captures the solitude and beauty of a winter drive through a mountainous region."
        >>> negative_prompt = "worst quality, inconsistent motion, blurry, jittery, distorted"

        >>> # Generate video
        >>> generator = torch.Generator("cuda").manual_seed(0)
        >>> # Text-only conditioning is also supported without the need to pass `conditions`
        >>> video = pipe(
        ...     conditions=[condition1, condition2],
        ...     prompt=prompt,
        ...     negative_prompt=negative_prompt,
        ...     width=768,
        ...     height=512,
        ...     num_frames=161,
        ...     num_inference_steps=40,
        ...     generator=generator,
        ... ).frames[0]

        >>> export_to_video(video, "output.mp4", fps=24)
        ```
"""


@dataclass
class LTXVideoCondition:
    """
    Defines a single frame-conditioning item for LTX Video - a single frame or a sequence of frames.

    Attributes:
        image (`PIL.Image.Image`):
            The image to condition the video on.
        video (`List[PIL.Image.Image]`):
            The video to condition the video on.
        frame_index (`int`):
            The frame index at which the image or video will conditionally affect the video generation.
        strength (`float`, defaults to `1.0`):
            The strength of the conditioning effect. A value of `1.0` means the conditioning effect is fully applied.
    """

    image: Optional[PIL.Image.Image] = None
    video: Optional[List[PIL.Image.Image]] = None
    frame_index: int = 0
    strength: float = 1.0


# from LTX-Video/ltx_video/schedulers/rf.py
def linear_quadratic_schedule(num_steps, threshold_noise=0.025, linear_steps=None):
    if linear_steps is None:
        linear_steps = num_steps // 2
    if num_steps < 2:
        return torch.tensor([1.0])
    linear_sigma_schedule = [i * threshold_noise / linear_steps for i in range(linear_steps)]
    threshold_noise_step_diff = linear_steps - threshold_noise * num_steps
    quadratic_steps = num_steps - linear_steps
    quadratic_coef = threshold_noise_step_diff / (linear_steps * quadratic_steps**2)
    linear_coef = threshold_noise / linear_steps - 2 * threshold_noise_step_diff / (quadratic_steps**2)
    const = quadratic_coef * (linear_steps**2)
    quadratic_sigma_schedule = [
        quadratic_coef * (i**2) + linear_coef * i + const for i in range(linear_steps, num_steps)
    ]
    sigma_schedule = linear_sigma_schedule + quadratic_sigma_schedule + [1.0]
    sigma_schedule = [1.0 - x for x in sigma_schedule]
    return torch.tensor(sigma_schedule[:-1])
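# The schedule above rises linearly from 0 to `threshold_noise` over the first `linear_steps` steps and then
# follows a quadratic curve that reaches 1.0 at `num_steps`; the trailing `1.0 - x` flip (with the last entry
# dropped) turns it into a descending sigma schedule starting at 1.0, which `__call__` below converts to
# timesteps by multiplying by 1000.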
# Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
def calculate_shift(
    image_seq_len,
    base_seq_len: int = 256,
    max_seq_len: int = 4096,
    base_shift: float = 0.5,
    max_shift: float = 1.15,
):
    m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
    b = base_shift - m * base_seq_len
    mu = image_seq_len * m + b
    return mu
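# With the defaults above, `mu` interpolates linearly between `base_shift` at 256 tokens and `max_shift` at
# 4096 tokens; for example, calculate_shift(2176) (the halfway point) evaluates to 0.825.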
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigmas schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
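# Illustrative call patterns (not exhaustive): `retrieve_timesteps(scheduler, num_inference_steps=50, device=device)`
# uses the scheduler's default spacing, while `retrieve_timesteps(scheduler, device=device, sigmas=[1.0, 0.75, 0.5, 0.25])`
# forwards a custom schedule to `scheduler.set_timesteps` and returns the resulting timesteps together with their count.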
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


class LTXConditionPipeline(DiffusionPipeline, FromSingleFileMixin, LTXVideoLoraLoaderMixin):
    r"""
    Pipeline for text/image/video-to-video generation.

    Reference: https://github.com/Lightricks/LTX-Video

    Args:
        transformer ([`LTXVideoTransformer3DModel`]):
            Conditional Transformer architecture to denoise the encoded video latents.
        scheduler ([`FlowMatchEulerDiscreteScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
        vae ([`AutoencoderKLLTXVideo`]):
            Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
        text_encoder ([`T5EncoderModel`]):
            [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
            the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
        tokenizer (`T5TokenizerFast`):
            Tokenizer of class
            [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
    """

    model_cpu_offload_seq = "text_encoder->transformer->vae"
    _optional_components = []
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        scheduler: FlowMatchEulerDiscreteScheduler,
        vae: AutoencoderKLLTXVideo,
        text_encoder: T5EncoderModel,
        tokenizer: T5TokenizerFast,
        transformer: LTXVideoTransformer3DModel,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            transformer=transformer,
            scheduler=scheduler,
        )

        self.vae_spatial_compression_ratio = (
            self.vae.spatial_compression_ratio if getattr(self, "vae", None) is not None else 32
        )
        self.vae_temporal_compression_ratio = (
            self.vae.temporal_compression_ratio if getattr(self, "vae", None) is not None else 8
        )
        self.transformer_spatial_patch_size = (
            self.transformer.config.patch_size if getattr(self, "transformer", None) is not None else 1
        )
        self.transformer_temporal_patch_size = (
            self.transformer.config.patch_size_t if getattr(self, "transformer", None) is not None else 1
        )

        self.video_processor = VideoProcessor(vae_scale_factor=self.vae_spatial_compression_ratio)
        self.tokenizer_max_length = (
            self.tokenizer.model_max_length if getattr(self, "tokenizer", None) is not None else 128
        )

        self.default_height = 512
        self.default_width = 704
        self.default_frames = 121

    def _get_t5_prompt_embeds(
        self,
        prompt: Union[str, List[str]] = None,
        num_videos_per_prompt: int = 1,
        max_sequence_length: int = 256,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        device = device or self._execution_device
        dtype = dtype or self.text_encoder.dtype

        prompt = [prompt] if isinstance(prompt, str) else prompt
        batch_size = len(prompt)

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=max_sequence_length,
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        prompt_attention_mask = text_inputs.attention_mask
        prompt_attention_mask = prompt_attention_mask.bool().to(device)

        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_sequence_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because `max_sequence_length` is set to "
                f" {max_sequence_length} tokens: {removed_text}"
            )

        prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)[0]
        prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        _, seq_len, _ = prompt_embeds.shape
        prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)

        prompt_attention_mask = prompt_attention_mask.view(batch_size, -1)
        prompt_attention_mask = prompt_attention_mask.repeat(num_videos_per_prompt, 1)

        return prompt_embeds, prompt_attention_mask
    # Copied from diffusers.pipelines.mochi.pipeline_mochi.MochiPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt: Union[str, List[str]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        do_classifier_free_guidance: bool = True,
        num_videos_per_prompt: int = 1,
        prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        max_sequence_length: int = 256,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
                Whether to use classifier free guidance or not.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                Number of videos that should be generated per prompt.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            device: (`torch.device`, *optional*):
                torch device
            dtype: (`torch.dtype`, *optional*):
                torch dtype
        """
        device = device or self._execution_device

        prompt = [prompt] if isinstance(prompt, str) else prompt
        if prompt is not None:
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            prompt_embeds, prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        if do_classifier_free_guidance and negative_prompt_embeds is None:
            negative_prompt = negative_prompt or ""
            negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt

            if prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )

            negative_prompt_embeds, negative_prompt_attention_mask = self._get_t5_prompt_embeds(
                prompt=negative_prompt,
                num_videos_per_prompt=num_videos_per_prompt,
                max_sequence_length=max_sequence_length,
                device=device,
                dtype=dtype,
            )

        return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
    def check_inputs(
        self,
        prompt,
        conditions,
        image,
        video,
        frame_index,
        strength,
        height,
        width,
        callback_on_step_end_tensor_inputs=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        prompt_attention_mask=None,
        negative_prompt_attention_mask=None,
    ):
        if height % 32 != 0 or width % 32 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 32 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if prompt_embeds is not None and prompt_attention_mask is None:
            raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")

        if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
            raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )
            if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
                raise ValueError(
                    "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
                    f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
                    f" {negative_prompt_attention_mask.shape}."
                )

        if conditions is not None and (image is not None or video is not None):
            raise ValueError("If `conditions` is provided, `image` and `video` must not be provided.")

        if conditions is None:
            if isinstance(image, list) and isinstance(frame_index, list) and len(image) != len(frame_index):
                raise ValueError(
                    "If `conditions` is not provided, `image` and `frame_index` must be of the same length."
                )
            elif isinstance(image, list) and isinstance(strength, list) and len(image) != len(strength):
                raise ValueError("If `conditions` is not provided, `image` and `strength` must be of the same length.")
            elif isinstance(video, list) and isinstance(frame_index, list) and len(video) != len(frame_index):
                raise ValueError(
                    "If `conditions` is not provided, `video` and `frame_index` must be of the same length."
                )
            elif isinstance(video, list) and isinstance(strength, list) and len(video) != len(strength):
                raise ValueError("If `conditions` is not provided, `video` and `strength` must be of the same length.")

    @staticmethod
    def _prepare_video_ids(
        batch_size: int,
        num_frames: int,
        height: int,
        width: int,
        patch_size: int = 1,
        patch_size_t: int = 1,
        device: torch.device = None,
    ) -> torch.Tensor:
        latent_sample_coords = torch.meshgrid(
            torch.arange(0, num_frames, patch_size_t, device=device),
            torch.arange(0, height, patch_size, device=device),
            torch.arange(0, width, patch_size, device=device),
            indexing="ij",
        )
        latent_sample_coords = torch.stack(latent_sample_coords, dim=0)
        latent_coords = latent_sample_coords.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
        latent_coords = latent_coords.reshape(batch_size, -1, num_frames * height * width)

        return latent_coords
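    # `_prepare_video_ids` assigns every latent token its (frame, row, column) coordinate in latent space,
    # returned as a tensor of shape [batch_size, 3, num_frames * height * width]; `_scale_video_ids` below
    # rescales these indices to pixel/frame units and offsets the temporal axis by the conditioning frame index.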
    @staticmethod
    def _scale_video_ids(
        video_ids: torch.Tensor,
        scale_factor: int = 32,
        scale_factor_t: int = 8,
        frame_index: int = 0,
        device: torch.device = None,
    ) -> torch.Tensor:
        scaled_latent_coords = (
            video_ids
            * torch.tensor([scale_factor_t, scale_factor, scale_factor], device=video_ids.device)[None, :, None]
        )
        scaled_latent_coords[:, 0] = (scaled_latent_coords[:, 0] + 1 - scale_factor_t).clamp(min=0)
        scaled_latent_coords[:, 0] += frame_index

        return scaled_latent_coords

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._pack_latents
    def _pack_latents(latents: torch.Tensor, patch_size: int = 1, patch_size_t: int = 1) -> torch.Tensor:
        # Unpacked latents of shape [B, C, F, H, W] are patched into tokens of shape [B, C, F // p_t, p_t, H // p, p, W // p, p].
        # The patch dimensions are then permuted and collapsed into the channel dimension of shape:
        # [B, F // p_t * H // p * W // p, C * p_t * p * p] (an ndim=3 tensor).
        # dim=0 is the batch size, dim=1 is the effective video sequence length, dim=2 is the effective number of input features
        batch_size, num_channels, num_frames, height, width = latents.shape
        post_patch_num_frames = num_frames // patch_size_t
        post_patch_height = height // patch_size
        post_patch_width = width // patch_size
        latents = latents.reshape(
            batch_size,
            -1,
            post_patch_num_frames,
            patch_size_t,
            post_patch_height,
            patch_size,
            post_patch_width,
            patch_size,
        )
        latents = latents.permute(0, 2, 4, 6, 1, 3, 5, 7).flatten(4, 7).flatten(1, 3)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._unpack_latents
    def _unpack_latents(
        latents: torch.Tensor, num_frames: int, height: int, width: int, patch_size: int = 1, patch_size_t: int = 1
    ) -> torch.Tensor:
        # Packed latents of shape [B, S, D] (S is the effective video sequence length, D is the effective feature dimensions)
        # are unpacked and reshaped into a video tensor of shape [B, C, F, H, W]. This is the inverse operation of
        # what happens in the `_pack_latents` method.
        batch_size = latents.size(0)
        latents = latents.reshape(batch_size, num_frames, height, width, -1, patch_size_t, patch_size, patch_size)
        latents = latents.permute(0, 4, 1, 5, 2, 6, 3, 7).flatten(6, 7).flatten(4, 5).flatten(2, 3)
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._normalize_latents
    def _normalize_latents(
        latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
    ) -> torch.Tensor:
        # Normalize latents across the channel dimension [B, C, F, H, W]
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = (latents - latents_mean) * scaling_factor / latents_std
        return latents

    @staticmethod
    # Copied from diffusers.pipelines.ltx.pipeline_ltx.LTXPipeline._denormalize_latents
    def _denormalize_latents(
        latents: torch.Tensor, latents_mean: torch.Tensor, latents_std: torch.Tensor, scaling_factor: float = 1.0
    ) -> torch.Tensor:
        # Denormalize latents across the channel dimension [B, C, F, H, W]
        latents_mean = latents_mean.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents_std = latents_std.view(1, -1, 1, 1, 1).to(latents.device, latents.dtype)
        latents = latents * latents_std / scaling_factor + latents_mean
        return latents

    def trim_conditioning_sequence(self, start_frame: int, sequence_num_frames: int, target_num_frames: int):
        """
        Trim a conditioning sequence to the allowed number of frames.

        Args:
            start_frame (int): The target frame number of the first frame in the sequence.
            sequence_num_frames (int): The number of frames in the sequence.
            target_num_frames (int): The target number of frames in the generated video.
        Returns:
            int: updated sequence length
        """
        scale_factor = self.vae_temporal_compression_ratio
        num_frames = min(sequence_num_frames, target_num_frames - start_frame)
        # Trim down to a multiple of temporal_scale_factor frames plus 1
        num_frames = (num_frames - 1) // scale_factor * scale_factor + 1
        return num_frames
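    # Example: with a temporal compression ratio of 8, a 100-frame conditioning video placed at start_frame=80
    # in a 161-frame generation is first capped to min(100, 161 - 80) = 81 frames and then trimmed to
    # (81 - 1) // 8 * 8 + 1 = 81 frames, i.e. a multiple of the compression ratio plus one.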
    @staticmethod
    def add_noise_to_image_conditioning_latents(
        t: float,
        init_latents: torch.Tensor,
        latents: torch.Tensor,
        noise_scale: float,
        conditioning_mask: torch.Tensor,
        generator,
        eps=1e-6,
    ):
        """
        Add timestep-dependent noise to the hard-conditioning latents. This helps with motion continuity, especially
        when conditioned on a single frame.
        """
        noise = randn_tensor(
            latents.shape,
            generator=generator,
            device=latents.device,
            dtype=latents.dtype,
        )
        # Add noise only to hard-conditioning latents (conditioning_mask = 1.0)
        need_to_noise = (conditioning_mask > 1.0 - eps).unsqueeze(-1)
        noised_latents = init_latents + noise_scale * noise * (t**2)
        latents = torch.where(need_to_noise, noised_latents, latents)
        return latents
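    # In effect, wherever `conditioning_mask` is (numerically) 1.0 the latent is replaced with
    # `init_latents + noise_scale * noise * t**2`, so hard-conditioned tokens receive progressively less added
    # noise as the normalized timestep `t` shrinks over the denoising loop.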
    def prepare_latents(
        self,
        conditions: Optional[List[torch.Tensor]] = None,
        condition_strength: Optional[List[float]] = None,
        condition_frame_index: Optional[List[int]] = None,
        batch_size: int = 1,
        num_channels_latents: int = 128,
        height: int = 512,
        width: int = 704,
        num_frames: int = 161,
        num_prefix_latent_frames: int = 2,
        generator: Optional[torch.Generator] = None,
        device: Optional[torch.device] = None,
        dtype: Optional[torch.dtype] = None,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, int]:
        num_latent_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1
        latent_height = height // self.vae_spatial_compression_ratio
        latent_width = width // self.vae_spatial_compression_ratio

        shape = (batch_size, num_channels_latents, num_latent_frames, latent_height, latent_width)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        if len(conditions) > 0:
            condition_latent_frames_mask = torch.zeros(
                (batch_size, num_latent_frames), device=device, dtype=torch.float32
            )

            extra_conditioning_latents = []
            extra_conditioning_video_ids = []
            extra_conditioning_mask = []
            extra_conditioning_num_latents = 0
            for data, strength, frame_index in zip(conditions, condition_strength, condition_frame_index):
                condition_latents = retrieve_latents(self.vae.encode(data), generator=generator)
                condition_latents = self._normalize_latents(
                    condition_latents, self.vae.latents_mean, self.vae.latents_std
                ).to(device, dtype=dtype)

                num_data_frames = data.size(2)
                num_cond_frames = condition_latents.size(2)

                if frame_index == 0:
                    latents[:, :, :num_cond_frames] = torch.lerp(
                        latents[:, :, :num_cond_frames], condition_latents, strength
                    )
                    condition_latent_frames_mask[:, :num_cond_frames] = strength

                else:
                    if num_data_frames > 1:
                        if num_cond_frames < num_prefix_latent_frames:
                            raise ValueError(
                                f"Number of latent frames must be at least {num_prefix_latent_frames} but got {num_data_frames}."
                            )

                        if num_cond_frames > num_prefix_latent_frames:
                            start_frame = frame_index // self.vae_temporal_compression_ratio + num_prefix_latent_frames
                            end_frame = start_frame + num_cond_frames - num_prefix_latent_frames
                            latents[:, :, start_frame:end_frame] = torch.lerp(
                                latents[:, :, start_frame:end_frame],
                                condition_latents[:, :, num_prefix_latent_frames:],
                                strength,
                            )
                            condition_latent_frames_mask[:, start_frame:end_frame] = strength
                            condition_latents = condition_latents[:, :, :num_prefix_latent_frames]

                    noise = randn_tensor(condition_latents.shape, generator=generator, device=device, dtype=dtype)
                    condition_latents = torch.lerp(noise, condition_latents, strength)

                condition_video_ids = self._prepare_video_ids(
                    batch_size,
                    condition_latents.size(2),
                    latent_height,
                    latent_width,
                    patch_size=self.transformer_spatial_patch_size,
                    patch_size_t=self.transformer_temporal_patch_size,
                    device=device,
                )
                condition_video_ids = self._scale_video_ids(
                    condition_video_ids,
                    scale_factor=self.vae_spatial_compression_ratio,
                    scale_factor_t=self.vae_temporal_compression_ratio,
                    frame_index=frame_index,
                    device=device,
                )
                condition_latents = self._pack_latents(
                    condition_latents,
                    self.transformer_spatial_patch_size,
                    self.transformer_temporal_patch_size,
                )
                condition_conditioning_mask = torch.full(
                    condition_latents.shape[:2], strength, device=device, dtype=dtype
                )

                extra_conditioning_latents.append(condition_latents)
                extra_conditioning_video_ids.append(condition_video_ids)
                extra_conditioning_mask.append(condition_conditioning_mask)
                extra_conditioning_num_latents += condition_latents.size(1)

        video_ids = self._prepare_video_ids(
            batch_size,
            num_latent_frames,
            latent_height,
            latent_width,
            patch_size_t=self.transformer_temporal_patch_size,
            patch_size=self.transformer_spatial_patch_size,
            device=device,
        )
        if len(conditions) > 0:
            conditioning_mask = condition_latent_frames_mask.gather(1, video_ids[:, 0])
        else:
            conditioning_mask, extra_conditioning_num_latents = None, 0
        video_ids = self._scale_video_ids(
            video_ids,
            scale_factor=self.vae_spatial_compression_ratio,
            scale_factor_t=self.vae_temporal_compression_ratio,
            frame_index=0,
            device=device,
        )
        latents = self._pack_latents(
            latents, self.transformer_spatial_patch_size, self.transformer_temporal_patch_size
        )

        if len(conditions) > 0 and len(extra_conditioning_latents) > 0:
            latents = torch.cat([*extra_conditioning_latents, latents], dim=1)
            video_ids = torch.cat([*extra_conditioning_video_ids, video_ids], dim=2)
            conditioning_mask = torch.cat([*extra_conditioning_mask, conditioning_mask], dim=1)

        return latents, conditioning_mask, video_ids, extra_conditioning_num_latents
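    # The returned tuple holds: the packed noise latents (with any extra per-condition latent tokens prepended
    # along the sequence dimension), the per-token conditioning-strength mask (or None when unconditioned), the
    # matching video coordinate ids, and the number of extra conditioning tokens to strip again after denoising.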
    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1.0

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def current_timestep(self):
        return self._current_timestep

    @property
    def attention_kwargs(self):
        return self._attention_kwargs

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        conditions: Union[LTXVideoCondition, List[LTXVideoCondition]] = None,
        image: Union[PipelineImageInput, List[PipelineImageInput]] = None,
        video: List[PipelineImageInput] = None,
        frame_index: Union[int, List[int]] = 0,
        strength: Union[float, List[float]] = 1.0,
        prompt: Union[str, List[str]] = None,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 704,
        num_frames: int = 161,
        frame_rate: int = 25,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        guidance_scale: float = 3,
        image_cond_noise_scale: float = 0.15,
        num_videos_per_prompt: Optional[int] = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        prompt_attention_mask: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_attention_mask: Optional[torch.Tensor] = None,
        decode_timestep: Union[float, List[float]] = 0.0,
        decode_noise_scale: Optional[Union[float, List[float]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        attention_kwargs: Optional[Dict[str, Any]] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 256,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            conditions (`List[LTXVideoCondition]`, *optional*):
                The list of frame-conditioning items for the video generation. If not provided, conditions will be
                created using `image`, `video`, `frame_index` and `strength`.
            image (`PipelineImageInput` or `List[PipelineImageInput]`, *optional*):
                The image or images to condition the video generation. If not provided, one has to pass `video` or
                `conditions`.
            video (`List[PipelineImageInput]`, *optional*):
                The video to condition the video generation. If not provided, one has to pass `image` or `conditions`.
            frame_index (`int` or `List[int]`, *optional*):
                The frame index or frame indices at which the image or video will conditionally affect the video
                generation. If not provided, one has to pass `conditions`.
            strength (`float` or `List[float]`, *optional*):
                The strength or strengths of the conditioning effect. If not provided, one has to pass `conditions`.
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
                instead.
            height (`int`, defaults to `512`):
                The height in pixels of the generated image. This is set to 512 by default for the best results.
            width (`int`, defaults to `704`):
                The width in pixels of the generated image. This is set to 704 by default for the best results.
            num_frames (`int`, defaults to `161`):
                The number of video frames to generate.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, defaults to `3`):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            num_videos_per_prompt (`int`, *optional*, defaults to 1):
                The number of videos to generate per prompt.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
                to make generation deterministic.
            latents (`torch.Tensor`, *optional*):
                Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor will be generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            prompt_attention_mask (`torch.Tensor`, *optional*):
                Pre-generated attention mask for text embeddings.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. For PixArt-Sigma this negative prompt should be "". If not
                provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
            negative_prompt_attention_mask (`torch.FloatTensor`, *optional*):
                Pre-generated attention mask for negative text embeddings.
            decode_timestep (`float`, defaults to `0.0`):
                The timestep at which generated video is decoded.
            decode_noise_scale (`float`, defaults to `None`):
                The interpolation factor between random noise and denoised latents at the decode timestep.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.ltx.LTXPipelineOutput`] instead of a plain tuple.
            attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference. The function is called
                with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
                callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.
            max_sequence_length (`int`, defaults to `256`):
                Maximum sequence length to use with the `prompt`.

        Examples:

        Returns:
            [`~pipelines.ltx.LTXPipelineOutput`] or `tuple`:
                If `return_dict` is `True`, [`~pipelines.ltx.LTXPipelineOutput`] is returned, otherwise a `tuple` is
                returned where the first element is a list with the generated images.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
        if latents is not None:
            raise ValueError("Passing latents is not yet supported.")

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt=prompt,
            conditions=conditions,
            image=image,
            video=video,
            frame_index=frame_index,
            strength=strength,
            height=height,
            width=width,
            callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
        )

        self._guidance_scale = guidance_scale
        self._attention_kwargs = attention_kwargs
        self._interrupt = False
        self._current_timestep = None

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if conditions is not None:
            if not isinstance(conditions, list):
                conditions = [conditions]

            strength = [condition.strength for condition in conditions]
            frame_index = [condition.frame_index for condition in conditions]
            image = [condition.image for condition in conditions]
            video = [condition.video for condition in conditions]
        elif image is not None or video is not None:
            if not isinstance(image, list):
                image = [image]
                num_conditions = 1
            elif isinstance(image, list):
                num_conditions = len(image)
            if not isinstance(video, list):
                video = [video]
                num_conditions = 1
            elif isinstance(video, list):
                num_conditions = len(video)

            if not isinstance(frame_index, list):
                frame_index = [frame_index] * num_conditions
            if not isinstance(strength, list):
                strength = [strength] * num_conditions

        device = self._execution_device

        # 3. Prepare text embeddings
        (
            prompt_embeds,
            prompt_attention_mask,
            negative_prompt_embeds,
            negative_prompt_attention_mask,
        ) = self.encode_prompt(
            prompt=prompt,
            negative_prompt=negative_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            num_videos_per_prompt=num_videos_per_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            prompt_attention_mask=prompt_attention_mask,
            negative_prompt_attention_mask=negative_prompt_attention_mask,
            max_sequence_length=max_sequence_length,
            device=device,
        )
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)

        vae_dtype = self.vae.dtype

        conditioning_tensors = []
        is_conditioning_image_or_video = image is not None or video is not None
        if is_conditioning_image_or_video:
            for condition_image, condition_video, condition_frame_index, condition_strength in zip(
                image, video, frame_index, strength
            ):
                if condition_image is not None:
                    condition_tensor = (
                        self.video_processor.preprocess(condition_image, height, width)
                        .unsqueeze(2)
                        .to(device, dtype=vae_dtype)
                    )
                elif condition_video is not None:
                    condition_tensor = self.video_processor.preprocess_video(condition_video, height, width)
                    num_frames_input = condition_tensor.size(2)
                    num_frames_output = self.trim_conditioning_sequence(
                        condition_frame_index, num_frames_input, num_frames
                    )
                    condition_tensor = condition_tensor[:, :, :num_frames_output]
                    condition_tensor = condition_tensor.to(device, dtype=vae_dtype)
                else:
                    raise ValueError("Either `image` or `video` must be provided for conditioning.")

                if condition_tensor.size(2) % self.vae_temporal_compression_ratio != 1:
                    raise ValueError(
                        f"Number of frames in the video must be of the form (k * {self.vae_temporal_compression_ratio} + 1) "
                        f"but got {condition_tensor.size(2)} frames."
                    )
                conditioning_tensors.append(condition_tensor)

        # 4. Prepare latent variables
        num_channels_latents = self.transformer.config.in_channels
        latents, conditioning_mask, video_coords, extra_conditioning_num_latents = self.prepare_latents(
            conditioning_tensors,
            strength,
            frame_index,
            batch_size=batch_size * num_videos_per_prompt,
            num_channels_latents=num_channels_latents,
            height=height,
            width=width,
            num_frames=num_frames,
            generator=generator,
            device=device,
            dtype=torch.float32,
        )
        video_coords = video_coords.float()
        video_coords[:, 0] = video_coords[:, 0] * (1.0 / frame_rate)

        init_latents = latents.clone() if is_conditioning_image_or_video else None

        if self.do_classifier_free_guidance:
            video_coords = torch.cat([video_coords, video_coords], dim=0)

        # 5. Prepare timesteps
        latent_num_frames = (num_frames - 1) // self.vae_temporal_compression_ratio + 1
        latent_height = height // self.vae_spatial_compression_ratio
        latent_width = width // self.vae_spatial_compression_ratio
        sigmas = linear_quadratic_schedule(num_inference_steps)
        timesteps = sigmas * 1000
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler,
            num_inference_steps,
            device,
            timesteps=timesteps,
        )
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
        self._num_timesteps = len(timesteps)

        # 6. Denoising loop
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                self._current_timestep = t

                if image_cond_noise_scale > 0 and init_latents is not None:
                    # Add timestep-dependent noise to the hard-conditioning latents
                    # This helps with motion continuity, especially when conditioned on a single frame
                    latents = self.add_noise_to_image_conditioning_latents(
                        t / 1000.0,
                        init_latents,
                        latents,
                        image_cond_noise_scale,
                        conditioning_mask,
                        generator,
                    )

                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                if is_conditioning_image_or_video:
                    conditioning_mask_model_input = (
                        torch.cat([conditioning_mask, conditioning_mask])
                        if self.do_classifier_free_guidance
                        else conditioning_mask
                    )
                latent_model_input = latent_model_input.to(prompt_embeds.dtype)

                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                timestep = t.expand(latent_model_input.shape[0]).unsqueeze(-1).float()
                if is_conditioning_image_or_video:
                    timestep = torch.min(timestep, (1 - conditioning_mask_model_input) * 1000.0)

                noise_pred = self.transformer(
                    hidden_states=latent_model_input,
                    encoder_hidden_states=prompt_embeds,
                    timestep=timestep,
                    encoder_attention_mask=prompt_attention_mask,
                    video_coords=video_coords,
                    attention_kwargs=attention_kwargs,
                    return_dict=False,
                )[0]

                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
                    timestep, _ = timestep.chunk(2)

                denoised_latents = self.scheduler.step(
                    -noise_pred, t, latents, per_token_timesteps=timestep, return_dict=False
                )[0]
                if is_conditioning_image_or_video:
                    tokens_to_denoise_mask = (t / 1000 - 1e-6 < (1.0 - conditioning_mask)).unsqueeze(-1)
                    latents = torch.where(tokens_to_denoise_mask, denoised_latents, latents)
                else:
                    latents = denoised_latents

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if is_conditioning_image_or_video:
            latents = latents[:, extra_conditioning_num_latents:]

        latents = self._unpack_latents(
            latents,
            latent_num_frames,
            latent_height,
            latent_width,
            self.transformer_spatial_patch_size,
            self.transformer_temporal_patch_size,
        )

        if output_type == "latent":
            video = latents
        else:
            latents = self._denormalize_latents(
                latents, self.vae.latents_mean, self.vae.latents_std, self.vae.config.scaling_factor
            )
            latents = latents.to(prompt_embeds.dtype)

            if not self.vae.config.timestep_conditioning:
                timestep = None
            else:
                noise = torch.randn(latents.shape, generator=generator, device=device, dtype=latents.dtype)
                if not isinstance(decode_timestep, list):
                    decode_timestep = [decode_timestep] * batch_size
                if decode_noise_scale is None:
                    decode_noise_scale = decode_timestep
                elif not isinstance(decode_noise_scale, list):
                    decode_noise_scale = [decode_noise_scale] * batch_size

                timestep = torch.tensor(decode_timestep, device=device, dtype=latents.dtype)
                decode_noise_scale = torch.tensor(decode_noise_scale, device=device, dtype=latents.dtype)[
                    :, None, None, None, None
                ]
                latents = (1 - decode_noise_scale) * latents + decode_noise_scale * noise

            video = self.vae.decode(latents, timestep, return_dict=False)[0]
            video = self.video_processor.postprocess_video(video, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (video,)

        return LTXPipelineOutput(frames=video)
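# Minimal usage sketch (illustrative only; mirrors the checkpoint and helpers shown in EXAMPLE_DOC_STRING, with a
# hypothetical local "input.jpg" standing in for a real conditioning image):
#
#     pipe = LTXConditionPipeline.from_pretrained("Lightricks/LTX-Video-0.9.5", torch_dtype=torch.bfloat16)
#     pipe.to("cuda")
#     frames = pipe(
#         image=load_image("input.jpg"),  # single-image conditioning without building LTXVideoCondition objects
#         frame_index=0,
#         prompt="a calm mountain lake at sunrise",
#         num_frames=121,                 # number of frames to generate (default is 161)
#         num_inference_steps=40,
#     ).frames[0]
#     export_to_video(frames, "output.mp4", fps=24)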