Sapir Weissbuch commited on
Commit
cc0327d
·
unverified ·
2 Parent(s): 8aab836 c811a04

Merge pull request #1 from LightricksResearch/eval-pipeline

Browse files
poetry.lock DELETED
The diff for this file is too large to render. See raw diff
 
pyproject.toml DELETED
@@ -1,16 +0,0 @@
1
- [tool.poetry]
2
- name = "xora-core"
3
- version = "0.1.0"
4
- description = ""
5
- authors = ["Your Name <[email protected]>"]
6
- readme = "README.md"
7
-
8
- [tool.poetry.dependencies]
9
- python = "^3.10"
10
- torch = "2.3.0"
11
- diffusers = "0.28.2"
12
-
13
-
14
- [build-system]
15
- requires = ["poetry-core"]
16
- build-backend = "poetry.core.masonry.api"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
requirements.txt ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ accelerate==0.34.2 ; python_version >= "3.10" and python_version < "4.0"
2
+ certifi==2024.8.30 ; python_version >= "3.10" and python_version < "4.0"
3
+ charset-normalizer==3.3.2 ; python_version >= "3.10" and python_version < "4.0"
4
+ colorama==0.4.6 ; python_version >= "3.10" and python_version < "4.0" and platform_system == "Windows"
5
+ diffusers==0.28.2 ; python_version >= "3.10" and python_version < "4.0"
6
+ einops==0.6.1 ; python_version >= "3.10" and python_version < "4.0"
7
+ filelock==3.16.1 ; python_version >= "3.10" and python_version < "4.0"
8
+ fsspec==2024.9.0 ; python_version >= "3.10" and python_version < "4.0"
9
+ huggingface-hub==0.25.1 ; python_version >= "3.10" and python_version < "4.0"
10
+ idna==3.10 ; python_version >= "3.10" and python_version < "4.0"
11
+ importlib-metadata==8.5.0 ; python_version >= "3.10" and python_version < "4.0"
12
+ intel-openmp==2021.4.0 ; python_version >= "3.10" and python_version < "4.0" and platform_system == "Windows"
13
+ jinja2==3.1.4 ; python_version >= "3.10" and python_version < "4.0"
14
+ markupsafe==2.1.5 ; python_version >= "3.10" and python_version < "4.0"
15
+ mkl==2021.4.0 ; python_version >= "3.10" and python_version < "4.0" and platform_system == "Windows"
16
+ mpmath==1.3.0 ; python_version >= "3.10" and python_version < "4.0"
17
+ networkx==3.3 ; python_version >= "3.10" and python_version < "4.0"
18
+ numpy==2.1.1 ; python_version >= "3.10" and python_version < "4.0"
19
+ nvidia-cublas-cu12==12.1.3.1 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
20
+ nvidia-cuda-cupti-cu12==12.1.105 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
21
+ nvidia-cuda-nvrtc-cu12==12.1.105 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
22
+ nvidia-cuda-runtime-cu12==12.1.105 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
23
+ nvidia-cudnn-cu12==8.9.2.26 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
24
+ nvidia-cufft-cu12==11.0.2.54 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
25
+ nvidia-curand-cu12==10.3.2.106 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
26
+ nvidia-cusolver-cu12==11.4.5.107 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
27
+ nvidia-cusparse-cu12==12.1.0.106 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
28
+ nvidia-nccl-cu12==2.20.5 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
29
+ nvidia-nvjitlink-cu12==12.6.68 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
30
+ nvidia-nvtx-cu12==12.1.105 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.10" and python_version < "4.0"
31
+ packaging==24.1 ; python_version >= "3.10" and python_version < "4.0"
32
+ pillow==10.4.0 ; python_version >= "3.10" and python_version < "4.0"
33
+ psutil==6.0.0 ; python_version >= "3.10" and python_version < "4.0"
34
+ pyyaml==6.0.2 ; python_version >= "3.10" and python_version < "4.0"
35
+ regex==2024.9.11 ; python_version >= "3.10" and python_version < "4.0"
36
+ requests==2.32.3 ; python_version >= "3.10" and python_version < "4.0"
37
+ safetensors==0.4.5 ; python_version >= "3.10" and python_version < "4.0"
38
+ sentencepiece==0.2.0 ; python_version >= "3.10" and python_version < "4.0"
39
+ sympy==1.13.3 ; python_version >= "3.10" and python_version < "4.0"
40
+ tbb==2021.13.1 ; python_version >= "3.10" and python_version < "4.0" and platform_system == "Windows"
41
+ tokenizers==0.19.1 ; python_version >= "3.10" and python_version < "4.0"
42
+ torch==2.3.0 ; python_version >= "3.10" and python_version < "4.0"
43
+ tqdm==4.66.5 ; python_version >= "3.10" and python_version < "4.0"
44
+ transformers==4.44.2 ; python_version >= "3.10" and python_version < "4.0"
45
+ triton==2.3.0 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version < "3.12" and python_version >= "3.10"
46
+ typing-extensions==4.12.2 ; python_version >= "3.10" and python_version < "4.0"
47
+ urllib3==2.2.3 ; python_version >= "3.10" and python_version < "4.0"
48
+ zipp==3.20.2 ; python_version >= "3.10" and python_version < "4.0"
setup.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from setuptools import setup, find_packages
2
+ def parse_requirements(filename):
3
+ """Load requirements from a pip requirements file."""
4
+ with open(filename, 'r') as file:
5
+ return file.read().splitlines()
6
+
7
+
8
+ setup(
9
+ name="xora-core", # The name of your package
10
+ version="0.1.0", # Version number of the package
11
+ description="A package for Xora model inferece", # Short description
12
+ author="YSapir Weissbuch", # Your name
13
+ author_email="[email protected]", # Your email
14
+ url="https://github.com/LightricksResearch/xora-core", # URL for the project (GitHub, etc.)
15
+ packages=find_packages(), # Automatically find all packages inside `xora`
16
+ install_requires=parse_requirements('requirements.txt'), # Install dependencies from requirements.txt
17
+ classifiers=[
18
+ 'Programming Language :: Python :: 3',
19
+ 'License :: OSI Approved :: MIT License',
20
+ 'Operating System :: OS Independent',
21
+ ],
22
+ python_requires='>=3.10', # Specify Python version compatibility
23
+ )
xora/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ from .pipelines import *
xora/examples/text_to_video.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from xora.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
3
+ from xora.models.transformers.transformer3d import Transformer3DModel
4
+ from xora.models.transformers.symmetric_patchifier import SymmetricPatchifier
5
+ from xora.schedulers.rf import RectifiedFlowScheduler
6
+ from xora.pipelines.pipeline_video_pixart_alpha import VideoPixArtAlphaPipeline
7
+ from pathlib import Path
8
+ from transformers import T5EncoderModel
9
+
10
+
11
+ model_name_or_path = "PixArt-alpha/PixArt-XL-2-1024-MS"
12
+ vae_local_path = Path("/opt/models/checkpoints/vae_training/causal_vvae_32x32x8_420m_cont_32/step_2296000")
13
+ dtype = torch.float32
14
+ vae = CausalVideoAutoencoder.from_pretrained(
15
+ pretrained_model_name_or_path=vae_local_path,
16
+ revision=False,
17
+ torch_dtype=torch.bfloat16,
18
+ load_in_8bit=False,
19
+ ).cuda()
20
+ transformer_config_path = Path("/opt/txt2img/txt2img/config/transformer3d/xora_v1.2-L.json")
21
+ transformer_config = Transformer3DModel.load_config(transformer_config_path)
22
+ transformer = Transformer3DModel.from_config(transformer_config)
23
+ transformer_local_path = Path("/opt/models/logs/v1.2-vae-mf-medHR-mr-cvae-nl/ckpt/01760000/model.pt")
24
+ transformer_ckpt_state_dict = torch.load(transformer_local_path)
25
+ transformer.load_state_dict(transformer_ckpt_state_dict, True)
26
+ transformer = transformer.cuda()
27
+ unet = transformer
28
+ scheduler_config_path = Path("/opt/txt2img/txt2img/config/scheduler/RF_SD3_shifted.json")
29
+ scheduler_config = RectifiedFlowScheduler.load_config(scheduler_config_path)
30
+ scheduler = RectifiedFlowScheduler.from_config(scheduler_config)
31
+ patchifier = SymmetricPatchifier(patch_size=1)
32
+ # text_encoder = T5EncoderModel.from_pretrained("t5-v1_1-xxl")
33
+
34
+ submodel_dict = {
35
+ "unet": unet,
36
+ "transformer": transformer,
37
+ "patchifier": patchifier,
38
+ "text_encoder": None,
39
+ "scheduler": scheduler,
40
+ "vae": vae,
41
+
42
+ }
43
+
44
+ pipeline = VideoPixArtAlphaPipeline.from_pretrained(model_name_or_path,
45
+ safety_checker=None,
46
+ revision=None,
47
+ torch_dtype=dtype,
48
+ **submodel_dict,
49
+ )
50
+
51
+ num_inference_steps=20
52
+ num_images_per_prompt=2
53
+ guidance_scale=3
54
+ height=512
55
+ width=768
56
+ num_frames=57
57
+ frame_rate=25
58
+ # sample = {
59
+ # "prompt": "A cat", # (B, L, E)
60
+ # 'prompt_attention_mask': None, # (B , L)
61
+ # 'negative_prompt': "Ugly deformed",
62
+ # 'negative_prompt_attention_mask': None # (B , L)
63
+ # }
64
+
65
+ sample = torch.load("/opt/sample.pt")
66
+ for _, item in sample.items():
67
+ if item is not None:
68
+ item = item.cuda()
69
+
70
+
71
+
72
+ images = pipeline(
73
+ num_inference_steps=num_inference_steps,
74
+ num_images_per_prompt=num_images_per_prompt,
75
+ guidance_scale=guidance_scale,
76
+ generator=None,
77
+ output_type="pt",
78
+ callback_on_step_end=None,
79
+ height=height,
80
+ width=width,
81
+ num_frames=num_frames,
82
+ frame_rate=frame_rate,
83
+ **sample,
84
+ is_video=True,
85
+ vae_per_channel_normalize=True,
86
+ ).images
87
+
88
+ print()
xora/models/__init__.py ADDED
File without changes
xora/models/autoencoders/__init__.py ADDED
File without changes
xora/models/autoencoders/causal_conv3d.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple, Union
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+
6
+
7
+ class CausalConv3d(nn.Module):
8
+ def __init__(
9
+ self,
10
+ in_channels,
11
+ out_channels,
12
+ kernel_size: int = 3,
13
+ stride: Union[int, Tuple[int]] = 1,
14
+ **kwargs,
15
+ ):
16
+ super().__init__()
17
+
18
+ self.in_channels = in_channels
19
+ self.out_channels = out_channels
20
+
21
+ kernel_size = (kernel_size, kernel_size, kernel_size)
22
+ self.time_kernel_size = kernel_size[0]
23
+
24
+ dilation = kwargs.pop("dilation", 1)
25
+ dilation = (dilation, 1, 1)
26
+
27
+ height_pad = kernel_size[1] // 2
28
+ width_pad = kernel_size[2] // 2
29
+ padding = (0, height_pad, width_pad)
30
+
31
+ self.conv = nn.Conv3d(
32
+ in_channels,
33
+ out_channels,
34
+ kernel_size,
35
+ stride=stride,
36
+ dilation=dilation,
37
+ padding=padding,
38
+ padding_mode="zeros",
39
+ )
40
+
41
+ def forward(self, x, causal: bool = True):
42
+ if causal:
43
+ first_frame_pad = x[:, :, :1, :, :].repeat((1, 1, self.time_kernel_size - 1, 1, 1))
44
+ x = torch.concatenate((first_frame_pad, x), dim=2)
45
+ else:
46
+ first_frame_pad = x[:, :, :1, :, :].repeat((1, 1, (self.time_kernel_size - 1) // 2, 1, 1))
47
+ last_frame_pad = x[:, :, -1:, :, :].repeat((1, 1, (self.time_kernel_size - 1) // 2, 1, 1))
48
+ x = torch.concatenate((first_frame_pad, x, last_frame_pad), dim=2)
49
+ x = self.conv(x)
50
+ return x
51
+
52
+ @property
53
+ def weight(self):
54
+ return self.conv.weight
xora/models/autoencoders/causal_video_autoencoder.py ADDED
@@ -0,0 +1,763 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+ from functools import partial
4
+ from types import SimpleNamespace
5
+ from typing import Any, Mapping, Optional, Tuple, Union, List
6
+
7
+ import torch
8
+ import numpy as np
9
+ from einops import rearrange
10
+ from torch import nn
11
+
12
+ from xora.models.autoencoders.conv_nd_factory import make_conv_nd, make_linear_nd
13
+ from xora.models.autoencoders.pixel_norm import PixelNorm
14
+ from xora.models.autoencoders.vae import AutoencoderKLWrapper
15
+
16
+
17
+ class CausalVideoAutoencoder(AutoencoderKLWrapper):
18
+ @classmethod
19
+ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *args, **kwargs):
20
+ config_local_path = pretrained_model_name_or_path / "config.json"
21
+ config = cls.load_config(config_local_path, **kwargs)
22
+ video_vae = cls.from_config(config)
23
+ video_vae.to(kwargs["torch_dtype"])
24
+
25
+ model_local_path = pretrained_model_name_or_path / "autoencoder.pth"
26
+ ckpt_state_dict = torch.load(model_local_path, map_location=torch.device("cpu"))
27
+ video_vae.load_state_dict(ckpt_state_dict)
28
+
29
+ statistics_local_path = pretrained_model_name_or_path / "per_channel_statistics.json"
30
+ if statistics_local_path.exists():
31
+ with open(statistics_local_path, "r") as file:
32
+ data = json.load(file)
33
+ transposed_data = list(zip(*data["data"]))
34
+ data_dict = {col: torch.tensor(vals) for col, vals in zip(data["columns"], transposed_data)}
35
+ video_vae.register_buffer("std_of_means", data_dict["std-of-means"])
36
+ video_vae.register_buffer(
37
+ "mean_of_means", data_dict.get("mean-of-means", torch.zeros_like(data_dict["std-of-means"]))
38
+ )
39
+
40
+ return video_vae
41
+
42
+ @staticmethod
43
+ def from_config(config):
44
+ assert config["_class_name"] == "CausalVideoAutoencoder", "config must have _class_name=CausalVideoAutoencoder"
45
+ if isinstance(config["dims"], list):
46
+ config["dims"] = tuple(config["dims"])
47
+
48
+ assert config["dims"] in [2, 3, (2, 1)], "dims must be 2, 3 or (2, 1)"
49
+
50
+ double_z = config.get("double_z", True)
51
+ latent_log_var = config.get("latent_log_var", "per_channel" if double_z else "none")
52
+ use_quant_conv = config.get("use_quant_conv", True)
53
+
54
+ if use_quant_conv and latent_log_var == "uniform":
55
+ raise ValueError("uniform latent_log_var requires use_quant_conv=False")
56
+
57
+ encoder = Encoder(
58
+ dims=config["dims"],
59
+ in_channels=config.get("in_channels", 3),
60
+ out_channels=config["latent_channels"],
61
+ blocks=config["blocks"],
62
+ patch_size=config.get("patch_size", 1),
63
+ latent_log_var=latent_log_var,
64
+ norm_layer=config.get("norm_layer", "group_norm"),
65
+ )
66
+
67
+ decoder = Decoder(
68
+ dims=config["dims"],
69
+ in_channels=config["latent_channels"],
70
+ out_channels=config.get("out_channels", 3),
71
+ blocks=config["blocks"],
72
+ patch_size=config.get("patch_size", 1),
73
+ norm_layer=config.get("norm_layer", "group_norm"),
74
+ causal=config.get("causal_decoder", False),
75
+ )
76
+
77
+ dims = config["dims"]
78
+ return CausalVideoAutoencoder(
79
+ encoder=encoder,
80
+ decoder=decoder,
81
+ latent_channels=config["latent_channels"],
82
+ dims=dims,
83
+ use_quant_conv=use_quant_conv,
84
+ )
85
+
86
+ @property
87
+ def config(self):
88
+ return SimpleNamespace(
89
+ _class_name="CausalVideoAutoencoder",
90
+ dims=self.dims,
91
+ in_channels=self.encoder.conv_in.in_channels // self.encoder.patch_size**2,
92
+ out_channels=self.decoder.conv_out.out_channels // self.decoder.patch_size**2,
93
+ latent_channels=self.decoder.conv_in.in_channels,
94
+ blocks=self.encoder.blocks_desc,
95
+ scaling_factor=1.0,
96
+ norm_layer=self.encoder.norm_layer,
97
+ patch_size=self.encoder.patch_size,
98
+ latent_log_var=self.encoder.latent_log_var,
99
+ use_quant_conv=self.use_quant_conv,
100
+ causal_decoder=self.decoder.causal,
101
+ )
102
+
103
+ @property
104
+ def is_video_supported(self):
105
+ """
106
+ Check if the model supports video inputs of shape (B, C, F, H, W). Otherwise, the model only supports 2D images.
107
+ """
108
+ return self.dims != 2
109
+
110
+ @property
111
+ def spatial_downscale_factor(self):
112
+ return (
113
+ 2 ** len([block for block in self.encoder.blocks_desc if block[0] in ["compress_space", "compress_all"]])
114
+ * self.encoder.patch_size
115
+ )
116
+
117
+ @property
118
+ def temporal_downscale_factor(self):
119
+ return 2 ** len([block for block in self.encoder.blocks_desc if block[0] in ["compress_time", "compress_all"]])
120
+
121
+ def to_json_string(self) -> str:
122
+ import json
123
+
124
+ return json.dumps(self.config.__dict__)
125
+
126
+ def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):
127
+ model_keys = set(name for name, _ in self.named_parameters())
128
+
129
+ key_mapping = {
130
+ ".resnets.": ".res_blocks.",
131
+ "downsamplers.0": "downsample",
132
+ "upsamplers.0": "upsample",
133
+ }
134
+
135
+ converted_state_dict = {}
136
+ for key, value in state_dict.items():
137
+ for k, v in key_mapping.items():
138
+ key = key.replace(k, v)
139
+
140
+ if "norm" in key and key not in model_keys:
141
+ print(f"Removing key {key} from state_dict as it is not present in the model")
142
+ continue
143
+
144
+ converted_state_dict[key] = value
145
+
146
+ super().load_state_dict(converted_state_dict, strict=strict)
147
+
148
+ def last_layer(self):
149
+ if hasattr(self.decoder, "conv_out"):
150
+ if isinstance(self.decoder.conv_out, nn.Sequential):
151
+ last_layer = self.decoder.conv_out[-1]
152
+ else:
153
+ last_layer = self.decoder.conv_out
154
+ else:
155
+ last_layer = self.decoder.layers[-1]
156
+ return last_layer
157
+
158
+
159
+ class Encoder(nn.Module):
160
+ r"""
161
+ The `Encoder` layer of a variational autoencoder that encodes its input into a latent representation.
162
+
163
+ Args:
164
+ dims (`int` or `Tuple[int, int]`, *optional*, defaults to 3):
165
+ The number of dimensions to use in convolutions.
166
+ in_channels (`int`, *optional*, defaults to 3):
167
+ The number of input channels.
168
+ out_channels (`int`, *optional*, defaults to 3):
169
+ The number of output channels.
170
+ blocks (`List[Tuple[str, int]]`, *optional*, defaults to `[("res_x", 1)]`):
171
+ The blocks to use. Each block is a tuple of the block name and the number of layers.
172
+ base_channels (`int`, *optional*, defaults to 128):
173
+ The number of output channels for the first convolutional layer.
174
+ norm_num_groups (`int`, *optional*, defaults to 32):
175
+ The number of groups for normalization.
176
+ patch_size (`int`, *optional*, defaults to 1):
177
+ The patch size to use. Should be a power of 2.
178
+ norm_layer (`str`, *optional*, defaults to `group_norm`):
179
+ The normalization layer to use. Can be either `group_norm` or `pixel_norm`.
180
+ latent_log_var (`str`, *optional*, defaults to `per_channel`):
181
+ The number of channels for the log variance. Can be either `per_channel`, `uniform`, or `none`.
182
+ """
183
+
184
+ def __init__(
185
+ self,
186
+ dims: Union[int, Tuple[int, int]] = 3,
187
+ in_channels: int = 3,
188
+ out_channels: int = 3,
189
+ blocks: List[Tuple[str, int]] = [("res_x", 1)],
190
+ base_channels: int = 128,
191
+ norm_num_groups: int = 32,
192
+ patch_size: Union[int, Tuple[int]] = 1,
193
+ norm_layer: str = "group_norm", # group_norm, pixel_norm
194
+ latent_log_var: str = "per_channel",
195
+ ):
196
+ super().__init__()
197
+ self.patch_size = patch_size
198
+ self.norm_layer = norm_layer
199
+ self.latent_channels = out_channels
200
+ self.latent_log_var = latent_log_var
201
+ self.blocks_desc = blocks
202
+
203
+ in_channels = in_channels * patch_size**2
204
+ output_channel = base_channels
205
+
206
+ self.conv_in = make_conv_nd(
207
+ dims=dims,
208
+ in_channels=in_channels,
209
+ out_channels=output_channel,
210
+ kernel_size=3,
211
+ stride=1,
212
+ padding=1,
213
+ causal=True,
214
+ )
215
+
216
+ self.down_blocks = nn.ModuleList([])
217
+
218
+ for block_name, num_layers in blocks:
219
+ input_channel = output_channel
220
+
221
+ if block_name == "res_x":
222
+ block = UNetMidBlock3D(
223
+ dims=dims,
224
+ in_channels=input_channel,
225
+ num_layers=num_layers,
226
+ resnet_eps=1e-6,
227
+ resnet_groups=norm_num_groups,
228
+ norm_layer=norm_layer,
229
+ )
230
+ elif block_name == "res_x_y":
231
+ output_channel = 2 * output_channel
232
+ block = ResnetBlock3D(
233
+ dims=dims,
234
+ in_channels=input_channel,
235
+ out_channels=output_channel,
236
+ eps=1e-6,
237
+ groups=norm_num_groups,
238
+ norm_layer=norm_layer,
239
+ )
240
+ elif block_name == "compress_time":
241
+ block = make_conv_nd(
242
+ dims=dims,
243
+ in_channels=input_channel,
244
+ out_channels=output_channel,
245
+ kernel_size=3,
246
+ stride=(2, 1, 1),
247
+ causal=True,
248
+ )
249
+ elif block_name == "compress_space":
250
+ block = make_conv_nd(
251
+ dims=dims,
252
+ in_channels=input_channel,
253
+ out_channels=output_channel,
254
+ kernel_size=3,
255
+ stride=(1, 2, 2),
256
+ causal=True,
257
+ )
258
+ elif block_name == "compress_all":
259
+ block = make_conv_nd(
260
+ dims=dims,
261
+ in_channels=input_channel,
262
+ out_channels=output_channel,
263
+ kernel_size=3,
264
+ stride=(2, 2, 2),
265
+ causal=True,
266
+ )
267
+ else:
268
+ raise ValueError(f"unknown block: {block_name}")
269
+
270
+ self.down_blocks.append(block)
271
+
272
+ # out
273
+ if norm_layer == "group_norm":
274
+ self.conv_norm_out = nn.GroupNorm(num_channels=output_channel, num_groups=norm_num_groups, eps=1e-6)
275
+ elif norm_layer == "pixel_norm":
276
+ self.conv_norm_out = PixelNorm()
277
+ elif norm_layer == "layer_norm":
278
+ self.conv_norm_out = LayerNorm(output_channel, eps=1e-6)
279
+
280
+ self.conv_act = nn.SiLU()
281
+
282
+ conv_out_channels = out_channels
283
+ if latent_log_var == "per_channel":
284
+ conv_out_channels *= 2
285
+ elif latent_log_var == "uniform":
286
+ conv_out_channels += 1
287
+ elif latent_log_var != "none":
288
+ raise ValueError(f"Invalid latent_log_var: {latent_log_var}")
289
+ self.conv_out = make_conv_nd(dims, output_channel, conv_out_channels, 3, padding=1, causal=True)
290
+
291
+ self.gradient_checkpointing = False
292
+
293
+ def forward(self, sample: torch.FloatTensor) -> torch.FloatTensor:
294
+ r"""The forward method of the `Encoder` class."""
295
+
296
+ sample = patchify(sample, patch_size_hw=self.patch_size, patch_size_t=1)
297
+ sample = self.conv_in(sample)
298
+
299
+ checkpoint_fn = (
300
+ partial(torch.utils.checkpoint.checkpoint, use_reentrant=False)
301
+ if self.gradient_checkpointing and self.training
302
+ else lambda x: x
303
+ )
304
+
305
+ for down_block in self.down_blocks:
306
+ sample = checkpoint_fn(down_block)(sample)
307
+
308
+ sample = self.conv_norm_out(sample)
309
+ sample = self.conv_act(sample)
310
+ sample = self.conv_out(sample)
311
+
312
+ if self.latent_log_var == "uniform":
313
+ last_channel = sample[:, -1:, ...]
314
+ num_dims = sample.dim()
315
+
316
+ if num_dims == 4:
317
+ # For shape (B, C, H, W)
318
+ repeated_last_channel = last_channel.repeat(1, sample.shape[1] - 2, 1, 1)
319
+ sample = torch.cat([sample, repeated_last_channel], dim=1)
320
+ elif num_dims == 5:
321
+ # For shape (B, C, F, H, W)
322
+ repeated_last_channel = last_channel.repeat(1, sample.shape[1] - 2, 1, 1, 1)
323
+ sample = torch.cat([sample, repeated_last_channel], dim=1)
324
+ else:
325
+ raise ValueError(f"Invalid input shape: {sample.shape}")
326
+
327
+ return sample
328
+
329
+
330
+ class Decoder(nn.Module):
331
+ r"""
332
+ The `Decoder` layer of a variational autoencoder that decodes its latent representation into an output sample.
333
+
334
+ Args:
335
+ dims (`int` or `Tuple[int, int]`, *optional*, defaults to 3):
336
+ The number of dimensions to use in convolutions.
337
+ in_channels (`int`, *optional*, defaults to 3):
338
+ The number of input channels.
339
+ out_channels (`int`, *optional*, defaults to 3):
340
+ The number of output channels.
341
+ blocks (`List[Tuple[str, int]]`, *optional*, defaults to `[("res_x", 1)]`):
342
+ The blocks to use. Each block is a tuple of the block name and the number of layers.
343
+ base_channels (`int`, *optional*, defaults to 128):
344
+ The number of output channels for the first convolutional layer.
345
+ norm_num_groups (`int`, *optional*, defaults to 32):
346
+ The number of groups for normalization.
347
+ patch_size (`int`, *optional*, defaults to 1):
348
+ The patch size to use. Should be a power of 2.
349
+ norm_layer (`str`, *optional*, defaults to `group_norm`):
350
+ The normalization layer to use. Can be either `group_norm` or `pixel_norm`.
351
+ causal (`bool`, *optional*, defaults to `True`):
352
+ Whether to use causal convolutions or not.
353
+ """
354
+
355
+ def __init__(
356
+ self,
357
+ dims,
358
+ in_channels: int = 3,
359
+ out_channels: int = 3,
360
+ blocks: List[Tuple[str, int]] = [("res_x", 1)],
361
+ base_channels: int = 128,
362
+ layers_per_block: int = 2,
363
+ norm_num_groups: int = 32,
364
+ patch_size: int = 1,
365
+ norm_layer: str = "group_norm",
366
+ causal: bool = True,
367
+ ):
368
+ super().__init__()
369
+ self.patch_size = patch_size
370
+ self.layers_per_block = layers_per_block
371
+ out_channels = out_channels * patch_size**2
372
+ num_channel_doubles = len([x for x in blocks if x[0] == "res_x_y"])
373
+ output_channel = base_channels * 2**num_channel_doubles
374
+ self.causal = causal
375
+
376
+ self.conv_in = make_conv_nd(
377
+ dims,
378
+ in_channels,
379
+ output_channel,
380
+ kernel_size=3,
381
+ stride=1,
382
+ padding=1,
383
+ causal=True,
384
+ )
385
+
386
+ self.up_blocks = nn.ModuleList([])
387
+
388
+ for block_name, num_layers in list(reversed(blocks)):
389
+ input_channel = output_channel
390
+
391
+ if block_name == "res_x":
392
+ block = UNetMidBlock3D(
393
+ dims=dims,
394
+ in_channels=input_channel,
395
+ num_layers=num_layers,
396
+ resnet_eps=1e-6,
397
+ resnet_groups=norm_num_groups,
398
+ norm_layer=norm_layer,
399
+ )
400
+ elif block_name == "res_x_y":
401
+ output_channel = output_channel // 2
402
+ block = ResnetBlock3D(
403
+ dims=dims,
404
+ in_channels=input_channel,
405
+ out_channels=output_channel,
406
+ eps=1e-6,
407
+ groups=norm_num_groups,
408
+ norm_layer=norm_layer,
409
+ )
410
+ elif block_name == "compress_time":
411
+ block = DepthToSpaceUpsample(dims=dims, in_channels=input_channel, stride=(2, 1, 1))
412
+ elif block_name == "compress_space":
413
+ block = DepthToSpaceUpsample(dims=dims, in_channels=input_channel, stride=(1, 2, 2))
414
+ elif block_name == "compress_all":
415
+ block = DepthToSpaceUpsample(dims=dims, in_channels=input_channel, stride=(2, 2, 2))
416
+ else:
417
+ raise ValueError(f"unknown layer: {block_name}")
418
+
419
+ self.up_blocks.append(block)
420
+
421
+ if norm_layer == "group_norm":
422
+ self.conv_norm_out = nn.GroupNorm(num_channels=output_channel, num_groups=norm_num_groups, eps=1e-6)
423
+ elif norm_layer == "pixel_norm":
424
+ self.conv_norm_out = PixelNorm()
425
+ elif norm_layer == "layer_norm":
426
+ self.conv_norm_out = LayerNorm(output_channel, eps=1e-6)
427
+
428
+ self.conv_act = nn.SiLU()
429
+ self.conv_out = make_conv_nd(dims, output_channel, out_channels, 3, padding=1, causal=True)
430
+
431
+ self.gradient_checkpointing = False
432
+
433
+ def forward(self, sample: torch.FloatTensor, target_shape) -> torch.FloatTensor:
434
+ r"""The forward method of the `Decoder` class."""
435
+ assert target_shape is not None, "target_shape must be provided"
436
+
437
+ sample = self.conv_in(sample, causal=self.causal)
438
+
439
+ upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
440
+
441
+ checkpoint_fn = (
442
+ partial(torch.utils.checkpoint.checkpoint, use_reentrant=False)
443
+ if self.gradient_checkpointing and self.training
444
+ else lambda x: x
445
+ )
446
+
447
+ sample = sample.to(upscale_dtype)
448
+
449
+ for up_block in self.up_blocks:
450
+ sample = checkpoint_fn(up_block)(sample, causal=self.causal)
451
+
452
+ sample = self.conv_norm_out(sample)
453
+ sample = self.conv_act(sample)
454
+ sample = self.conv_out(sample, causal=self.causal)
455
+
456
+ sample = unpatchify(sample, patch_size_hw=self.patch_size, patch_size_t=1)
457
+
458
+ return sample
459
+
460
+
461
+ class UNetMidBlock3D(nn.Module):
462
+ """
463
+ A 3D UNet mid-block [`UNetMidBlock3D`] with multiple residual blocks.
464
+
465
+ Args:
466
+ in_channels (`int`): The number of input channels.
467
+ dropout (`float`, *optional*, defaults to 0.0): The dropout rate.
468
+ num_layers (`int`, *optional*, defaults to 1): The number of residual blocks.
469
+ resnet_eps (`float`, *optional*, 1e-6 ): The epsilon value for the resnet blocks.
470
+ resnet_groups (`int`, *optional*, defaults to 32):
471
+ The number of groups to use in the group normalization layers of the resnet blocks.
472
+
473
+ Returns:
474
+ `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size,
475
+ in_channels, height, width)`.
476
+
477
+ """
478
+
479
+ def __init__(
480
+ self,
481
+ dims: Union[int, Tuple[int, int]],
482
+ in_channels: int,
483
+ dropout: float = 0.0,
484
+ num_layers: int = 1,
485
+ resnet_eps: float = 1e-6,
486
+ resnet_groups: int = 32,
487
+ norm_layer: str = "group_norm",
488
+ ):
489
+ super().__init__()
490
+ resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
491
+
492
+ self.res_blocks = nn.ModuleList(
493
+ [
494
+ ResnetBlock3D(
495
+ dims=dims,
496
+ in_channels=in_channels,
497
+ out_channels=in_channels,
498
+ eps=resnet_eps,
499
+ groups=resnet_groups,
500
+ dropout=dropout,
501
+ norm_layer=norm_layer,
502
+ )
503
+ for _ in range(num_layers)
504
+ ]
505
+ )
506
+
507
+ def forward(self, hidden_states: torch.FloatTensor, causal: bool = True) -> torch.FloatTensor:
508
+ for resnet in self.res_blocks:
509
+ hidden_states = resnet(hidden_states, causal=causal)
510
+
511
+ return hidden_states
512
+
513
+
514
+ class DepthToSpaceUpsample(nn.Module):
515
+ def __init__(self, dims, in_channels, stride):
516
+ super().__init__()
517
+ self.stride = stride
518
+ self.out_channels = np.prod(stride) * in_channels
519
+ self.conv = make_conv_nd(
520
+ dims=dims,
521
+ in_channels=in_channels,
522
+ out_channels=self.out_channels,
523
+ kernel_size=3,
524
+ stride=1,
525
+ causal=True,
526
+ )
527
+
528
+ def forward(self, x, causal: bool = True):
529
+ x = self.conv(x, causal=causal)
530
+ x = rearrange(
531
+ x,
532
+ "b (c p1 p2 p3) d h w -> b c (d p1) (h p2) (w p3)",
533
+ p1=self.stride[0],
534
+ p2=self.stride[1],
535
+ p3=self.stride[2],
536
+ )
537
+ if self.stride[0] == 2:
538
+ x = x[:, :, 1:, :, :]
539
+ return x
540
+
541
+
542
+ class LayerNorm(nn.Module):
543
+ def __init__(self, dim, eps, elementwise_affine=True) -> None:
544
+ super().__init__()
545
+ self.norm = nn.LayerNorm(dim, eps=eps, elementwise_affine=elementwise_affine)
546
+
547
+ def forward(self, x):
548
+ x = rearrange(x, "b c d h w -> b d h w c")
549
+ x = self.norm(x)
550
+ x = rearrange(x, "b d h w c -> b c d h w")
551
+ return x
552
+
553
+
554
+ class ResnetBlock3D(nn.Module):
555
+ r"""
556
+ A Resnet block.
557
+
558
+ Parameters:
559
+ in_channels (`int`): The number of channels in the input.
560
+ out_channels (`int`, *optional*, default to be `None`):
561
+ The number of output channels for the first conv layer. If None, same as `in_channels`.
562
+ dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.
563
+ groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer.
564
+ eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.
565
+ """
566
+
567
+ def __init__(
568
+ self,
569
+ dims: Union[int, Tuple[int, int]],
570
+ in_channels: int,
571
+ out_channels: Optional[int] = None,
572
+ conv_shortcut: bool = False,
573
+ dropout: float = 0.0,
574
+ groups: int = 32,
575
+ eps: float = 1e-6,
576
+ norm_layer: str = "group_norm",
577
+ ):
578
+ super().__init__()
579
+ self.in_channels = in_channels
580
+ out_channels = in_channels if out_channels is None else out_channels
581
+ self.out_channels = out_channels
582
+ self.use_conv_shortcut = conv_shortcut
583
+
584
+ if norm_layer == "group_norm":
585
+ self.norm1 = nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
586
+ elif norm_layer == "pixel_norm":
587
+ self.norm1 = PixelNorm()
588
+ elif norm_layer == "layer_norm":
589
+ self.norm1 = LayerNorm(in_channels, eps=eps, elementwise_affine=True)
590
+
591
+ self.non_linearity = nn.SiLU()
592
+
593
+ self.conv1 = make_conv_nd(dims, in_channels, out_channels, kernel_size=3, stride=1, padding=1, causal=True)
594
+
595
+ if norm_layer == "group_norm":
596
+ self.norm2 = nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True)
597
+ elif norm_layer == "pixel_norm":
598
+ self.norm2 = PixelNorm()
599
+ elif norm_layer == "layer_norm":
600
+ self.norm2 = LayerNorm(out_channels, eps=eps, elementwise_affine=True)
601
+
602
+ self.dropout = torch.nn.Dropout(dropout)
603
+
604
+ self.conv2 = make_conv_nd(dims, out_channels, out_channels, kernel_size=3, stride=1, padding=1, causal=True)
605
+
606
+ self.conv_shortcut = (
607
+ make_linear_nd(dims=dims, in_channels=in_channels, out_channels=out_channels)
608
+ if in_channels != out_channels
609
+ else nn.Identity()
610
+ )
611
+
612
+ self.norm3 = (
613
+ LayerNorm(in_channels, eps=eps, elementwise_affine=True) if in_channels != out_channels else nn.Identity()
614
+ )
615
+
616
+ def forward(
617
+ self,
618
+ input_tensor: torch.FloatTensor,
619
+ causal: bool = True,
620
+ ) -> torch.FloatTensor:
621
+ hidden_states = input_tensor
622
+
623
+ hidden_states = self.norm1(hidden_states)
624
+
625
+ hidden_states = self.non_linearity(hidden_states)
626
+
627
+ hidden_states = self.conv1(hidden_states, causal=causal)
628
+
629
+ hidden_states = self.norm2(hidden_states)
630
+
631
+ hidden_states = self.non_linearity(hidden_states)
632
+
633
+ hidden_states = self.dropout(hidden_states)
634
+
635
+ hidden_states = self.conv2(hidden_states, causal=causal)
636
+
637
+ input_tensor = self.norm3(input_tensor)
638
+
639
+ input_tensor = self.conv_shortcut(input_tensor)
640
+
641
+ output_tensor = input_tensor + hidden_states
642
+
643
+ return output_tensor
644
+
645
+
646
+ def patchify(x, patch_size_hw, patch_size_t=1):
647
+ if patch_size_hw == 1 and patch_size_t == 1:
648
+ return x
649
+ if x.dim() == 4:
650
+ x = rearrange(x, "b c (h q) (w r) -> b (c r q) h w", q=patch_size_hw, r=patch_size_hw)
651
+ elif x.dim() == 5:
652
+ x = rearrange(x, "b c (f p) (h q) (w r) -> b (c p r q) f h w", p=patch_size_t, q=patch_size_hw, r=patch_size_hw)
653
+ else:
654
+ raise ValueError(f"Invalid input shape: {x.shape}")
655
+
656
+ return x
657
+
658
+
659
+ def unpatchify(x, patch_size_hw, patch_size_t=1):
660
+ if patch_size_hw == 1 and patch_size_t == 1:
661
+ return x
662
+
663
+ if x.dim() == 4:
664
+ x = rearrange(x, "b (c r q) h w -> b c (h q) (w r)", q=patch_size_hw, r=patch_size_hw)
665
+ elif x.dim() == 5:
666
+ x = rearrange(x, "b (c p r q) f h w -> b c (f p) (h q) (w r)", p=patch_size_t, q=patch_size_hw, r=patch_size_hw)
667
+
668
+ return x
669
+
670
+
671
+ def create_video_autoencoder_config(
672
+ latent_channels: int = 64,
673
+ ):
674
+ config = {
675
+ "_class_name": "CausalVideoAutoencoder",
676
+ "dims": 3, # (2, 1), # 2 for Conv2, 3 for Conv3d, (2, 1) for Conv2d followed by Conv1d
677
+ "in_channels": 3, # Number of input color channels (e.g., RGB)
678
+ "out_channels": 3, # Number of output color channels
679
+ "latent_channels": latent_channels, # Number of channels in the latent space representation
680
+ "blocks": [
681
+ ("res_x", 4),
682
+ ("compress_space", 1),
683
+ ("res_x_y", 1),
684
+ ("res_x", 2),
685
+ ("compress_all", 1),
686
+ ("res_x", 3),
687
+ ("compress_all", 1),
688
+ ("res_x_y", 1),
689
+ ("res_x", 2),
690
+ ("compress_time", 1),
691
+ ("res_x", 3),
692
+ ("res_x", 3),
693
+ ],
694
+ "patch_size": 4,
695
+ "latent_log_var": "uniform",
696
+ "use_quant_conv": False,
697
+ "norm_layer": "layer_norm",
698
+ "causal_decoder": True,
699
+ }
700
+
701
+ return config
702
+
703
+
704
+ def test_vae_patchify_unpatchify():
705
+ import torch
706
+
707
+ x = torch.randn(2, 3, 8, 64, 64)
708
+ x_patched = patchify(x, patch_size_hw=4, patch_size_t=4)
709
+ x_unpatched = unpatchify(x_patched, patch_size_hw=4, patch_size_t=4)
710
+ assert torch.allclose(x, x_unpatched)
711
+
712
+
713
+ def demo_video_autoencoder_forward_backward():
714
+ # Configuration for the VideoAutoencoder
715
+ config = create_video_autoencoder_config()
716
+
717
+ # Instantiate the VideoAutoencoder with the specified configuration
718
+ video_autoencoder = CausalVideoAutoencoder.from_config(config)
719
+
720
+ print(video_autoencoder)
721
+ video_autoencoder.eval()
722
+ # Print the total number of parameters in the video autoencoder
723
+ total_params = sum(p.numel() for p in video_autoencoder.parameters())
724
+ print(f"Total number of parameters in VideoAutoencoder: {total_params:,}")
725
+
726
+ # Create a mock input tensor simulating a batch of videos
727
+ # Shape: (batch_size, channels, depth, height, width)
728
+ # E.g., 4 videos, each with 3 color channels, 16 frames, and 64x64 pixels per frame
729
+ input_videos = torch.randn(2, 3, 17, 64, 64)
730
+
731
+ # Forward pass: encode and decode the input videos
732
+ latent = video_autoencoder.encode(input_videos).latent_dist.mode()
733
+ print(f"input shape={input_videos.shape}")
734
+ print(f"latent shape={latent.shape}")
735
+
736
+ reconstructed_videos = video_autoencoder.decode(latent, target_shape=input_videos.shape).sample
737
+
738
+ print(f"reconstructed shape={reconstructed_videos.shape}")
739
+
740
+ # Validate that single image gets treated the same way as first frame
741
+ input_image = input_videos[:, :, :1, :, :]
742
+ image_latent = video_autoencoder.encode(input_image).latent_dist.mode()
743
+ reconstructed_image = video_autoencoder.decode(image_latent, target_shape=image_latent.shape).sample
744
+
745
+ first_frame_latent = latent[:, :, :1, :, :]
746
+
747
+ # assert torch.allclose(image_latent, first_frame_latent, atol=1e-6)
748
+ # assert torch.allclose(reconstructed_image, reconstructed_videos[:, :, :1, :, :], atol=1e-6)
749
+ assert (image_latent == first_frame_latent).all()
750
+ assert (reconstructed_image == reconstructed_videos[:, :, :1, :, :]).all()
751
+
752
+ # Calculate the loss (e.g., mean squared error)
753
+ loss = torch.nn.functional.mse_loss(input_videos, reconstructed_videos)
754
+
755
+ # Perform backward pass
756
+ loss.backward()
757
+
758
+ print(f"Demo completed with loss: {loss.item()}")
759
+
760
+
761
+ # Ensure to call the demo function to execute the forward and backward pass
762
+ if __name__ == "__main__":
763
+ demo_video_autoencoder_forward_backward()
xora/models/autoencoders/conv_nd_factory.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Tuple, Union
2
+
3
+ import torch
4
+
5
+ from xora.models.autoencoders.dual_conv3d import DualConv3d
6
+ from xora.models.autoencoders.causal_conv3d import CausalConv3d
7
+
8
+
9
+ def make_conv_nd(
10
+ dims: Union[int, Tuple[int, int]],
11
+ in_channels: int,
12
+ out_channels: int,
13
+ kernel_size: int,
14
+ stride=1,
15
+ padding=0,
16
+ dilation=1,
17
+ groups=1,
18
+ bias=True,
19
+ causal=False,
20
+ ):
21
+ if dims == 2:
22
+ return torch.nn.Conv2d(
23
+ in_channels=in_channels,
24
+ out_channels=out_channels,
25
+ kernel_size=kernel_size,
26
+ stride=stride,
27
+ padding=padding,
28
+ dilation=dilation,
29
+ groups=groups,
30
+ bias=bias,
31
+ )
32
+ elif dims == 3:
33
+ if causal:
34
+ return CausalConv3d(
35
+ in_channels=in_channels,
36
+ out_channels=out_channels,
37
+ kernel_size=kernel_size,
38
+ stride=stride,
39
+ padding=padding,
40
+ dilation=dilation,
41
+ groups=groups,
42
+ bias=bias,
43
+ )
44
+ return torch.nn.Conv3d(
45
+ in_channels=in_channels,
46
+ out_channels=out_channels,
47
+ kernel_size=kernel_size,
48
+ stride=stride,
49
+ padding=padding,
50
+ dilation=dilation,
51
+ groups=groups,
52
+ bias=bias,
53
+ )
54
+ elif dims == (2, 1):
55
+ return DualConv3d(
56
+ in_channels=in_channels,
57
+ out_channels=out_channels,
58
+ kernel_size=kernel_size,
59
+ stride=stride,
60
+ padding=padding,
61
+ bias=bias,
62
+ )
63
+ else:
64
+ raise ValueError(f"unsupported dimensions: {dims}")
65
+
66
+
67
+ def make_linear_nd(
68
+ dims: int,
69
+ in_channels: int,
70
+ out_channels: int,
71
+ bias=True,
72
+ ):
73
+ if dims == 2:
74
+ return torch.nn.Conv2d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=bias)
75
+ elif dims == 3 or dims == (2, 1):
76
+ return torch.nn.Conv3d(in_channels=in_channels, out_channels=out_channels, kernel_size=1, bias=bias)
77
+ else:
78
+ raise ValueError(f"unsupported dimensions: {dims}")
xora/models/autoencoders/dual_conv3d.py ADDED
@@ -0,0 +1,165 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import math
2
+ from typing import Tuple, Union
3
+
4
+ import torch
5
+ import torch.nn as nn
6
+ import torch.nn.functional as F
7
+ from einops import rearrange
8
+
9
+
10
+ class DualConv3d(nn.Module):
11
+ def __init__(
12
+ self,
13
+ in_channels,
14
+ out_channels,
15
+ kernel_size,
16
+ stride: Union[int, Tuple[int, int, int]] = 1,
17
+ padding: Union[int, Tuple[int, int, int]] = 0,
18
+ dilation: Union[int, Tuple[int, int, int]] = 1,
19
+ groups=1,
20
+ bias=True,
21
+ ):
22
+ super(DualConv3d, self).__init__()
23
+
24
+ self.in_channels = in_channels
25
+ self.out_channels = out_channels
26
+ # Ensure kernel_size, stride, padding, and dilation are tuples of length 3
27
+ if isinstance(kernel_size, int):
28
+ kernel_size = (kernel_size, kernel_size, kernel_size)
29
+ if kernel_size == (1, 1, 1):
30
+ raise ValueError("kernel_size must be greater than 1. Use make_linear_nd instead.")
31
+ if isinstance(stride, int):
32
+ stride = (stride, stride, stride)
33
+ if isinstance(padding, int):
34
+ padding = (padding, padding, padding)
35
+ if isinstance(dilation, int):
36
+ dilation = (dilation, dilation, dilation)
37
+
38
+ # Set parameters for convolutions
39
+ self.groups = groups
40
+ self.bias = bias
41
+
42
+ # Define the size of the channels after the first convolution
43
+ intermediate_channels = out_channels if in_channels < out_channels else in_channels
44
+
45
+ # Define parameters for the first convolution
46
+ self.weight1 = nn.Parameter(
47
+ torch.Tensor(intermediate_channels, in_channels // groups, 1, kernel_size[1], kernel_size[2])
48
+ )
49
+ self.stride1 = (1, stride[1], stride[2])
50
+ self.padding1 = (0, padding[1], padding[2])
51
+ self.dilation1 = (1, dilation[1], dilation[2])
52
+ if bias:
53
+ self.bias1 = nn.Parameter(torch.Tensor(intermediate_channels))
54
+ else:
55
+ self.register_parameter("bias1", None)
56
+
57
+ # Define parameters for the second convolution
58
+ self.weight2 = nn.Parameter(torch.Tensor(out_channels, intermediate_channels // groups, kernel_size[0], 1, 1))
59
+ self.stride2 = (stride[0], 1, 1)
60
+ self.padding2 = (padding[0], 0, 0)
61
+ self.dilation2 = (dilation[0], 1, 1)
62
+ if bias:
63
+ self.bias2 = nn.Parameter(torch.Tensor(out_channels))
64
+ else:
65
+ self.register_parameter("bias2", None)
66
+
67
+ # Initialize weights and biases
68
+ self.reset_parameters()
69
+
70
+ def reset_parameters(self):
71
+ nn.init.kaiming_uniform_(self.weight1, a=math.sqrt(5))
72
+ nn.init.kaiming_uniform_(self.weight2, a=math.sqrt(5))
73
+ if self.bias:
74
+ fan_in1, _ = nn.init._calculate_fan_in_and_fan_out(self.weight1)
75
+ bound1 = 1 / math.sqrt(fan_in1)
76
+ nn.init.uniform_(self.bias1, -bound1, bound1)
77
+ fan_in2, _ = nn.init._calculate_fan_in_and_fan_out(self.weight2)
78
+ bound2 = 1 / math.sqrt(fan_in2)
79
+ nn.init.uniform_(self.bias2, -bound2, bound2)
80
+
81
+ def forward(self, x, use_conv3d=False, skip_time_conv=False):
82
+ if use_conv3d:
83
+ return self.forward_with_3d(x=x, skip_time_conv=skip_time_conv)
84
+ else:
85
+ return self.forward_with_2d(x=x, skip_time_conv=skip_time_conv)
86
+
87
+ def forward_with_3d(self, x, skip_time_conv):
88
+ # First convolution
89
+ x = F.conv3d(x, self.weight1, self.bias1, self.stride1, self.padding1, self.dilation1, self.groups)
90
+
91
+ if skip_time_conv:
92
+ return x
93
+
94
+ # Second convolution
95
+ x = F.conv3d(x, self.weight2, self.bias2, self.stride2, self.padding2, self.dilation2, self.groups)
96
+
97
+ return x
98
+
99
+ def forward_with_2d(self, x, skip_time_conv):
100
+ b, c, d, h, w = x.shape
101
+
102
+ # First 2D convolution
103
+ x = rearrange(x, "b c d h w -> (b d) c h w")
104
+ # Squeeze the depth dimension out of weight1 since it's 1
105
+ weight1 = self.weight1.squeeze(2)
106
+ # Select stride, padding, and dilation for the 2D convolution
107
+ stride1 = (self.stride1[1], self.stride1[2])
108
+ padding1 = (self.padding1[1], self.padding1[2])
109
+ dilation1 = (self.dilation1[1], self.dilation1[2])
110
+ x = F.conv2d(x, weight1, self.bias1, stride1, padding1, dilation1, self.groups)
111
+
112
+ _, _, h, w = x.shape
113
+
114
+ if skip_time_conv:
115
+ x = rearrange(x, "(b d) c h w -> b c d h w", b=b)
116
+ return x
117
+
118
+ # Second convolution which is essentially treated as a 1D convolution across the 'd' dimension
119
+ x = rearrange(x, "(b d) c h w -> (b h w) c d", b=b)
120
+
121
+ # Reshape weight2 to match the expected dimensions for conv1d
122
+ weight2 = self.weight2.squeeze(-1).squeeze(-1)
123
+ # Use only the relevant dimension for stride, padding, and dilation for the 1D convolution
124
+ stride2 = self.stride2[0]
125
+ padding2 = self.padding2[0]
126
+ dilation2 = self.dilation2[0]
127
+ x = F.conv1d(x, weight2, self.bias2, stride2, padding2, dilation2, self.groups)
128
+ x = rearrange(x, "(b h w) c d -> b c d h w", b=b, h=h, w=w)
129
+
130
+ return x
131
+
132
+ @property
133
+ def weight(self):
134
+ return self.weight2
135
+
136
+
137
+ def test_dual_conv3d_consistency():
138
+ # Initialize parameters
139
+ in_channels = 3
140
+ out_channels = 5
141
+ kernel_size = (3, 3, 3)
142
+ stride = (2, 2, 2)
143
+ padding = (1, 1, 1)
144
+
145
+ # Create an instance of the DualConv3d class
146
+ dual_conv3d = DualConv3d(
147
+ in_channels=in_channels,
148
+ out_channels=out_channels,
149
+ kernel_size=kernel_size,
150
+ stride=stride,
151
+ padding=padding,
152
+ bias=True,
153
+ )
154
+
155
+ # Example input tensor
156
+ test_input = torch.randn(1, 3, 10, 10, 10)
157
+
158
+ # Perform forward passes with both 3D and 2D settings
159
+ output_conv3d = dual_conv3d(test_input, use_conv3d=True)
160
+ output_2d = dual_conv3d(test_input, use_conv3d=False)
161
+
162
+ # Assert that the outputs from both methods are sufficiently close
163
+ assert torch.allclose(
164
+ output_conv3d, output_2d, atol=1e-6
165
+ ), "Outputs are not consistent between 3D and 2D convolutions."
xora/models/autoencoders/pixel_norm.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ from torch import nn
3
+
4
+
5
+ class PixelNorm(nn.Module):
6
+ def __init__(self, dim=1, eps=1e-8):
7
+ super(PixelNorm, self).__init__()
8
+ self.dim = dim
9
+ self.eps = eps
10
+
11
+ def forward(self, x):
12
+ return x / torch.sqrt(torch.mean(x**2, dim=self.dim, keepdim=True) + self.eps)
xora/models/autoencoders/vae.py ADDED
@@ -0,0 +1,280 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, Union
2
+
3
+ import torch
4
+ import math
5
+ import torch.nn as nn
6
+ from diffusers import ConfigMixin, ModelMixin
7
+ from diffusers.models.autoencoders.vae import DecoderOutput, DiagonalGaussianDistribution
8
+ from diffusers.models.modeling_outputs import AutoencoderKLOutput
9
+ from xora.models.autoencoders.conv_nd_factory import make_conv_nd
10
+
11
+
12
+ class AutoencoderKLWrapper(ModelMixin, ConfigMixin):
13
+ """Variational Autoencoder (VAE) model with KL loss.
14
+
15
+ VAE from the paper Auto-Encoding Variational Bayes by Diederik P. Kingma and Max Welling.
16
+ This model is a wrapper around an encoder and a decoder, and it adds a KL loss term to the reconstruction loss.
17
+
18
+ Args:
19
+ encoder (`nn.Module`):
20
+ Encoder module.
21
+ decoder (`nn.Module`):
22
+ Decoder module.
23
+ latent_channels (`int`, *optional*, defaults to 4):
24
+ Number of latent channels.
25
+ """
26
+
27
+ def __init__(
28
+ self,
29
+ encoder: nn.Module,
30
+ decoder: nn.Module,
31
+ latent_channels: int = 4,
32
+ dims: int = 2,
33
+ sample_size=512,
34
+ use_quant_conv: bool = True,
35
+ ):
36
+ super().__init__()
37
+
38
+ # pass init params to Encoder
39
+ self.encoder = encoder
40
+ self.use_quant_conv = use_quant_conv
41
+
42
+ # pass init params to Decoder
43
+ quant_dims = 2 if dims == 2 else 3
44
+ self.decoder = decoder
45
+ if use_quant_conv:
46
+ self.quant_conv = make_conv_nd(quant_dims, 2 * latent_channels, 2 * latent_channels, 1)
47
+ self.post_quant_conv = make_conv_nd(quant_dims, latent_channels, latent_channels, 1)
48
+ else:
49
+ self.quant_conv = nn.Identity()
50
+ self.post_quant_conv = nn.Identity()
51
+ self.use_z_tiling = False
52
+ self.use_hw_tiling = False
53
+ self.dims = dims
54
+ self.z_sample_size = 1
55
+
56
+ # only relevant if vae tiling is enabled
57
+ self.set_tiling_params(sample_size=sample_size, overlap_factor=0.25)
58
+
59
+ def set_tiling_params(self, sample_size: int = 512, overlap_factor: float = 0.25):
60
+ self.tile_sample_min_size = sample_size
61
+ num_blocks = len(self.encoder.down_blocks)
62
+ self.tile_latent_min_size = int(sample_size / (2 ** (num_blocks - 1)))
63
+ self.tile_overlap_factor = overlap_factor
64
+
65
+ def enable_z_tiling(self, z_sample_size: int = 8):
66
+ r"""
67
+ Enable tiling during VAE decoding.
68
+
69
+ When this option is enabled, the VAE will split the input tensor in tiles to compute decoding in several
70
+ steps. This is useful to save some memory and allow larger batch sizes.
71
+ """
72
+ self.use_z_tiling = z_sample_size > 1
73
+ self.z_sample_size = z_sample_size
74
+ assert (
75
+ z_sample_size % 8 == 0 or z_sample_size == 1
76
+ ), f"z_sample_size must be a multiple of 8 or 1. Got {z_sample_size}."
77
+
78
+ def disable_z_tiling(self):
79
+ r"""
80
+ Disable tiling during VAE decoding. If `use_tiling` was previously invoked, this method will go back to computing
81
+ decoding in one step.
82
+ """
83
+ self.use_z_tiling = False
84
+
85
+ def enable_hw_tiling(self):
86
+ r"""
87
+ Enable tiling during VAE decoding along the height and width dimension.
88
+ """
89
+ self.use_hw_tiling = True
90
+
91
+ def disable_hw_tiling(self):
92
+ r"""
93
+ Disable tiling during VAE decoding along the height and width dimension.
94
+ """
95
+ self.use_hw_tiling = False
96
+
97
+ def _hw_tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True):
98
+ overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
99
+ blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
100
+ row_limit = self.tile_latent_min_size - blend_extent
101
+
102
+ # Split the image into 512x512 tiles and encode them separately.
103
+ rows = []
104
+ for i in range(0, x.shape[3], overlap_size):
105
+ row = []
106
+ for j in range(0, x.shape[4], overlap_size):
107
+ tile = x[:, :, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
108
+ tile = self.encoder(tile)
109
+ tile = self.quant_conv(tile)
110
+ row.append(tile)
111
+ rows.append(row)
112
+ result_rows = []
113
+ for i, row in enumerate(rows):
114
+ result_row = []
115
+ for j, tile in enumerate(row):
116
+ # blend the above tile and the left tile
117
+ # to the current tile and add the current tile to the result row
118
+ if i > 0:
119
+ tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
120
+ if j > 0:
121
+ tile = self.blend_h(row[j - 1], tile, blend_extent)
122
+ result_row.append(tile[:, :, :, :row_limit, :row_limit])
123
+ result_rows.append(torch.cat(result_row, dim=4))
124
+
125
+ moments = torch.cat(result_rows, dim=3)
126
+ return moments
127
+
128
+ def blend_z(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
129
+ blend_extent = min(a.shape[2], b.shape[2], blend_extent)
130
+ for z in range(blend_extent):
131
+ b[:, :, z, :, :] = a[:, :, -blend_extent + z, :, :] * (1 - z / blend_extent) + b[:, :, z, :, :] * (
132
+ z / blend_extent
133
+ )
134
+ return b
135
+
136
+ def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
137
+ blend_extent = min(a.shape[3], b.shape[3], blend_extent)
138
+ for y in range(blend_extent):
139
+ b[:, :, :, y, :] = a[:, :, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, :, y, :] * (
140
+ y / blend_extent
141
+ )
142
+ return b
143
+
144
+ def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
145
+ blend_extent = min(a.shape[4], b.shape[4], blend_extent)
146
+ for x in range(blend_extent):
147
+ b[:, :, :, :, x] = a[:, :, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, :, x] * (
148
+ x / blend_extent
149
+ )
150
+ return b
151
+
152
+ def _hw_tiled_decode(self, z: torch.FloatTensor, target_shape):
153
+ overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
154
+ blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
155
+ row_limit = self.tile_sample_min_size - blend_extent
156
+ tile_target_shape = (*target_shape[:3], self.tile_sample_min_size, self.tile_sample_min_size)
157
+ # Split z into overlapping 64x64 tiles and decode them separately.
158
+ # The tiles have an overlap to avoid seams between tiles.
159
+ rows = []
160
+ for i in range(0, z.shape[3], overlap_size):
161
+ row = []
162
+ for j in range(0, z.shape[4], overlap_size):
163
+ tile = z[:, :, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
164
+ tile = self.post_quant_conv(tile)
165
+ decoded = self.decoder(tile, target_shape=tile_target_shape)
166
+ row.append(decoded)
167
+ rows.append(row)
168
+ result_rows = []
169
+ for i, row in enumerate(rows):
170
+ result_row = []
171
+ for j, tile in enumerate(row):
172
+ # blend the above tile and the left tile
173
+ # to the current tile and add the current tile to the result row
174
+ if i > 0:
175
+ tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
176
+ if j > 0:
177
+ tile = self.blend_h(row[j - 1], tile, blend_extent)
178
+ result_row.append(tile[:, :, :, :row_limit, :row_limit])
179
+ result_rows.append(torch.cat(result_row, dim=4))
180
+
181
+ dec = torch.cat(result_rows, dim=3)
182
+ return dec
183
+
184
+ def encode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
185
+ if self.use_z_tiling and z.shape[2] > self.z_sample_size > 1:
186
+ num_splits = z.shape[2] // self.z_sample_size
187
+ sizes = [self.z_sample_size] * num_splits
188
+ sizes = sizes + [z.shape[2] - sum(sizes)] if z.shape[2] - sum(sizes) > 0 else sizes
189
+ tiles = z.split(sizes, dim=2)
190
+ moments_tiles = [
191
+ self._hw_tiled_encode(z_tile, return_dict) if self.use_hw_tiling else self._encode(z_tile)
192
+ for z_tile in tiles
193
+ ]
194
+ moments = torch.cat(moments_tiles, dim=2)
195
+
196
+ else:
197
+ moments = self._hw_tiled_encode(z, return_dict) if self.use_hw_tiling else self._encode(z)
198
+
199
+ posterior = DiagonalGaussianDistribution(moments)
200
+ if not return_dict:
201
+ return (posterior,)
202
+
203
+ return AutoencoderKLOutput(latent_dist=posterior)
204
+
205
+ def _encode(self, x: torch.FloatTensor) -> AutoencoderKLOutput:
206
+ h = self.encoder(x)
207
+ moments = self.quant_conv(h)
208
+ return moments
209
+
210
+ def _decode(self, z: torch.FloatTensor, target_shape=None) -> Union[DecoderOutput, torch.FloatTensor]:
211
+ z = self.post_quant_conv(z)
212
+ dec = self.decoder(z, target_shape=target_shape)
213
+ return dec
214
+
215
+ def decode(
216
+ self, z: torch.FloatTensor, return_dict: bool = True, target_shape=None
217
+ ) -> Union[DecoderOutput, torch.FloatTensor]:
218
+ assert target_shape is not None, "target_shape must be provided for decoding"
219
+ if self.use_z_tiling and z.shape[2] > self.z_sample_size > 1:
220
+ reduction_factor = int(
221
+ self.encoder.patch_size_t
222
+ * 2 ** (len(self.encoder.down_blocks) - 1 - math.sqrt(self.encoder.patch_size))
223
+ )
224
+ split_size = self.z_sample_size // reduction_factor
225
+ num_splits = z.shape[2] // split_size
226
+
227
+ # copy the target shape and divide the frame dimension (dim=2) by the number of temporal splits
228
+ target_shape_split = list(target_shape)
229
+ target_shape_split[2] = target_shape[2] // num_splits
230
+
231
+ decoded_tiles = [
232
+ (
233
+ self._hw_tiled_decode(z_tile, target_shape_split)
234
+ if self.use_hw_tiling
235
+ else self._decode(z_tile, target_shape=target_shape_split)
236
+ )
237
+ for z_tile in torch.tensor_split(z, num_splits, dim=2)
238
+ ]
239
+ decoded = torch.cat(decoded_tiles, dim=2)
240
+ else:
241
+ decoded = (
242
+ self._hw_tiled_decode(z, target_shape)
243
+ if self.use_hw_tiling
244
+ else self._decode(z, target_shape=target_shape)
245
+ )
246
+
247
+ if not return_dict:
248
+ return (decoded,)
249
+
250
+ return DecoderOutput(sample=decoded)
251
+
252
+ def forward(
253
+ self,
254
+ sample: torch.FloatTensor,
255
+ sample_posterior: bool = False,
256
+ return_dict: bool = True,
257
+ generator: Optional[torch.Generator] = None,
258
+ ) -> Union[DecoderOutput, torch.FloatTensor]:
259
+ r"""
260
+ Args:
261
+ sample (`torch.FloatTensor`): Input sample.
262
+ sample_posterior (`bool`, *optional*, defaults to `False`):
263
+ Whether to sample from the posterior.
264
+ return_dict (`bool`, *optional*, defaults to `True`):
265
+ Whether to return a [`DecoderOutput`] instead of a plain tuple.
266
+ generator (`torch.Generator`, *optional*):
267
+ Generator used to sample from the posterior.
268
+ """
269
+ x = sample
270
+ posterior = self.encode(x).latent_dist
271
+ if sample_posterior:
272
+ z = posterior.sample(generator=generator)
273
+ else:
274
+ z = posterior.mode()
275
+ dec = self.decode(z, target_shape=sample.shape).sample
276
+
277
+ if not return_dict:
278
+ return (dec,)
279
+
280
+ return DecoderOutput(sample=dec)
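For reference, a small sketch (not part of the diff) of how `encode` above partitions the frame axis when z-tiling is enabled: full-size chunks plus one remainder chunk when the frame count is not a multiple of `z_sample_size`.

import torch

def temporal_split_sizes(num_frames: int, z_sample_size: int) -> list:
    # Mirrors the splitting logic in `encode` above.
    num_splits = num_frames // z_sample_size
    sizes = [z_sample_size] * num_splits
    remainder = num_frames - sum(sizes)
    return sizes + [remainder] if remainder > 0 else sizes

x = torch.randn(1, 3, 21, 64, 64)            # (batch, channels, frames, height, width)
sizes = temporal_split_sizes(x.shape[2], 8)  # -> [8, 8, 5]
tiles = x.split(sizes, dim=2)
assert sum(t.shape[2] for t in tiles) == x.shape[2]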
xora/models/autoencoders/vae_encode.py ADDED
@@ -0,0 +1,171 @@
1
+ import torch
2
+ from torch import nn
3
+ from diffusers import AutoencoderKL
4
+ from einops import rearrange
5
+ from torch import Tensor
6
+ from torch.nn import functional
7
+
8
+
9
+ from xora.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
+ # NOTE: Downsample3D below uses make_conv_nd; the import is assumed to come from the
+ # project's conv factory module.
+ from xora.models.autoencoders.conv_nd_factory import make_conv_nd
10
+
11
+ class Downsample3D(nn.Module):
12
+ def __init__(self, dims, in_channels: int, out_channels: int, kernel_size: int = 3, padding: int = 1):
13
+ super().__init__()
14
+ stride: int = 2
15
+ self.padding = padding
16
+ self.in_channels = in_channels
17
+ self.dims = dims
18
+ self.conv = make_conv_nd(
19
+ dims=dims,
20
+ in_channels=in_channels,
21
+ out_channels=out_channels,
22
+ kernel_size=kernel_size,
23
+ stride=stride,
24
+ padding=padding,
25
+ )
26
+
27
+ def forward(self, x, downsample_in_time=True):
28
+ conv = self.conv
29
+ if self.padding == 0:
30
+ if self.dims == 2:
31
+ padding = (0, 1, 0, 1)
32
+ else:
33
+ padding = (0, 1, 0, 1, 0, 1 if downsample_in_time else 0)
34
+
35
+ x = functional.pad(x, padding, mode="constant", value=0)
36
+
37
+ if self.dims == (2, 1) and not downsample_in_time:
38
+ return conv(x, skip_time_conv=True)
39
+
40
+ return conv(x)
41
+
42
+
43
+
44
+ def vae_encode(media_items: Tensor, vae: AutoencoderKL, split_size: int = 1, vae_per_channel_normalize=False) -> Tensor:
45
+ """
46
+ Encodes media items (images or videos) into latent representations using a specified VAE model.
47
+ The function supports processing batches of images or video frames and can handle the processing
48
+ in smaller sub-batches if needed.
49
+
50
+ Args:
51
+ media_items (Tensor): A torch Tensor containing the media items to encode. The expected
52
+ shape is (batch_size, channels, height, width) for images or (batch_size, channels,
53
+ frames, height, width) for videos.
54
+ vae (AutoencoderKL): An instance of the `AutoencoderKL` class from the `diffusers` library,
55
+ pre-configured and loaded with the appropriate model weights.
56
+ split_size (int, optional): The number of sub-batches to split the input batch into for encoding.
57
+ If set to more than 1, the input media items are processed in smaller batches according to
58
+ this value. Defaults to 1, which processes all items in a single batch.
59
+
60
+ Returns:
61
+ Tensor: A torch Tensor of the encoded latent representations. The shape of the tensor is adjusted
62
+ to match the input shape, scaled by the model's configuration.
63
+
64
+ Examples:
65
+ >>> import torch
66
+ >>> from diffusers import AutoencoderKL
67
+ >>> vae = AutoencoderKL.from_pretrained('your-model-name')
68
+ >>> images = torch.rand(10, 3, 8, 256, 256) # Example tensor with 10 videos of 8 frames each.
69
+ >>> latents = vae_encode(images, vae)
70
+ >>> print(latents.shape) # Output shape will depend on the model's latent configuration.
71
+
72
+ Note:
73
+ In the case of a video, the function encodes the media item frame by frame.
74
+ """
75
+ is_video_shaped = media_items.dim() == 5
76
+ batch_size, channels = media_items.shape[0:2]
77
+
78
+ if channels != 3:
79
+ raise ValueError(f"Expects tensors with 3 channels, got {channels}.")
80
+
81
+ if is_video_shaped and not isinstance(vae, (CausalVideoAutoencoder)):
82
+ media_items = rearrange(media_items, "b c n h w -> (b n) c h w")
83
+ if split_size > 1:
84
+ if len(media_items) % split_size != 0:
85
+ raise ValueError("The batch size must be divisible by 'train.vae_bs_split'.")
86
+ encode_bs = len(media_items) // split_size
87
+ # latents = [vae.encode(image_batch).latent_dist.sample() for image_batch in media_items.split(encode_bs)]
88
+ latents = []
89
+ for image_batch in media_items.split(encode_bs):
90
+ latents.append(vae.encode(image_batch).latent_dist.sample())
91
+ latents = torch.cat(latents, dim=0)
92
+ else:
93
+ latents = vae.encode(media_items).latent_dist.sample()
94
+
95
+ latents = normalize_latents(latents, vae, vae_per_channel_normalize)
96
+ if is_video_shaped and not isinstance(vae, (CausalVideoAutoencoder)):
97
+ latents = rearrange(latents, "(b n) c h w -> b c n h w", b=batch_size)
98
+ return latents
99
+
100
+
101
+ def vae_decode(
102
+ latents: Tensor, vae: AutoencoderKL, is_video: bool = True, split_size: int = 1, vae_per_channel_normalize=False
103
+ ) -> Tensor:
104
+ is_video_shaped = latents.dim() == 5
105
+ batch_size = latents.shape[0]
106
+
107
+ if is_video_shaped and not isinstance(vae, (CausalVideoAutoencoder)):
108
+ latents = rearrange(latents, "b c n h w -> (b n) c h w")
109
+ if split_size > 1:
110
+ if len(latents) % split_size != 0:
111
+ raise ValueError("The batch size must be divisible by 'train.vae_bs_split'.")
112
+ encode_bs = len(latents) // split_size
113
+ image_batch = [
114
+ _run_decoder(latent_batch, vae, is_video, vae_per_channel_normalize)
115
+ for latent_batch in latents.split(encode_bs)
116
+ ]
117
+ images = torch.cat(image_batch, dim=0)
118
+ else:
119
+ images = _run_decoder(latents, vae, is_video, vae_per_channel_normalize)
120
+
121
+ if is_video_shaped and not isinstance(vae, (CausalVideoAutoencoder)):
122
+ images = rearrange(images, "(b n) c h w -> b c n h w", b=batch_size)
123
+ return images
124
+
125
+
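A hedged usage sketch of `vae_encode` / `vae_decode` as defined above, with a stock `diffusers` `AutoencoderKL`; 'your-model-name' is a placeholder, and `split_size=2` simply halves the per-call batch.

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("your-model-name")
videos = torch.rand(2, 3, 8, 64, 64)                             # (b, c, frames, h, w)
latents = vae_encode(videos, vae, split_size=2)                   # frames encoded as 2 sub-batches
decoded = vae_decode(latents, vae, is_video=True, split_size=2)
print(latents.shape, decoded.shape)                               # latent grid, then (2, 3, 8, 64, 64)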
126
+ def _run_decoder(latents: Tensor, vae: AutoencoderKL, is_video: bool, vae_per_channel_normalize=False) -> Tensor:
127
+ if isinstance(vae, (CausalVideoAutoencoder)):
128
+ *_, fl, hl, wl = latents.shape
129
+ temporal_scale, spatial_scale, _ = get_vae_size_scale_factor(vae)
130
+ latents = latents.to(vae.dtype)
131
+ image = vae.decode(
132
+ un_normalize_latents(latents, vae, vae_per_channel_normalize),
133
+ return_dict=False,
134
+ target_shape=(1, 3, fl * temporal_scale if is_video else 1, hl * spatial_scale, wl * spatial_scale),
135
+ )[0]
136
+ else:
137
+ image = vae.decode(
138
+ un_normalize_latents(latents, vae, vae_per_channel_normalize),
139
+ return_dict=False,
140
+ )[0]
141
+ return image
142
+
143
+
144
+ def get_vae_size_scale_factor(vae: AutoencoderKL) -> float:
145
+ if isinstance(vae, CausalVideoAutoencoder):
146
+ spatial = vae.spatial_downscale_factor
147
+ temporal = vae.temporal_downscale_factor
148
+ else:
149
+ down_blocks = len([block for block in vae.encoder.down_blocks if isinstance(block.downsample, Downsample3D)])
150
+ spatial = vae.config.patch_size * 2**down_blocks
151
+ # NOTE: the original isinstance() call here was missing its class argument; guard on the
+ # config attribute instead so VAEs without a temporal patch size fall back to a factor of 1.
+ temporal = vae.config.patch_size_t * 2 ** down_blocks if getattr(vae.config, "patch_size_t", None) else 1
152
+
153
+ return (temporal, spatial, spatial)
154
+
155
+
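The factors returned by `get_vae_size_scale_factor` are what `_run_decoder` above uses to rebuild `target_shape`; a quick sketch of that arithmetic with illustrative, model-dependent values:

temporal_scale, height_scale, width_scale = 1, 32, 32    # illustrative values
fl, hl, wl = 8, 16, 16                                    # latent frames / height / width
target_shape = (1, 3, fl * temporal_scale, hl * height_scale, wl * width_scale)
print(target_shape)  # (1, 3, 8, 512, 512)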
156
+ def normalize_latents(latents: Tensor, vae: AutoencoderKL, vae_per_channel_normalize: bool = False) -> Tensor:
157
+ return (
158
+ (latents - vae.mean_of_means.to(latents.dtype).view(1, -1, 1, 1, 1))
159
+ / vae.std_of_means.to(latents.dtype).view(1, -1, 1, 1, 1)
160
+ if vae_per_channel_normalize
161
+ else latents * vae.config.scaling_factor
162
+ )
163
+
164
+
165
+ def un_normalize_latents(latents: Tensor, vae: AutoencoderKL, vae_per_channel_normalize: bool = False) -> Tensor:
166
+ return (
167
+ latents * vae.std_of_means.to(latents.dtype).view(1, -1, 1, 1, 1)
168
+ + vae.mean_of_means.to(latents.dtype).view(1, -1, 1, 1, 1)
169
+ if vae_per_channel_normalize
170
+ else latents / vae.config.scaling_factor
171
+ )
xora/models/transformers/__init__.py ADDED
File without changes
xora/models/transformers/attention.py ADDED
@@ -0,0 +1,1064 @@
1
+ import inspect
2
+ from importlib import import_module
3
+ from typing import Any, Dict, Optional, Tuple
4
+
5
+ import torch
6
+ import torch.nn.functional as F
7
+ from diffusers.models.activations import GEGLU, GELU, ApproximateGELU
8
+ from diffusers.models.attention import _chunked_feed_forward
9
+ from diffusers.models.attention_processor import (
10
+ LoRAAttnAddedKVProcessor,
11
+ LoRAAttnProcessor,
12
+ LoRAAttnProcessor2_0,
13
+ LoRAXFormersAttnProcessor,
14
+ SpatialNorm,
15
+ )
16
+ from diffusers.models.lora import LoRACompatibleLinear
17
+ from diffusers.models.normalization import RMSNorm
18
+ from diffusers.utils import deprecate, logging
19
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
20
+ from einops import rearrange
21
+ from torch import nn
22
+
23
+ # code adapted from https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention.py
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+
28
+ @maybe_allow_in_graph
29
+ class BasicTransformerBlock(nn.Module):
30
+ r"""
31
+ A basic Transformer block.
32
+
33
+ Parameters:
34
+ dim (`int`): The number of channels in the input and output.
35
+ num_attention_heads (`int`): The number of heads to use for multi-head attention.
36
+ attention_head_dim (`int`): The number of channels in each head.
37
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
38
+ cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
39
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
40
+ num_embeds_ada_norm (:
41
+ obj: `int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
42
+ attention_bias (:
43
+ obj: `bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
44
+ only_cross_attention (`bool`, *optional*):
45
+ Whether to use only cross-attention layers. In this case two cross attention layers are used.
46
+ double_self_attention (`bool`, *optional*):
47
+ Whether to use two self-attention layers. In this case no cross attention layers are used.
48
+ upcast_attention (`bool`, *optional*):
49
+ Whether to upcast the attention computation to float32. This is useful for mixed precision training.
50
+ norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
51
+ Whether to use learnable elementwise affine parameters for normalization.
52
+ qk_norm (`str`, *optional*, defaults to None):
53
+ Set to 'layer_norm' or `rms_norm` to perform query and key normalization.
54
+ adaptive_norm (`str`, *optional*, defaults to `"single_scale_shift"`):
55
+ The type of adaptive norm to use. Can be `"single_scale_shift"`, `"single_scale"` or "none".
56
+ standardization_norm (`str`, *optional*, defaults to `"layer_norm"`):
57
+ The type of pre-normalization to use. Can be `"layer_norm"` or `"rms_norm"`.
58
+ final_dropout (`bool` *optional*, defaults to False):
59
+ Whether to apply a final dropout after the last feed-forward layer.
60
+ attention_type (`str`, *optional*, defaults to `"default"`):
61
+ The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
62
+ positional_embeddings (`str`, *optional*, defaults to `None`):
63
+ The type of positional embeddings to apply to.
64
+ num_positional_embeddings (`int`, *optional*, defaults to `None`):
65
+ The maximum number of positional embeddings to apply.
66
+ """
67
+
68
+ def __init__(
69
+ self,
70
+ dim: int,
71
+ num_attention_heads: int,
72
+ attention_head_dim: int,
73
+ dropout=0.0,
74
+ cross_attention_dim: Optional[int] = None,
75
+ activation_fn: str = "geglu",
76
+ num_embeds_ada_norm: Optional[int] = None, # pylint: disable=unused-argument
77
+ attention_bias: bool = False,
78
+ only_cross_attention: bool = False,
79
+ double_self_attention: bool = False,
80
+ upcast_attention: bool = False,
81
+ norm_elementwise_affine: bool = True,
82
+ adaptive_norm: str = "single_scale_shift", # 'single_scale_shift', 'single_scale' or 'none'
83
+ standardization_norm: str = "layer_norm", # 'layer_norm' or 'rms_norm'
84
+ norm_eps: float = 1e-5,
85
+ qk_norm: Optional[str] = None,
86
+ final_dropout: bool = False,
87
+ attention_type: str = "default", # pylint: disable=unused-argument
88
+ ff_inner_dim: Optional[int] = None,
89
+ ff_bias: bool = True,
90
+ attention_out_bias: bool = True,
91
+ use_tpu_flash_attention: bool = False,
92
+ use_rope: bool = False,
93
+ ):
94
+ super().__init__()
95
+ self.only_cross_attention = only_cross_attention
96
+ self.use_tpu_flash_attention = use_tpu_flash_attention
97
+ self.adaptive_norm = adaptive_norm
98
+
99
+ assert standardization_norm in ["layer_norm", "rms_norm"]
100
+ assert adaptive_norm in ["single_scale_shift", "single_scale", "none"]
101
+
102
+ make_norm_layer = nn.LayerNorm if standardization_norm == "layer_norm" else RMSNorm
103
+
104
+ # Define 3 blocks. Each block has its own normalization layer.
105
+ # 1. Self-Attn
106
+ self.norm1 = make_norm_layer(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)
107
+
108
+ self.attn1 = Attention(
109
+ query_dim=dim,
110
+ heads=num_attention_heads,
111
+ dim_head=attention_head_dim,
112
+ dropout=dropout,
113
+ bias=attention_bias,
114
+ cross_attention_dim=cross_attention_dim if only_cross_attention else None,
115
+ upcast_attention=upcast_attention,
116
+ out_bias=attention_out_bias,
117
+ use_tpu_flash_attention=use_tpu_flash_attention,
118
+ qk_norm=qk_norm,
119
+ use_rope=use_rope,
120
+ )
121
+
122
+ # 2. Cross-Attn
123
+ if cross_attention_dim is not None or double_self_attention:
124
+ self.attn2 = Attention(
125
+ query_dim=dim,
126
+ cross_attention_dim=cross_attention_dim if not double_self_attention else None,
127
+ heads=num_attention_heads,
128
+ dim_head=attention_head_dim,
129
+ dropout=dropout,
130
+ bias=attention_bias,
131
+ upcast_attention=upcast_attention,
132
+ out_bias=attention_out_bias,
133
+ use_tpu_flash_attention=use_tpu_flash_attention,
134
+ qk_norm=qk_norm,
135
+ use_rope=use_rope,
136
+ ) # is self-attn if encoder_hidden_states is none
137
+
138
+ if adaptive_norm == "none":
139
+ self.attn2_norm = make_norm_layer(dim, norm_eps, norm_elementwise_affine)
140
+ else:
141
+ self.attn2 = None
142
+ self.attn2_norm = None
143
+
144
+ self.norm2 = make_norm_layer(dim, norm_eps, norm_elementwise_affine)
145
+
146
+ # 3. Feed-forward
147
+ self.ff = FeedForward(
148
+ dim,
149
+ dropout=dropout,
150
+ activation_fn=activation_fn,
151
+ final_dropout=final_dropout,
152
+ inner_dim=ff_inner_dim,
153
+ bias=ff_bias,
154
+ )
155
+
156
+ # 5. Scale-shift for PixArt-Alpha.
157
+ if adaptive_norm != "none":
158
+ num_ada_params = 4 if adaptive_norm == "single_scale" else 6
159
+ self.scale_shift_table = nn.Parameter(torch.randn(num_ada_params, dim) / dim**0.5)
160
+
161
+ # let chunk size default to None
162
+ self._chunk_size = None
163
+ self._chunk_dim = 0
164
+
165
+
166
+ def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
167
+ # Sets chunk feed-forward
168
+ self._chunk_size = chunk_size
169
+ self._chunk_dim = dim
170
+
171
+ def forward(
172
+ self,
173
+ hidden_states: torch.FloatTensor,
174
+ freqs_cis: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
175
+ attention_mask: Optional[torch.FloatTensor] = None,
176
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
177
+ encoder_attention_mask: Optional[torch.FloatTensor] = None,
178
+ timestep: Optional[torch.LongTensor] = None,
179
+ cross_attention_kwargs: Dict[str, Any] = None,
180
+ class_labels: Optional[torch.LongTensor] = None,
181
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
182
+ ) -> torch.FloatTensor:
183
+ if cross_attention_kwargs is not None:
184
+ if cross_attention_kwargs.get("scale", None) is not None:
185
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
186
+
187
+ # Notice that normalization is always applied before the real computation in the following blocks.
188
+ # 0. Self-Attention
189
+ batch_size = hidden_states.shape[0]
190
+
191
+ norm_hidden_states = self.norm1(hidden_states)
192
+
193
+ # Apply ada_norm_single
194
+ if self.adaptive_norm in ["single_scale_shift", "single_scale"]:
195
+ assert timestep.ndim == 3 # [batch, 1 or num_tokens, embedding_dim]
196
+ num_ada_params = self.scale_shift_table.shape[0]
197
+ ada_values = self.scale_shift_table[None, None] + timestep.reshape(
198
+ batch_size, timestep.shape[1], num_ada_params, -1
199
+ )
200
+ if self.adaptive_norm == "single_scale_shift":
201
+ shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada_values.unbind(dim=2)
202
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
203
+ else:
204
+ scale_msa, gate_msa, scale_mlp, gate_mlp = ada_values.unbind(dim=2)
205
+ norm_hidden_states = norm_hidden_states * (1 + scale_msa)
206
+ elif self.adaptive_norm == "none":
207
+ scale_msa, gate_msa, scale_mlp, gate_mlp = None, None, None, None
208
+ else:
209
+ raise ValueError(f"Unknown adaptive norm type: {self.adaptive_norm}")
210
+
211
+ norm_hidden_states = norm_hidden_states.squeeze(1) # TODO: Check if this is needed
212
+
213
+ # 1. Prepare GLIGEN inputs
214
+ cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
215
+
216
+ attn_output = self.attn1(
217
+ norm_hidden_states,
218
+ freqs_cis=freqs_cis,
219
+ encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
220
+ attention_mask=attention_mask,
221
+ **cross_attention_kwargs,
222
+ )
223
+ if gate_msa is not None:
224
+ attn_output = gate_msa * attn_output
225
+
226
+ hidden_states = attn_output + hidden_states
227
+ if hidden_states.ndim == 4:
228
+ hidden_states = hidden_states.squeeze(1)
229
+
230
+ # 3. Cross-Attention
231
+ if self.attn2 is not None:
232
+ if self.adaptive_norm == "none":
233
+ attn_input = self.attn2_norm(hidden_states)
234
+ else:
235
+ attn_input = hidden_states
236
+ attn_output = self.attn2(
237
+ attn_input,
238
+ freqs_cis=freqs_cis,
239
+ encoder_hidden_states=encoder_hidden_states,
240
+ attention_mask=encoder_attention_mask,
241
+ **cross_attention_kwargs,
242
+ )
243
+ hidden_states = attn_output + hidden_states
244
+
245
+ # 4. Feed-forward
246
+ norm_hidden_states = self.norm2(hidden_states)
247
+ if self.adaptive_norm == "single_scale_shift":
248
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp
249
+ elif self.adaptive_norm == "single_scale":
250
+ norm_hidden_states = norm_hidden_states * (1 + scale_mlp)
251
+ elif self.adaptive_norm == "none":
252
+ pass
253
+ else:
254
+ raise ValueError(f"Unknown adaptive norm type: {self.adaptive_norm}")
255
+
256
+ if self._chunk_size is not None:
257
+ # "feed_forward_chunk_size" can be used to save memory
258
+ ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
259
+ else:
260
+ ff_output = self.ff(norm_hidden_states)
261
+ if gate_mlp is not None:
262
+ ff_output = gate_mlp * ff_output
263
+
264
+ hidden_states = ff_output + hidden_states
265
+ if hidden_states.ndim == 4:
266
+ hidden_states = hidden_states.squeeze(1)
267
+
268
+ return hidden_states
269
+
270
+
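A minimal sketch (illustrative tensors only, not the module itself) of the `single_scale_shift` branch in the forward pass above: the timestep embedding is folded into six per-token (shift, scale, gate) vectors via the learned `scale_shift_table`, then applied around the attention residual.

import torch

batch, tokens, dim = 2, 16, 32
hidden = torch.randn(batch, tokens, dim)
scale_shift_table = torch.randn(6, dim) / dim**0.5     # as initialised in __init__
timestep_emb = torch.randn(batch, 1, 6 * dim)           # adaptive-norm embedding

ada = scale_shift_table[None, None] + timestep_emb.reshape(batch, 1, 6, dim)
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = ada.unbind(dim=2)

normed = torch.nn.functional.layer_norm(hidden, (dim,))
modulated = normed * (1 + scale_msa) + shift_msa         # pre-attention modulation
attn_out = modulated                                      # stand-in for the attention call
hidden = gate_msa * attn_out + hidden                     # gated residual, as in forward()
print(hidden.shape)                                       # torch.Size([2, 16, 32])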
271
+ @maybe_allow_in_graph
272
+ class Attention(nn.Module):
273
+ r"""
274
+ A cross attention layer.
275
+
276
+ Parameters:
277
+ query_dim (`int`):
278
+ The number of channels in the query.
279
+ cross_attention_dim (`int`, *optional*):
280
+ The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.
281
+ heads (`int`, *optional*, defaults to 8):
282
+ The number of heads to use for multi-head attention.
283
+ dim_head (`int`, *optional*, defaults to 64):
284
+ The number of channels in each head.
285
+ dropout (`float`, *optional*, defaults to 0.0):
286
+ The dropout probability to use.
287
+ bias (`bool`, *optional*, defaults to False):
288
+ Set to `True` for the query, key, and value linear layers to contain a bias parameter.
289
+ upcast_attention (`bool`, *optional*, defaults to False):
290
+ Set to `True` to upcast the attention computation to `float32`.
291
+ upcast_softmax (`bool`, *optional*, defaults to False):
292
+ Set to `True` to upcast the softmax computation to `float32`.
293
+ cross_attention_norm (`str`, *optional*, defaults to `None`):
294
+ The type of normalization to use for the cross attention. Can be `None`, `layer_norm`, or `group_norm`.
295
+ cross_attention_norm_num_groups (`int`, *optional*, defaults to 32):
296
+ The number of groups to use for the group norm in the cross attention.
297
+ added_kv_proj_dim (`int`, *optional*, defaults to `None`):
298
+ The number of channels to use for the added key and value projections. If `None`, no projection is used.
299
+ norm_num_groups (`int`, *optional*, defaults to `None`):
300
+ The number of groups to use for the group norm in the attention.
301
+ spatial_norm_dim (`int`, *optional*, defaults to `None`):
302
+ The number of channels to use for the spatial normalization.
303
+ out_bias (`bool`, *optional*, defaults to `True`):
304
+ Set to `True` to use a bias in the output linear layer.
305
+ scale_qk (`bool`, *optional*, defaults to `True`):
306
+ Set to `True` to scale the query and key by `1 / sqrt(dim_head)`.
307
+ qk_norm (`str`, *optional*, defaults to None):
308
+ Set to 'layer_norm' or `rms_norm` to perform query and key normalization.
309
+ only_cross_attention (`bool`, *optional*, defaults to `False`):
310
+ Set to `True` to only use cross attention and not added_kv_proj_dim. Can only be set to `True` if
311
+ `added_kv_proj_dim` is not `None`.
312
+ eps (`float`, *optional*, defaults to 1e-5):
313
+ An additional value added to the denominator in group normalization that is used for numerical stability.
314
+ rescale_output_factor (`float`, *optional*, defaults to 1.0):
315
+ A factor to rescale the output by dividing it with this value.
316
+ residual_connection (`bool`, *optional*, defaults to `False`):
317
+ Set to `True` to add the residual connection to the output.
318
+ _from_deprecated_attn_block (`bool`, *optional*, defaults to `False`):
319
+ Set to `True` if the attention block is loaded from a deprecated state dict.
320
+ processor (`AttnProcessor`, *optional*, defaults to `None`):
321
+ The attention processor to use. If `None`, defaults to `AttnProcessor2_0` if `torch 2.x` is used and
322
+ `AttnProcessor` otherwise.
323
+ """
324
+
325
+ def __init__(
326
+ self,
327
+ query_dim: int,
328
+ cross_attention_dim: Optional[int] = None,
329
+ heads: int = 8,
330
+ dim_head: int = 64,
331
+ dropout: float = 0.0,
332
+ bias: bool = False,
333
+ upcast_attention: bool = False,
334
+ upcast_softmax: bool = False,
335
+ cross_attention_norm: Optional[str] = None,
336
+ cross_attention_norm_num_groups: int = 32,
337
+ added_kv_proj_dim: Optional[int] = None,
338
+ norm_num_groups: Optional[int] = None,
339
+ spatial_norm_dim: Optional[int] = None,
340
+ out_bias: bool = True,
341
+ scale_qk: bool = True,
342
+ qk_norm: Optional[str] = None,
343
+ only_cross_attention: bool = False,
344
+ eps: float = 1e-5,
345
+ rescale_output_factor: float = 1.0,
346
+ residual_connection: bool = False,
347
+ _from_deprecated_attn_block: bool = False,
348
+ processor: Optional["AttnProcessor"] = None,
349
+ out_dim: int = None,
350
+ use_tpu_flash_attention: bool = False,
351
+ use_rope: bool = False,
352
+ ):
353
+ super().__init__()
354
+ self.inner_dim = out_dim if out_dim is not None else dim_head * heads
355
+ self.query_dim = query_dim
356
+ self.use_bias = bias
357
+ self.is_cross_attention = cross_attention_dim is not None
358
+ self.cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
359
+ self.upcast_attention = upcast_attention
360
+ self.upcast_softmax = upcast_softmax
361
+ self.rescale_output_factor = rescale_output_factor
362
+ self.residual_connection = residual_connection
363
+ self.dropout = dropout
364
+ self.fused_projections = False
365
+ self.out_dim = out_dim if out_dim is not None else query_dim
366
+ self.use_tpu_flash_attention = use_tpu_flash_attention
367
+ self.use_rope = use_rope
368
+
369
+ # we make use of this private variable to know whether this class is loaded
370
+ # with an deprecated state dict so that we can convert it on the fly
371
+ self._from_deprecated_attn_block = _from_deprecated_attn_block
372
+
373
+ self.scale_qk = scale_qk
374
+ self.scale = dim_head**-0.5 if self.scale_qk else 1.0
375
+
376
+ if qk_norm is None:
377
+ self.q_norm = nn.Identity()
378
+ self.k_norm = nn.Identity()
379
+ elif qk_norm == "rms_norm":
380
+ self.q_norm = RMSNorm(dim_head * heads, eps=1e-5)
381
+ self.k_norm = RMSNorm(dim_head * heads, eps=1e-5)
382
+ elif qk_norm == "layer_norm":
383
+ self.q_norm = nn.LayerNorm(dim_head * heads, eps=1e-5)
384
+ self.k_norm = nn.LayerNorm(dim_head * heads, eps=1e-5)
385
+ else:
386
+ raise ValueError(f"Unsupported qk_norm method: {qk_norm}")
387
+
388
+ self.heads = out_dim // dim_head if out_dim is not None else heads
389
+ # for slice_size > 0 the attention score computation
390
+ # is split across the batch axis to save memory
391
+ # You can set slice_size with `set_attention_slice`
392
+ self.sliceable_head_dim = heads
393
+
394
+ self.added_kv_proj_dim = added_kv_proj_dim
395
+ self.only_cross_attention = only_cross_attention
396
+
397
+ if self.added_kv_proj_dim is None and self.only_cross_attention:
398
+ raise ValueError(
399
+ "`only_cross_attention` can only be set to True if `added_kv_proj_dim` is not None. Make sure to set either `only_cross_attention=False` or define `added_kv_proj_dim`."
400
+ )
401
+
402
+ if norm_num_groups is not None:
403
+ self.group_norm = nn.GroupNorm(num_channels=query_dim, num_groups=norm_num_groups, eps=eps, affine=True)
404
+ else:
405
+ self.group_norm = None
406
+
407
+ if spatial_norm_dim is not None:
408
+ self.spatial_norm = SpatialNorm(f_channels=query_dim, zq_channels=spatial_norm_dim)
409
+ else:
410
+ self.spatial_norm = None
411
+
412
+ if cross_attention_norm is None:
413
+ self.norm_cross = None
414
+ elif cross_attention_norm == "layer_norm":
415
+ self.norm_cross = nn.LayerNorm(self.cross_attention_dim)
416
+ elif cross_attention_norm == "group_norm":
417
+ if self.added_kv_proj_dim is not None:
418
+ # The given `encoder_hidden_states` are initially of shape
419
+ # (batch_size, seq_len, added_kv_proj_dim) before being projected
420
+ # to (batch_size, seq_len, cross_attention_dim). The norm is applied
421
+ # before the projection, so we need to use `added_kv_proj_dim` as
422
+ # the number of channels for the group norm.
423
+ norm_cross_num_channels = added_kv_proj_dim
424
+ else:
425
+ norm_cross_num_channels = self.cross_attention_dim
426
+
427
+ self.norm_cross = nn.GroupNorm(
428
+ num_channels=norm_cross_num_channels, num_groups=cross_attention_norm_num_groups, eps=1e-5, affine=True
429
+ )
430
+ else:
431
+ raise ValueError(
432
+ f"unknown cross_attention_norm: {cross_attention_norm}. Should be None, 'layer_norm' or 'group_norm'"
433
+ )
434
+
435
+ linear_cls = nn.Linear
436
+
437
+ self.linear_cls = linear_cls
438
+ self.to_q = linear_cls(query_dim, self.inner_dim, bias=bias)
439
+
440
+ if not self.only_cross_attention:
441
+ # only relevant for the `AddedKVProcessor` classes
442
+ self.to_k = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)
443
+ self.to_v = linear_cls(self.cross_attention_dim, self.inner_dim, bias=bias)
444
+ else:
445
+ self.to_k = None
446
+ self.to_v = None
447
+
448
+ if self.added_kv_proj_dim is not None:
449
+ self.add_k_proj = linear_cls(added_kv_proj_dim, self.inner_dim)
450
+ self.add_v_proj = linear_cls(added_kv_proj_dim, self.inner_dim)
451
+
452
+ self.to_out = nn.ModuleList([])
453
+ self.to_out.append(linear_cls(self.inner_dim, self.out_dim, bias=out_bias))
454
+ self.to_out.append(nn.Dropout(dropout))
455
+
456
+ # set attention processor
457
+ # We use the AttnProcessor2_0 by default when torch 2.x is used which uses
458
+ # torch.nn.functional.scaled_dot_product_attention for native Flash/memory_efficient_attention
459
+ # but only if it has the default `scale` argument. TODO remove scale_qk check when we move to torch 2.1
460
+ if processor is None:
461
+ processor = AttnProcessor2_0()
462
+ self.set_processor(processor)
463
+
464
+ def set_processor(self, processor: "AttnProcessor") -> None:
465
+ r"""
466
+ Set the attention processor to use.
467
+
468
+ Args:
469
+ processor (`AttnProcessor`):
470
+ The attention processor to use.
471
+ """
472
+ # if current processor is in `self._modules` and if passed `processor` is not, we need to
473
+ # pop `processor` from `self._modules`
474
+ if (
475
+ hasattr(self, "processor")
476
+ and isinstance(self.processor, torch.nn.Module)
477
+ and not isinstance(processor, torch.nn.Module)
478
+ ):
479
+ logger.info(f"You are removing possibly trained weights of {self.processor} with {processor}")
480
+ self._modules.pop("processor")
481
+
482
+ self.processor = processor
483
+
484
+ def get_processor(self, return_deprecated_lora: bool = False) -> "AttentionProcessor": # noqa: F821
485
+ r"""
486
+ Get the attention processor in use.
487
+
488
+ Args:
489
+ return_deprecated_lora (`bool`, *optional*, defaults to `False`):
490
+ Set to `True` to return the deprecated LoRA attention processor.
491
+
492
+ Returns:
493
+ "AttentionProcessor": The attention processor in use.
494
+ """
495
+ if not return_deprecated_lora:
496
+ return self.processor
497
+
498
+ # TODO(Sayak, Patrick). The rest of the function is needed to ensure backwards compatible
499
+ # serialization format for LoRA Attention Processors. It should be deleted once the integration
500
+ # with PEFT is completed.
501
+ is_lora_activated = {
502
+ name: module.lora_layer is not None
503
+ for name, module in self.named_modules()
504
+ if hasattr(module, "lora_layer")
505
+ }
506
+
507
+ # 1. if no layer has a LoRA activated we can return the processor as usual
508
+ if not any(is_lora_activated.values()):
509
+ return self.processor
510
+
511
+ # If doesn't apply LoRA do `add_k_proj` or `add_v_proj`
512
+ is_lora_activated.pop("add_k_proj", None)
513
+ is_lora_activated.pop("add_v_proj", None)
514
+ # 2. else it is not possible that only some layers have LoRA activated
515
+ if not all(is_lora_activated.values()):
516
+ raise ValueError(
517
+ f"Make sure that either all layers or no layers have LoRA activated, but have {is_lora_activated}"
518
+ )
519
+
520
+ # 3. And we need to merge the current LoRA layers into the corresponding LoRA attention processor
521
+ non_lora_processor_cls_name = self.processor.__class__.__name__
522
+ lora_processor_cls = getattr(import_module(__name__), "LoRA" + non_lora_processor_cls_name)
523
+
524
+ hidden_size = self.inner_dim
525
+
526
+ # now create a LoRA attention processor from the LoRA layers
527
+ if lora_processor_cls in [LoRAAttnProcessor, LoRAAttnProcessor2_0, LoRAXFormersAttnProcessor]:
528
+ kwargs = {
529
+ "cross_attention_dim": self.cross_attention_dim,
530
+ "rank": self.to_q.lora_layer.rank,
531
+ "network_alpha": self.to_q.lora_layer.network_alpha,
532
+ "q_rank": self.to_q.lora_layer.rank,
533
+ "q_hidden_size": self.to_q.lora_layer.out_features,
534
+ "k_rank": self.to_k.lora_layer.rank,
535
+ "k_hidden_size": self.to_k.lora_layer.out_features,
536
+ "v_rank": self.to_v.lora_layer.rank,
537
+ "v_hidden_size": self.to_v.lora_layer.out_features,
538
+ "out_rank": self.to_out[0].lora_layer.rank,
539
+ "out_hidden_size": self.to_out[0].lora_layer.out_features,
540
+ }
541
+
542
+ if hasattr(self.processor, "attention_op"):
543
+ kwargs["attention_op"] = self.processor.attention_op
544
+
545
+ lora_processor = lora_processor_cls(hidden_size, **kwargs)
546
+ lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())
547
+ lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())
548
+ lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())
549
+ lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict())
550
+ elif lora_processor_cls == LoRAAttnAddedKVProcessor:
551
+ lora_processor = lora_processor_cls(
552
+ hidden_size,
553
+ cross_attention_dim=self.add_k_proj.weight.shape[0],
554
+ rank=self.to_q.lora_layer.rank,
555
+ network_alpha=self.to_q.lora_layer.network_alpha,
556
+ )
557
+ lora_processor.to_q_lora.load_state_dict(self.to_q.lora_layer.state_dict())
558
+ lora_processor.to_k_lora.load_state_dict(self.to_k.lora_layer.state_dict())
559
+ lora_processor.to_v_lora.load_state_dict(self.to_v.lora_layer.state_dict())
560
+ lora_processor.to_out_lora.load_state_dict(self.to_out[0].lora_layer.state_dict())
561
+
562
+ # only save if used
563
+ if self.add_k_proj.lora_layer is not None:
564
+ lora_processor.add_k_proj_lora.load_state_dict(self.add_k_proj.lora_layer.state_dict())
565
+ lora_processor.add_v_proj_lora.load_state_dict(self.add_v_proj.lora_layer.state_dict())
566
+ else:
567
+ lora_processor.add_k_proj_lora = None
568
+ lora_processor.add_v_proj_lora = None
569
+ else:
570
+ raise ValueError(f"{lora_processor_cls} does not exist.")
571
+
572
+ return lora_processor
573
+
574
+ def forward(
575
+ self,
576
+ hidden_states: torch.FloatTensor,
577
+ freqs_cis: Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] = None,
578
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
579
+ attention_mask: Optional[torch.FloatTensor] = None,
580
+ **cross_attention_kwargs,
581
+ ) -> torch.Tensor:
582
+ r"""
583
+ The forward method of the `Attention` class.
584
+
585
+ Args:
586
+ hidden_states (`torch.Tensor`):
587
+ The hidden states of the query.
588
+ encoder_hidden_states (`torch.Tensor`, *optional*):
589
+ The hidden states of the encoder.
590
+ attention_mask (`torch.Tensor`, *optional*):
591
+ The attention mask to use. If `None`, no mask is applied.
592
+ **cross_attention_kwargs:
593
+ Additional keyword arguments to pass along to the cross attention.
594
+
595
+ Returns:
596
+ `torch.Tensor`: The output of the attention layer.
597
+ """
598
+ # The `Attention` class can call different attention processors / attention functions
599
+ # here we simply pass along all tensors to the selected processor class
600
+ # For standard processors that are defined here, `**cross_attention_kwargs` is empty
601
+
602
+ attn_parameters = set(inspect.signature(self.processor.__call__).parameters.keys())
603
+ unused_kwargs = [k for k, _ in cross_attention_kwargs.items() if k not in attn_parameters]
604
+ if len(unused_kwargs) > 0:
605
+ logger.warning(
606
+ f"cross_attention_kwargs {unused_kwargs} are not expected by"
607
+ f" {self.processor.__class__.__name__} and will be ignored."
608
+ )
609
+ cross_attention_kwargs = {k: w for k, w in cross_attention_kwargs.items() if k in attn_parameters}
610
+
611
+ return self.processor(
612
+ self,
613
+ hidden_states,
614
+ freqs_cis=freqs_cis,
615
+ encoder_hidden_states=encoder_hidden_states,
616
+ attention_mask=attention_mask,
617
+ **cross_attention_kwargs,
618
+ )
619
+
620
+ def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor:
621
+ r"""
622
+ Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`
623
+ is the number of heads initialized while constructing the `Attention` class.
624
+
625
+ Args:
626
+ tensor (`torch.Tensor`): The tensor to reshape.
627
+
628
+ Returns:
629
+ `torch.Tensor`: The reshaped tensor.
630
+ """
631
+ head_size = self.heads
632
+ batch_size, seq_len, dim = tensor.shape
633
+ tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
634
+ tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
635
+ return tensor
636
+
637
+ def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor:
638
+ r"""
639
+ Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]` `heads` is
640
+ the number of heads initialized while constructing the `Attention` class.
641
+
642
+ Args:
643
+ tensor (`torch.Tensor`): The tensor to reshape.
644
+ out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is
645
+ reshaped to `[batch_size * heads, seq_len, dim // heads]`.
646
+
647
+ Returns:
648
+ `torch.Tensor`: The reshaped tensor.
649
+ """
650
+
651
+ head_size = self.heads
652
+ if tensor.ndim == 3:
653
+ batch_size, seq_len, dim = tensor.shape
654
+ extra_dim = 1
655
+ else:
656
+ batch_size, extra_dim, seq_len, dim = tensor.shape
657
+ tensor = tensor.reshape(batch_size, seq_len * extra_dim, head_size, dim // head_size)
658
+ tensor = tensor.permute(0, 2, 1, 3)
659
+
660
+ if out_dim == 3:
661
+ tensor = tensor.reshape(batch_size * head_size, seq_len * extra_dim, dim // head_size)
662
+
663
+ return tensor
664
+
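The two reshape helpers above are inverses of each other; a standalone sketch of the round trip they perform (shapes only, no module state):

import torch

batch, heads, seq, dim = 2, 4, 8, 32                     # dim == heads * head_dim
x = torch.randn(batch, seq, dim)

# head_to_batch_dim: (b, s, h*d) -> (b*h, s, d)
per_head = x.reshape(batch, seq, heads, dim // heads).permute(0, 2, 1, 3)
per_head = per_head.reshape(batch * heads, seq, dim // heads)

# batch_to_head_dim: (b*h, s, d) -> (b, s, h*d)
merged = per_head.reshape(batch, heads, seq, dim // heads).permute(0, 2, 1, 3)
merged = merged.reshape(batch, seq, dim)
torch.testing.assert_close(merged, x)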
665
+ def get_attention_scores(
666
+ self, query: torch.Tensor, key: torch.Tensor, attention_mask: torch.Tensor = None
667
+ ) -> torch.Tensor:
668
+ r"""
669
+ Compute the attention scores.
670
+
671
+ Args:
672
+ query (`torch.Tensor`): The query tensor.
673
+ key (`torch.Tensor`): The key tensor.
674
+ attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.
675
+
676
+ Returns:
677
+ `torch.Tensor`: The attention probabilities/scores.
678
+ """
679
+ dtype = query.dtype
680
+ if self.upcast_attention:
681
+ query = query.float()
682
+ key = key.float()
683
+
684
+ if attention_mask is None:
685
+ baddbmm_input = torch.empty(
686
+ query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device
687
+ )
688
+ beta = 0
689
+ else:
690
+ baddbmm_input = attention_mask
691
+ beta = 1
692
+
693
+ attention_scores = torch.baddbmm(
694
+ baddbmm_input,
695
+ query,
696
+ key.transpose(-1, -2),
697
+ beta=beta,
698
+ alpha=self.scale,
699
+ )
700
+ del baddbmm_input
701
+
702
+ if self.upcast_softmax:
703
+ attention_scores = attention_scores.float()
704
+
705
+ attention_probs = attention_scores.softmax(dim=-1)
706
+ del attention_scores
707
+
708
+ attention_probs = attention_probs.to(dtype)
709
+
710
+ return attention_probs
711
+
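`get_attention_scores` below uses `torch.baddbmm` as a fused form of softmax(Q K^T * scale + mask); a small equivalence check with illustrative shapes:

import torch

bh, q_len, k_len, d = 4, 8, 8, 16                        # batch*heads, query len, key len, head dim
query = torch.randn(bh, q_len, d)
key = torch.randn(bh, k_len, d)
mask = torch.zeros(bh, q_len, k_len)                     # additive mask, 0 = keep
scale = d ** -0.5

scores = torch.baddbmm(mask, query, key.transpose(-1, -2), beta=1, alpha=scale)
reference = query @ key.transpose(-1, -2) * scale + mask
torch.testing.assert_close(scores, reference)
probs = scores.softmax(dim=-1)                           # attention probabilities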
712
+ def prepare_attention_mask(
713
+ self, attention_mask: torch.Tensor, target_length: int, batch_size: int, out_dim: int = 3
714
+ ) -> torch.Tensor:
715
+ r"""
716
+ Prepare the attention mask for the attention computation.
717
+
718
+ Args:
719
+ attention_mask (`torch.Tensor`):
720
+ The attention mask to prepare.
721
+ target_length (`int`):
722
+ The target length of the attention mask. This is the length of the attention mask after padding.
723
+ batch_size (`int`):
724
+ The batch size, which is used to repeat the attention mask.
725
+ out_dim (`int`, *optional*, defaults to `3`):
726
+ The output dimension of the attention mask. Can be either `3` or `4`.
727
+
728
+ Returns:
729
+ `torch.Tensor`: The prepared attention mask.
730
+ """
731
+ head_size = self.heads
732
+ if attention_mask is None:
733
+ return attention_mask
734
+
735
+ current_length: int = attention_mask.shape[-1]
736
+ if current_length != target_length:
737
+ if attention_mask.device.type == "mps":
738
+ # HACK: MPS: Does not support padding by greater than dimension of input tensor.
739
+ # Instead, we can manually construct the padding tensor.
740
+ padding_shape = (attention_mask.shape[0], attention_mask.shape[1], target_length)
741
+ padding = torch.zeros(padding_shape, dtype=attention_mask.dtype, device=attention_mask.device)
742
+ attention_mask = torch.cat([attention_mask, padding], dim=2)
743
+ else:
744
+ # TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
745
+ # we want to instead pad by (0, remaining_length), where remaining_length is:
746
+ # remaining_length: int = target_length - current_length
747
+ # TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
748
+ attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
749
+
750
+ if out_dim == 3:
751
+ if attention_mask.shape[0] < batch_size * head_size:
752
+ attention_mask = attention_mask.repeat_interleave(head_size, dim=0)
753
+ elif out_dim == 4:
754
+ attention_mask = attention_mask.unsqueeze(1)
755
+ attention_mask = attention_mask.repeat_interleave(head_size, dim=1)
756
+
757
+ return attention_mask
758
+
759
+ def norm_encoder_hidden_states(self, encoder_hidden_states: torch.Tensor) -> torch.Tensor:
760
+ r"""
761
+ Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the
762
+ `Attention` class.
763
+
764
+ Args:
765
+ encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.
766
+
767
+ Returns:
768
+ `torch.Tensor`: The normalized encoder hidden states.
769
+ """
770
+ assert self.norm_cross is not None, "self.norm_cross must be defined to call self.norm_encoder_hidden_states"
771
+
772
+ if isinstance(self.norm_cross, nn.LayerNorm):
773
+ encoder_hidden_states = self.norm_cross(encoder_hidden_states)
774
+ elif isinstance(self.norm_cross, nn.GroupNorm):
775
+ # Group norm norms along the channels dimension and expects
776
+ # input to be in the shape of (N, C, *). In this case, we want
777
+ # to norm along the hidden dimension, so we need to move
778
+ # (batch_size, sequence_length, hidden_size) ->
779
+ # (batch_size, hidden_size, sequence_length)
780
+ encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
781
+ encoder_hidden_states = self.norm_cross(encoder_hidden_states)
782
+ encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
783
+ else:
784
+ assert False
785
+
786
+ return encoder_hidden_states
787
+
788
+ @staticmethod
789
+ def apply_rotary_emb(
790
+ input_tensor: torch.Tensor,
791
+ freqs_cis: Tuple[torch.FloatTensor, torch.FloatTensor],
792
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
793
+ cos_freqs = freqs_cis[0]
794
+ sin_freqs = freqs_cis[1]
795
+
796
+ t_dup = rearrange(input_tensor, "... (d r) -> ... d r", r=2)
797
+ t1, t2 = t_dup.unbind(dim=-1)
798
+ t_dup = torch.stack((-t2, t1), dim=-1)
799
+ input_tensor_rot = rearrange(t_dup, "... d r -> ... (d r)")
800
+
801
+ out = input_tensor * cos_freqs + input_tensor_rot * sin_freqs
802
+
803
+ return out
804
+
805
+
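A standalone sketch of the rotation `apply_rotary_emb` performs, assuming interleaved (cos, sin) frequency tensors built from the usual inverse-frequency schedule; how `freqs_cis` is actually constructed elsewhere in the model may differ.

import torch
from einops import rearrange

def rotate_half_interleaved(x: torch.Tensor) -> torch.Tensor:
    # (x1, x2, x3, x4, ...) -> (-x2, x1, -x4, x3, ...), matching apply_rotary_emb above
    x = rearrange(x, "... (d r) -> ... d r", r=2)
    x1, x2 = x.unbind(dim=-1)
    return rearrange(torch.stack((-x2, x1), dim=-1), "... d r -> ... (d r)")

seq_len, dim = 8, 16
pos = torch.arange(seq_len, dtype=torch.float32)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
angles = torch.einsum("s,f->sf", pos, inv_freq).repeat_interleave(2, dim=-1)
cos_freqs, sin_freqs = angles.cos(), angles.sin()        # stand-ins for freqs_cis

q = torch.randn(seq_len, dim)
q_rot = q * cos_freqs + rotate_half_interleaved(q) * sin_freqs   # same formula as apply_rotary_emb
print(q_rot.shape)                                                # torch.Size([8, 16])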
806
+ class AttnProcessor2_0:
807
+ r"""
808
+ Processor for implementing scaled dot-product attention (enabled by default if you're using PyTorch 2.0).
809
+ """
810
+
811
+ def __init__(self):
812
+ pass
813
+
814
+ def __call__(
815
+ self,
816
+ attn: Attention,
817
+ hidden_states: torch.FloatTensor,
818
+ freqs_cis: Tuple[torch.FloatTensor, torch.FloatTensor],
819
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
820
+ attention_mask: Optional[torch.FloatTensor] = None,
821
+ temb: Optional[torch.FloatTensor] = None,
822
+ *args,
823
+ **kwargs,
824
+ ) -> torch.FloatTensor:
825
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
826
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
827
+ deprecate("scale", "1.0.0", deprecation_message)
828
+
829
+ residual = hidden_states
830
+ if attn.spatial_norm is not None:
831
+ hidden_states = attn.spatial_norm(hidden_states, temb)
832
+
833
+ input_ndim = hidden_states.ndim
834
+
835
+ if input_ndim == 4:
836
+ batch_size, channel, height, width = hidden_states.shape
837
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
838
+
839
+ batch_size, sequence_length, _ = (
840
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
841
+ )
842
+
843
+ if (attention_mask is not None) and (not attn.use_tpu_flash_attention):
844
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
845
+ # scaled_dot_product_attention expects attention_mask shape to be
846
+ # (batch, heads, source_length, target_length)
847
+ attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1])
848
+
849
+ if attn.group_norm is not None:
850
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
851
+
852
+ query = attn.to_q(hidden_states)
853
+ query = attn.q_norm(query)
854
+
855
+ if encoder_hidden_states is not None:
856
+ if attn.norm_cross:
857
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
858
+ key = attn.to_k(encoder_hidden_states)
859
+ key = attn.k_norm(key)
860
+ else: # if no context provided do self-attention
861
+ encoder_hidden_states = hidden_states
862
+ key = attn.to_k(hidden_states)
863
+ key = attn.k_norm(key)
864
+ if attn.use_rope:
865
+ key = attn.apply_rotary_emb(key, freqs_cis)
866
+ query = attn.apply_rotary_emb(query, freqs_cis)
867
+
868
+ value = attn.to_v(encoder_hidden_states)
869
+
870
+ inner_dim = key.shape[-1]
871
+ head_dim = inner_dim // attn.heads
872
+
873
+ query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
874
+ key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
875
+ value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2)
876
+
877
+ # the output of sdp = (batch, num_heads, seq_len, head_dim)
878
+
879
+ if attn.use_tpu_flash_attention: # use tpu attention offload 'flash attention'
880
+ q_segment_indexes = None
881
+ if attention_mask is not None:  # if a mask is required, both segment-id fields need to be set
882
+ # attention_mask = torch.squeeze(attention_mask).to(torch.float32)
883
+ attention_mask = attention_mask.to(torch.float32)
884
+ q_segment_indexes = torch.ones(batch_size, query.shape[2], device=query.device, dtype=torch.float32)
885
+ assert (
886
+ attention_mask.shape[1] == key.shape[2]
887
+ ), f"ERROR: KEY SHAPE must be same as attention mask [{key.shape[2]}, {attention_mask.shape[1]}]"
888
+
889
+ assert (
890
+ query.shape[2] % 128 == 0
891
+ ), f"ERROR: QUERY SHAPE must be divisible by 128 (TPU limitation) [{query.shape[2]}]"
892
+ assert (
893
+ key.shape[2] % 128 == 0
894
+ ), f"ERROR: KEY SHAPE must be divisible by 128 (TPU limitation) [{key.shape[2]}]"
895
+
896
+ # run the TPU kernel implemented in jax with pallas
897
+ hidden_states = flash_attention(
898
+ q=query,
899
+ k=key,
900
+ v=value,
901
+ q_segment_ids=q_segment_indexes,
902
+ kv_segment_ids=attention_mask,
903
+ sm_scale=attn.scale,
904
+ )
905
+ else:
906
+ hidden_states = F.scaled_dot_product_attention(
907
+ query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False
908
+ )
909
+
910
+ hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim)
911
+ hidden_states = hidden_states.to(query.dtype)
912
+
913
+ # linear proj
914
+ hidden_states = attn.to_out[0](hidden_states)
915
+ # dropout
916
+ hidden_states = attn.to_out[1](hidden_states)
917
+
918
+ if input_ndim == 4:
919
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
920
+
921
+ if attn.residual_connection:
922
+ hidden_states = hidden_states + residual
923
+
924
+ hidden_states = hidden_states / attn.rescale_output_factor
925
+
926
+ return hidden_states
927
+
928
+
929
+ class AttnProcessor:
930
+ r"""
931
+ Default processor for performing attention-related computations.
932
+ """
933
+
934
+ def __call__(
935
+ self,
936
+ attn: Attention,
937
+ hidden_states: torch.FloatTensor,
938
+ encoder_hidden_states: Optional[torch.FloatTensor] = None,
939
+ attention_mask: Optional[torch.FloatTensor] = None,
940
+ temb: Optional[torch.FloatTensor] = None,
941
+ *args,
942
+ **kwargs,
943
+ ) -> torch.Tensor:
944
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
945
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
946
+ deprecate("scale", "1.0.0", deprecation_message)
947
+
948
+ residual = hidden_states
949
+
950
+ if attn.spatial_norm is not None:
951
+ hidden_states = attn.spatial_norm(hidden_states, temb)
952
+
953
+ input_ndim = hidden_states.ndim
954
+
955
+ if input_ndim == 4:
956
+ batch_size, channel, height, width = hidden_states.shape
957
+ hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)
958
+
959
+ batch_size, sequence_length, _ = (
960
+ hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
961
+ )
962
+ attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
963
+
964
+ if attn.group_norm is not None:
965
+ hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)
966
+
967
+ query = attn.to_q(hidden_states)
968
+
969
+ if encoder_hidden_states is None:
970
+ encoder_hidden_states = hidden_states
971
+ elif attn.norm_cross:
972
+ encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)
973
+
974
+ key = attn.to_k(encoder_hidden_states)
975
+ value = attn.to_v(encoder_hidden_states)
976
+
977
+ query = attn.head_to_batch_dim(query)
978
+ key = attn.head_to_batch_dim(key)
979
+ value = attn.head_to_batch_dim(value)
980
+
981
+ query = attn.q_norm(query)
982
+ key = attn.k_norm(key)
983
+
984
+ attention_probs = attn.get_attention_scores(query, key, attention_mask)
985
+ hidden_states = torch.bmm(attention_probs, value)
986
+ hidden_states = attn.batch_to_head_dim(hidden_states)
987
+
988
+ # linear proj
989
+ hidden_states = attn.to_out[0](hidden_states)
990
+ # dropout
991
+ hidden_states = attn.to_out[1](hidden_states)
992
+
993
+ if input_ndim == 4:
994
+ hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)
995
+
996
+ if attn.residual_connection:
997
+ hidden_states = hidden_states + residual
998
+
999
+ hidden_states = hidden_states / attn.rescale_output_factor
1000
+
1001
+ return hidden_states
1002
+
1003
+
1004
+ class FeedForward(nn.Module):
1005
+ r"""
1006
+ A feed-forward layer.
1007
+
1008
+ Parameters:
1009
+ dim (`int`): The number of channels in the input.
1010
+ dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
1011
+ mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
1012
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
1013
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
1014
+ final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
1015
+ bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
1016
+ """
1017
+
1018
+ def __init__(
1019
+ self,
1020
+ dim: int,
1021
+ dim_out: Optional[int] = None,
1022
+ mult: int = 4,
1023
+ dropout: float = 0.0,
1024
+ activation_fn: str = "geglu",
1025
+ final_dropout: bool = False,
1026
+ inner_dim: Optional[int] = None,
1027
+ bias: bool = True,
1028
+ ):
1029
+ super().__init__()
1030
+ if inner_dim is None:
1031
+ inner_dim = int(dim * mult)
1032
+ dim_out = dim_out if dim_out is not None else dim
1033
+ linear_cls = nn.Linear
1034
+
1035
+ if activation_fn == "gelu":
1036
+ act_fn = GELU(dim, inner_dim, bias=bias)
1037
+ elif activation_fn == "gelu-approximate":
1038
+ act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
1039
+ elif activation_fn == "geglu":
1040
+ act_fn = GEGLU(dim, inner_dim, bias=bias)
1041
+ elif activation_fn == "geglu-approximate":
1042
+ act_fn = ApproximateGELU(dim, inner_dim, bias=bias)
1043
+ else:
1044
+ raise ValueError(f"Unsupported activation function: {activation_fn}")
1045
+
1046
+ self.net = nn.ModuleList([])
1047
+ # project in
1048
+ self.net.append(act_fn)
1049
+ # project dropout
1050
+ self.net.append(nn.Dropout(dropout))
1051
+ # project out
1052
+ self.net.append(linear_cls(inner_dim, dim_out, bias=bias))
1053
+ # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
1054
+ if final_dropout:
1055
+ self.net.append(nn.Dropout(dropout))
1056
+
1057
+ def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
1058
+ compatible_cls = (GEGLU, LoRACompatibleLinear)
1059
+ for module in self.net:
1060
+ if isinstance(module, compatible_cls):
1061
+ hidden_states = module(hidden_states, scale)
1062
+ else:
1063
+ hidden_states = module(hidden_states)
1064
+ return hidden_states
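
A minimal sanity-check sketch of the `FeedForward` block above (assuming `FeedForward` is importable from `xora.models.transformers.attention`, next to `BasicTransformerBlock`; all sizes are illustrative only): the GEGLU path projects `dim` up to `dim * mult` and back, so the `(batch, tokens, dim)` layout is preserved.

import torch
from xora.models.transformers.attention import FeedForward  # assumed module path

ff = FeedForward(dim=64, mult=4, dropout=0.0, activation_fn="geglu")
x = torch.randn(2, 10, 64)   # (batch, tokens, channels)
y = ff(x)                    # scale defaults to 1.0 and only affects LoRA-compatible layers
assert y.shape == x.shape    # GEGLU expands to 256 internally, then projects back to 64
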
xora/models/transformers/symmetric_patchifier.py ADDED
@@ -0,0 +1,88 @@
1
+ from abc import ABC, abstractmethod
2
+ from typing import Tuple
3
+
4
+ import torch
5
+ from diffusers.configuration_utils import ConfigMixin
6
+ from einops import rearrange
7
+ from torch import Tensor
8
+
9
+ from xora.utils.torch_utils import append_dims
10
+
11
+
12
+ class Patchifier(ConfigMixin, ABC):
13
+ def __init__(self, patch_size: int):
14
+ super().__init__()
15
+ self._patch_size = (1, patch_size, patch_size)
16
+
17
+ @abstractmethod
18
+ def patchify(self, latents: Tensor, frame_rates: Tensor, scale_grid: bool) -> Tuple[Tensor, Tensor]:
19
+ pass
20
+
21
+ @abstractmethod
22
+ def unpatchify(
23
+ self, latents: Tensor, output_height: int, output_width: int, output_num_frames: int, out_channels: int
24
+ ) -> Tuple[Tensor, Tensor]:
25
+ pass
26
+
27
+ @property
28
+ def patch_size(self):
29
+ return self._patch_size
30
+
31
+ def get_grid(self, orig_num_frames, orig_height, orig_width, batch_size, scale_grid, device):
32
+ f = orig_num_frames // self._patch_size[0]
33
+ h = orig_height // self._patch_size[1]
34
+ w = orig_width // self._patch_size[2]
35
+ grid_h = torch.arange(h, dtype=torch.float32, device=device)
36
+ grid_w = torch.arange(w, dtype=torch.float32, device=device)
37
+ grid_f = torch.arange(f, dtype=torch.float32, device=device)
38
+ grid = torch.meshgrid(grid_f, grid_h, grid_w)
39
+ grid = torch.stack(grid, dim=0)
40
+ grid = grid.unsqueeze(0).repeat(batch_size, 1, 1, 1, 1)
41
+
42
+ if scale_grid is not None:
43
+ for i in range(3):
44
+ if isinstance(scale_grid[i], Tensor):
45
+ scale = append_dims(scale_grid[i], grid.ndim - 1)
46
+ else:
47
+ scale = scale_grid[i]
48
+ grid[:, i, ...] = grid[:, i, ...] * scale * self._patch_size[i]
49
+
50
+ grid = rearrange(grid, "b c f h w -> b c (f h w)", b=batch_size)
51
+ return grid
52
+
53
+
54
+ def pixart_alpha_patchify(
55
+ latents: Tensor,
56
+ patch_size: int,
57
+ ) -> Tuple[Tensor, Tensor]:
58
+ latents = rearrange(
59
+ latents,
60
+ "b c (f p1) (h p2) (w p3) -> b (f h w) (c p1 p2 p3)",
61
+ p1=patch_size[0],
62
+ p2=patch_size[1],
63
+ p3=patch_size[2],
64
+ )
65
+ return latents
66
+
67
+ class SymmetricPatchifier(Patchifier):
68
+ def patchify(
69
+ self,
70
+ latents: Tensor,
71
+ ) -> Tuple[Tensor, Tensor]:
72
+ return pixart_alpha_patchify(latents, self._patch_size)
73
+
74
+ def unpatchify(
75
+ self, latents: Tensor, output_height: int, output_width: int, output_num_frames: int, out_channels: int
76
+ ) -> Tuple[Tensor, Tensor]:
77
+ output_height = output_height // self._patch_size[1]
78
+ output_width = output_width // self._patch_size[2]
79
+ latents = rearrange(
80
+ latents,
81
+ "b (f h w) (c p q) -> b c f (h p) (w q) ",
82
+ f=output_num_frames,
83
+ h=output_height,
84
+ w=output_width,
85
+ p=self._patch_size[1],
86
+ q=self._patch_size[2],
87
+ )
88
+ return latents
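
A hedged round-trip sketch of the patchifier's shape bookkeeping (all sizes invented for illustration): with `patch_size=2` the internal patch is `(1, 2, 2)`, so a `(b, c, f, h, w)` latent becomes `(b, f * h/2 * w/2, 4c)` tokens, and `get_grid` produces one `(frame, row, col)` index triple per token.

import torch
from xora.models.transformers.symmetric_patchifier import SymmetricPatchifier

patchifier = SymmetricPatchifier(patch_size=2)      # internal patch size (1, 2, 2)
latents = torch.randn(1, 128, 9, 16, 16)            # (batch, channels, frames, height, width)

tokens = patchifier.patchify(latents)                # (1, 9 * 8 * 8, 128 * 2 * 2)
assert tokens.shape == (1, 576, 512)

grid = patchifier.get_grid(
    orig_num_frames=9, orig_height=16, orig_width=16,
    batch_size=1, scale_grid=None, device=latents.device,
)
assert grid.shape == (1, 3, 576)                     # per-token (frame, row, col) indices

restored = patchifier.unpatchify(
    tokens, output_height=16, output_width=16, output_num_frames=9, out_channels=128
)
assert restored.shape == latents.shape
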
xora/models/transformers/transformer3d.py ADDED
@@ -0,0 +1,360 @@
1
+ # Adapted from: https://github.com/huggingface/diffusers/blob/v0.26.3/src/diffusers/models/transformers/transformer_2d.py
2
+ import math
3
+ from dataclasses import dataclass
4
+ from typing import Any, Dict, List, Optional
5
+
6
+ import torch
7
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
8
+ from diffusers.models.embeddings import PixArtAlphaTextProjection
9
+ from diffusers.models.modeling_utils import ModelMixin
10
+ from diffusers.models.normalization import AdaLayerNormSingle
11
+ from diffusers.utils import BaseOutput, is_torch_version
12
+ from torch import nn
13
+
14
+ from xora.models.transformers.attention import BasicTransformerBlock
15
+
16
+
17
+ @dataclass
18
+ class Transformer3DModelOutput(BaseOutput):
19
+ """
20
+ The output of [`Transformer3DModel`].
21
+
22
+ Args:
23
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
24
+ The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability
25
+ distributions for the unnoised latent pixels.
26
+ """
27
+
28
+ sample: torch.FloatTensor
29
+
30
+
31
+ class Transformer3DModel(ModelMixin, ConfigMixin):
32
+ _supports_gradient_checkpointing = True
33
+
34
+ @register_to_config
35
+ def __init__(
36
+ self,
37
+ num_attention_heads: int = 16,
38
+ attention_head_dim: int = 88,
39
+ in_channels: Optional[int] = None,
40
+ out_channels: Optional[int] = None,
41
+ num_layers: int = 1,
42
+ dropout: float = 0.0,
43
+ norm_num_groups: int = 32,
44
+ cross_attention_dim: Optional[int] = None,
45
+ attention_bias: bool = False,
46
+ num_vector_embeds: Optional[int] = None,
47
+ activation_fn: str = "geglu",
48
+ num_embeds_ada_norm: Optional[int] = None,
49
+ use_linear_projection: bool = False,
50
+ only_cross_attention: bool = False,
51
+ double_self_attention: bool = False,
52
+ upcast_attention: bool = False,
53
+ adaptive_norm: str = "single_scale_shift", # 'single_scale_shift' or 'single_scale'
54
+ standardization_norm: str = "layer_norm", # 'layer_norm' or 'rms_norm'
55
+ norm_elementwise_affine: bool = True,
56
+ norm_eps: float = 1e-5,
57
+ attention_type: str = "default",
58
+ caption_channels: Optional[int] = None,
59
+ project_to_2d_pos: bool = False,
60
+ use_tpu_flash_attention: bool = False, # if True uses the TPU attention offload ('flash attention')
61
+ qk_norm: Optional[str] = None,
62
+ positional_embedding_type: str = "absolute",
63
+ positional_embedding_theta: Optional[float] = None,
64
+ positional_embedding_max_pos: Optional[List[int]] = None,
65
+ timestep_scale_multiplier: Optional[float] = None,
66
+ ):
67
+ super().__init__()
68
+ self.use_tpu_flash_attention = use_tpu_flash_attention # FIXME: push config down to the attention modules
69
+ self.use_linear_projection = use_linear_projection
70
+ self.num_attention_heads = num_attention_heads
71
+ self.attention_head_dim = attention_head_dim
72
+ inner_dim = num_attention_heads * attention_head_dim
73
+ self.inner_dim = inner_dim
74
+
75
+ self.project_to_2d_pos = project_to_2d_pos
76
+
77
+ self.patchify_proj = nn.Linear(in_channels, inner_dim, bias=True)
78
+
79
+ self.positional_embedding_type = positional_embedding_type
80
+ self.positional_embedding_theta = positional_embedding_theta
81
+ self.positional_embedding_max_pos = positional_embedding_max_pos
82
+ self.use_rope = self.positional_embedding_type == "rope"
83
+ self.timestep_scale_multiplier = timestep_scale_multiplier
84
+
85
+ if self.positional_embedding_type == "absolute":
86
+ embed_dim_3d = math.ceil((inner_dim / 2) * 3) if project_to_2d_pos else inner_dim
87
+ if self.project_to_2d_pos:
88
+ self.to_2d_proj = torch.nn.Linear(embed_dim_3d, inner_dim, bias=False)
89
+ self._init_to_2d_proj_weights(self.to_2d_proj)
90
+ elif self.positional_embedding_type == "rope":
91
+ if positional_embedding_theta is None:
92
+ raise ValueError(
93
+ "If `positional_embedding_type` type is rope, `positional_embedding_theta` must also be defined"
94
+ )
95
+ if positional_embedding_max_pos is None:
96
+ raise ValueError(
97
+ "If `positional_embedding_type` type is rope, `positional_embedding_max_pos` must also be defined"
98
+ )
99
+
100
+ # 3. Define transformers blocks
101
+ self.transformer_blocks = nn.ModuleList(
102
+ [
103
+ BasicTransformerBlock(
104
+ inner_dim,
105
+ num_attention_heads,
106
+ attention_head_dim,
107
+ dropout=dropout,
108
+ cross_attention_dim=cross_attention_dim,
109
+ activation_fn=activation_fn,
110
+ num_embeds_ada_norm=num_embeds_ada_norm,
111
+ attention_bias=attention_bias,
112
+ only_cross_attention=only_cross_attention,
113
+ double_self_attention=double_self_attention,
114
+ upcast_attention=upcast_attention,
115
+ adaptive_norm=adaptive_norm,
116
+ standardization_norm=standardization_norm,
117
+ norm_elementwise_affine=norm_elementwise_affine,
118
+ norm_eps=norm_eps,
119
+ attention_type=attention_type,
120
+ use_tpu_flash_attention=use_tpu_flash_attention,
121
+ qk_norm=qk_norm,
122
+ use_rope=self.use_rope,
123
+ )
124
+ for d in range(num_layers)
125
+ ]
126
+ )
127
+
128
+ # 4. Define output layers
129
+ self.out_channels = in_channels if out_channels is None else out_channels
130
+ self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
131
+ self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)
132
+ self.proj_out = nn.Linear(inner_dim, self.out_channels)
133
+
134
+ # 5. PixArt-Alpha blocks.
135
+ self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=False)
136
+ if adaptive_norm == "single_scale":
137
+ # Use 4 channels instead of the 6 for the PixArt-Alpha scale + shift ada norm.
138
+ self.adaln_single.linear = nn.Linear(inner_dim, 4 * inner_dim, bias=True)
139
+
140
+ self.caption_projection = None
141
+ if caption_channels is not None:
142
+ self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim)
143
+
144
+ self.gradient_checkpointing = False
145
+
146
+ def _set_gradient_checkpointing(self, module, value=False):
147
+ if hasattr(module, "gradient_checkpointing"):
148
+ module.gradient_checkpointing = value
149
+
150
+ @staticmethod
151
+ def _init_to_2d_proj_weights(linear_layer):
152
+ input_features = linear_layer.weight.data.size(1)
153
+ output_features = linear_layer.weight.data.size(0)
154
+
155
+ # Start with a zero matrix
156
+ identity_like = torch.zeros((output_features, input_features))
157
+
158
+ # Fill the diagonal with 1's as much as possible
159
+ min_features = min(output_features, input_features)
160
+ identity_like[:min_features, :min_features] = torch.eye(min_features)
161
+ linear_layer.weight.data = identity_like.to(linear_layer.weight.data.device)
162
+
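
A quick illustration of the identity-like initialization above, with arbitrary small dimensions: the top-left `min(in, out)` block of the projection becomes an identity, so the leading channels of the 3D positional embedding initially pass through unchanged.

import torch
from torch import nn

proj = nn.Linear(4, 6, bias=False)                            # hypothetical 4-in / 6-out projection
Transformer3DModel._init_to_2d_proj_weights(proj)             # static method defined above
assert torch.equal(proj.weight.data[:4, :4], torch.eye(4))    # identity block
assert torch.all(proj.weight.data[4:] == 0)                   # remaining rows stay zero
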
163
+ def get_fractional_positions(self, indices_grid):
164
+ fractional_positions = torch.stack(
165
+ [indices_grid[:, i] / self.positional_embedding_max_pos[i] for i in range(3)], dim=-1
166
+ )
167
+ return fractional_positions
168
+
169
+ def precompute_freqs_cis(self, indices_grid, spacing="exp"):
170
+ dtype = self.dtype
171
+ dim = self.inner_dim
172
+ theta = self.positional_embedding_theta
173
+
174
+ fractional_positions = self.get_fractional_positions(indices_grid)
175
+
176
+ start = 1
177
+ end = theta
178
+ device = fractional_positions.device
179
+ if spacing == "exp":
180
+ indices = theta ** (
181
+ torch.linspace(math.log(start, theta), math.log(end, theta), dim // 6, device=device, dtype=dtype)
182
+ )
183
+ indices = indices.to(dtype=dtype)
184
+ elif spacing == "exp_2":
185
+ indices = 1.0 / theta ** (torch.arange(0, dim, 6, device=device) / dim)
186
+ indices = indices.to(dtype=dtype)
187
+ elif spacing == "linear":
188
+ indices = torch.linspace(start, end, dim // 6, device=device, dtype=dtype)
189
+ elif spacing == "sqrt":
190
+ indices = torch.linspace(start**2, end**2, dim // 6, device=device, dtype=dtype).sqrt()
191
+
192
+ indices = indices * math.pi / 2
193
+
194
+ if spacing == "exp_2":
195
+ freqs = (indices * fractional_positions.unsqueeze(-1)).transpose(-1, -2).flatten(2)
196
+ else:
197
+ freqs = (indices * (fractional_positions.unsqueeze(-1) * 2 - 1)).transpose(-1, -2).flatten(2)
198
+
199
+ cos_freq = freqs.cos().repeat_interleave(2, dim=-1)
200
+ sin_freq = freqs.sin().repeat_interleave(2, dim=-1)
201
+ if dim % 6 != 0:
202
+ cos_padding = torch.ones_like(cos_freq[:, :, : dim % 6])
203
+ sin_padding = torch.zeros_like(cos_freq[:, :, : dim % 6])
204
+ cos_freq = torch.cat([cos_padding, cos_freq], dim=-1)
205
+ sin_freq = torch.cat([sin_padding, sin_freq], dim=-1)
206
+ return cos_freq, sin_freq
207
+
208
+ def forward(
209
+ self,
210
+ hidden_states: torch.Tensor,
211
+ indices_grid: torch.Tensor,
212
+ encoder_hidden_states: Optional[torch.Tensor] = None,
213
+ timestep: Optional[torch.LongTensor] = None,
214
+ class_labels: Optional[torch.LongTensor] = None,
215
+ cross_attention_kwargs: Dict[str, Any] = None,
216
+ attention_mask: Optional[torch.Tensor] = None,
217
+ encoder_attention_mask: Optional[torch.Tensor] = None,
218
+ return_dict: bool = True,
219
+ ):
220
+ """
221
+ The [`Transformer3DModel`] forward method.
222
+
223
+ Args:
224
+ hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
225
+ Input `hidden_states`.
226
+ indices_grid (`torch.LongTensor` of shape `(batch size, 3, num latent pixels)`):
227
+ encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
228
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
229
+ self-attention.
230
+ timestep ( `torch.LongTensor`, *optional*):
231
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
232
+ class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
233
+ Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
234
+ `AdaLayerZeroNorm`.
235
+ cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
236
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
237
+ `self.processor` in
238
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
239
+ attention_mask ( `torch.Tensor`, *optional*):
240
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
241
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
242
+ negative values to the attention scores corresponding to "discard" tokens.
243
+ encoder_attention_mask ( `torch.Tensor`, *optional*):
244
+ Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
245
+
246
+ * Mask `(batch, sequence_length)` True = keep, False = discard.
247
+ * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
248
+
249
+ If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
250
+ above. This bias will be added to the cross-attention scores.
251
+ return_dict (`bool`, *optional*, defaults to `True`):
252
+ Whether or not to return a [`Transformer3DModelOutput`] instead of a plain
253
+ tuple.
254
+
255
+ Returns:
256
+ If `return_dict` is True, a [`Transformer3DModelOutput`] is returned, otherwise a
257
+ `tuple` where the first element is the sample tensor.
258
+ """
259
+ # For TPU attention offload, 2D token masks are used; no need to transform.
260
+ if not self.use_tpu_flash_attention:
261
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
262
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
263
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
264
+ # expects mask of shape:
265
+ # [batch, key_tokens]
266
+ # adds singleton query_tokens dimension:
267
+ # [batch, 1, key_tokens]
268
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
269
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
270
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
271
+ if attention_mask is not None and attention_mask.ndim == 2:
272
+ # assume that mask is expressed as:
273
+ # (1 = keep, 0 = discard)
274
+ # convert mask into a bias that can be added to attention scores:
275
+ # (keep = +0, discard = -10000.0)
276
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
277
+ attention_mask = attention_mask.unsqueeze(1)
278
+
279
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
280
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
281
+ encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
282
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
283
+
284
+ # 1. Input
285
+ hidden_states = self.patchify_proj(hidden_states)
286
+
287
+ if self.timestep_scale_multiplier:
288
+ timestep = self.timestep_scale_multiplier * timestep
289
+
290
+ if self.positional_embedding_type == "rope":
291
+ freqs_cis = self.precompute_freqs_cis(indices_grid)
292
+ else:
293
+ raise NotImplementedError("Only rope pos embed supported.")
294
+
295
+ batch_size = hidden_states.shape[0]
296
+ timestep, embedded_timestep = self.adaln_single(
297
+ timestep.flatten(),
298
+ {"resolution": None, "aspect_ratio": None},
299
+ batch_size=batch_size,
300
+ hidden_dtype=hidden_states.dtype,
301
+ )
302
+ # Second dimension is 1 or number of tokens (if timestep_per_token)
303
+ timestep = timestep.view(batch_size, -1, timestep.shape[-1])
304
+ embedded_timestep = embedded_timestep.view(batch_size, -1, embedded_timestep.shape[-1])
305
+
306
+ # 2. Blocks
307
+ if self.caption_projection is not None:
308
+ batch_size = hidden_states.shape[0]
309
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
310
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])
311
+
312
+ for block in self.transformer_blocks:
313
+ if self.training and self.gradient_checkpointing:
314
+
315
+ def create_custom_forward(module, return_dict=None):
316
+ def custom_forward(*inputs):
317
+ if return_dict is not None:
318
+ return module(*inputs, return_dict=return_dict)
319
+ else:
320
+ return module(*inputs)
321
+
322
+ return custom_forward
323
+
324
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
325
+ hidden_states = torch.utils.checkpoint.checkpoint(
326
+ create_custom_forward(block),
327
+ hidden_states,
328
+ freqs_cis,
329
+ attention_mask,
330
+ encoder_hidden_states,
331
+ encoder_attention_mask,
332
+ timestep,
333
+ cross_attention_kwargs,
334
+ class_labels,
335
+ **ckpt_kwargs,
336
+ )
337
+ else:
338
+ hidden_states = block(
339
+ hidden_states,
340
+ freqs_cis=freqs_cis,
341
+ attention_mask=attention_mask,
342
+ encoder_hidden_states=encoder_hidden_states,
343
+ encoder_attention_mask=encoder_attention_mask,
344
+ timestep=timestep,
345
+ cross_attention_kwargs=cross_attention_kwargs,
346
+ class_labels=class_labels,
347
+ )
348
+
349
+ # 3. Output
350
+ scale_shift_values = self.scale_shift_table[None, None] + embedded_timestep[:, :, None]
351
+ shift, scale = scale_shift_values[:, :, 0], scale_shift_values[:, :, 1]
352
+ hidden_states = self.norm_out(hidden_states)
353
+ # Modulation
354
+ hidden_states = hidden_states * (1 + scale) + shift
355
+ hidden_states = self.proj_out(hidden_states)
356
+ if not return_dict:
357
+ return (hidden_states,)
358
+
359
+ return Transformer3DModelOutput(sample=hidden_states)
360
+
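A minimal end-to-end sketch of how the patchifier and `Transformer3DModel` are meant to fit together, under invented sizes and default settings; none of these numbers come from a released config, and the RoPE parameters are illustrative assumptions.

import torch
from xora.models.transformers.symmetric_patchifier import SymmetricPatchifier
from xora.models.transformers.transformer3d import Transformer3DModel

patchifier = SymmetricPatchifier(patch_size=2)
model = Transformer3DModel(
    num_attention_heads=4,
    attention_head_dim=32,                  # inner_dim = 128
    in_channels=512,                        # = latent channels (128) * patch volume (1*2*2)
    num_layers=2,
    cross_attention_dim=128,                # text tokens are projected to inner_dim first
    caption_channels=64,
    positional_embedding_type="rope",
    positional_embedding_theta=10000.0,
    positional_embedding_max_pos=[20, 2048, 2048],
)

latents = torch.randn(1, 128, 9, 16, 16)    # (b, c, f, h, w) latent video
tokens = patchifier.patchify(latents)       # (1, 576, 512)
indices_grid = patchifier.get_grid(
    orig_num_frames=9, orig_height=16, orig_width=16,
    batch_size=1, scale_grid=None, device=tokens.device,
)

out = model(
    tokens,
    indices_grid,
    encoder_hidden_states=torch.randn(1, 16, 64),   # stand-in text embeddings
    timestep=torch.tensor([999]),
    return_dict=True,
)
assert out.sample.shape == tokens.shape             # out_channels defaults to in_channels
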
xora/pipelines/__init__.py ADDED
File without changes
xora/pipelines/pipeline_video_pixart_alpha.py ADDED
@@ -0,0 +1,859 @@
1
+ # Adapted from: https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/pixart_alpha/pipeline_pixart_alpha.py
2
+ import html
3
+ import inspect
4
+ import math
5
+ import re
6
+ import urllib.parse as ul
7
+ from typing import Callable, Dict, List, Optional, Tuple, Union
8
+ from abc import ABC, abstractmethod
9
+
10
+
11
+ import torch
12
+ import torch.nn.functional as F
13
+ from torch import Tensor
14
+ from diffusers.image_processor import VaeImageProcessor
15
+ from diffusers.models import AutoencoderKL
16
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
17
+ from diffusers.schedulers import DPMSolverMultistepScheduler
18
+ from diffusers.utils import (
19
+ BACKENDS_MAPPING,
20
+ deprecate,
21
+ is_bs4_available,
22
+ is_ftfy_available,
23
+ logging,
24
+ replace_example_docstring,
25
+ )
26
+ from diffusers.utils.torch_utils import randn_tensor
27
+ from einops import rearrange
28
+ from transformers import T5EncoderModel, T5Tokenizer
29
+
30
+ from xora.models.transformers.transformer3d import Transformer3DModel
31
+ from xora.models.transformers.symmetric_patchifier import Patchifier
32
+ from xora.models.autoencoders.vae_encode import get_vae_size_scale_factor, vae_decode, vae_encode
33
+ from xora.models.autoencoders.causal_video_autoencoder import CausalVideoAutoencoder
34
+ from xora.schedulers.rf import TimestepShifter
35
+
36
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
37
+
38
+
39
+ if is_bs4_available():
40
+ from bs4 import BeautifulSoup
41
+
42
+ if is_ftfy_available():
43
+ import ftfy
44
+
45
+ def retrieve_timesteps(
46
+ scheduler,
47
+ num_inference_steps: Optional[int] = None,
48
+ device: Optional[Union[str, torch.device]] = None,
49
+ timesteps: Optional[List[int]] = None,
50
+ **kwargs,
51
+ ):
52
+ """
53
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
54
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
55
+
56
+ Args:
57
+ scheduler (`SchedulerMixin`):
58
+ The scheduler to get timesteps from.
59
+ num_inference_steps (`int`):
60
+ The number of diffusion steps used when generating samples with a pre-trained model. If used,
61
+ `timesteps` must be `None`.
62
+ device (`str` or `torch.device`, *optional*):
63
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
64
+ timesteps (`List[int]`, *optional*):
65
+ Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
66
+ timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
67
+ must be `None`.
68
+
69
+ Returns:
70
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
71
+ second element is the number of inference steps.
72
+ """
73
+ if timesteps is not None:
74
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
75
+ if not accepts_timesteps:
76
+ raise ValueError(
77
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
78
+ f" timestep schedules. Please check whether you are using the correct scheduler."
79
+ )
80
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
81
+ timesteps = scheduler.timesteps
82
+ num_inference_steps = len(timesteps)
83
+ else:
84
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
85
+ timesteps = scheduler.timesteps
86
+ return timesteps, num_inference_steps
87
+
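
A hedged sketch of the two ways `retrieve_timesteps` can be driven, using a stand-in scheduler; the custom-schedule branch only applies when the scheduler's `set_timesteps` accepts a `timesteps` argument, which is exactly what the helper checks for.

import inspect
from diffusers import DPMSolverMultistepScheduler
from xora.pipelines.pipeline_video_pixart_alpha import retrieve_timesteps

scheduler = DPMSolverMultistepScheduler()

# Default path: the scheduler spaces `num_inference_steps` timesteps itself.
timesteps, n = retrieve_timesteps(scheduler, num_inference_steps=20, device="cpu")
assert n == 20

# Custom path: an explicit, descending schedule, guarded the same way the helper guards it.
if "timesteps" in inspect.signature(scheduler.set_timesteps).parameters:
    timesteps, n = retrieve_timesteps(scheduler, device="cpu", timesteps=[900, 600, 300, 100])
    assert n == 4
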
88
+
89
+ class VideoPixArtAlphaPipeline(DiffusionPipeline):
90
+ r"""
91
+ Pipeline for text-to-video generation using PixArt-Alpha.
92
+
93
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
94
+ library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)
95
+
96
+ Args:
97
+ vae ([`AutoencoderKL`]):
98
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
99
+ text_encoder ([`T5EncoderModel`]):
100
+ Frozen text-encoder. PixArt-Alpha uses
101
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the
102
+ [t5-v1_1-xxl](https://huggingface.co/PixArt-alpha/PixArt-alpha/tree/main/t5-v1_1-xxl) variant.
103
+ tokenizer (`T5Tokenizer`):
104
+ Tokenizer of class
105
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
106
+ transformer ([`Transformer2DModel`]):
107
+ A text conditioned `Transformer2DModel` to denoise the encoded image latents.
108
+ scheduler ([`SchedulerMixin`]):
109
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
110
+ """
111
+
112
+ bad_punct_regex = re.compile(
113
+ r"["
114
+ + "#®•©™&@·º½¾¿¡§~"
115
+ + r"\)"
116
+ + r"\("
117
+ + r"\]"
118
+ + r"\["
119
+ + r"\}"
120
+ + r"\{"
121
+ + r"\|"
122
+ + "\\"
123
+ + r"\/"
124
+ + r"\*"
125
+ + r"]{1,}"
126
+ ) # noqa
127
+
128
+ _optional_components = ["tokenizer", "text_encoder"]
129
+ model_cpu_offload_seq = "text_encoder->transformer->vae"
130
+
131
+ def __init__(
132
+ self,
133
+ tokenizer: T5Tokenizer,
134
+ text_encoder: T5EncoderModel,
135
+ vae: AutoencoderKL,
136
+ transformer: Transformer3DModel,
137
+ scheduler: DPMSolverMultistepScheduler,
138
+ patchifier: Patchifier,
139
+ ):
140
+ super().__init__()
141
+
142
+ self.register_modules(
143
+ tokenizer=tokenizer,
144
+ text_encoder=text_encoder,
145
+ vae=vae,
146
+ transformer=transformer,
147
+ scheduler=scheduler,
148
+ patchifier=patchifier,
149
+ )
150
+
151
+ self.video_scale_factor, self.vae_scale_factor, _ = get_vae_size_scale_factor(self.vae)
152
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
153
+
154
+ # Adapted from https://github.com/PixArt-alpha/PixArt-alpha/blob/master/diffusion/model/utils.py
155
+ def mask_text_embeddings(self, emb, mask):
156
+ if emb.shape[0] == 1:
157
+ keep_index = mask.sum().item()
158
+ return emb[:, :, :keep_index, :], keep_index
159
+ else:
160
+ masked_feature = emb * mask[:, None, :, None]
161
+ return masked_feature, emb.shape[2]
162
+
163
+ # Adapted from diffusers.pipelines.deepfloyd_if.pipeline_if.encode_prompt
164
+ def encode_prompt(
165
+ self,
166
+ prompt: Union[str, List[str]],
167
+ do_classifier_free_guidance: bool = True,
168
+ negative_prompt: str = "",
169
+ num_images_per_prompt: int = 1,
170
+ device: Optional[torch.device] = None,
171
+ prompt_embeds: Optional[torch.FloatTensor] = None,
172
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
173
+ prompt_attention_mask: Optional[torch.FloatTensor] = None,
174
+ negative_prompt_attention_mask: Optional[torch.FloatTensor] = None,
175
+ clean_caption: bool = False,
176
+ **kwargs,
177
+ ):
178
+ r"""
179
+ Encodes the prompt into text encoder hidden states.
180
+
181
+ Args:
182
+ prompt (`str` or `List[str]`, *optional*):
183
+ prompt to be encoded
184
+ negative_prompt (`str` or `List[str]`, *optional*):
185
+ The prompt not to guide the image generation. If not defined, one has to pass `negative_prompt_embeds`
186
+ instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`). For
187
+ PixArt-Alpha, this should be "".
188
+ do_classifier_free_guidance (`bool`, *optional*, defaults to `True`):
189
+ whether to use classifier free guidance or not
190
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
191
+ number of images that should be generated per prompt
192
+ device: (`torch.device`, *optional*):
193
+ torch device to place the resulting embeddings on
194
+ prompt_embeds (`torch.FloatTensor`, *optional*):
195
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
196
+ provided, text embeddings will be generated from `prompt` input argument.
197
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
198
+ Pre-generated negative text embeddings. For PixArt-Alpha, this should be the embeddings of the ""
199
+ string.
200
+ clean_caption (bool, defaults to `False`):
201
+ If `True`, the function will preprocess and clean the provided caption before encoding.
202
+ """
203
+
204
+ if "mask_feature" in kwargs:
205
+ deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version."
206
+ deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False)
207
+
208
+ if device is None:
209
+ device = self._execution_device
210
+
211
+ if prompt is not None and isinstance(prompt, str):
212
+ batch_size = 1
213
+ elif prompt is not None and isinstance(prompt, list):
214
+ batch_size = len(prompt)
215
+ else:
216
+ batch_size = prompt_embeds.shape[0]
217
+
218
+ # See Section 3.1. of the paper.
219
+ # FIXME: should be configured in config, not hardcoded. Fix in a separate PR with the rest of the config.
220
+ max_length = 128 # TPU supports only lengths that are multiples of 128
221
+
222
+ if prompt_embeds is None:
223
+ prompt = self._text_preprocessing(prompt, clean_caption=clean_caption)
224
+ text_inputs = self.tokenizer(
225
+ prompt,
226
+ padding="max_length",
227
+ max_length=max_length,
228
+ truncation=True,
229
+ add_special_tokens=True,
230
+ return_tensors="pt",
231
+ )
232
+ text_input_ids = text_inputs.input_ids
233
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
234
+
235
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
236
+ text_input_ids, untruncated_ids
237
+ ):
238
+ removed_text = self.tokenizer.batch_decode(untruncated_ids[:, max_length - 1 : -1])
239
+ logger.warning(
240
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
241
+ f" {max_length} tokens: {removed_text}"
242
+ )
243
+
244
+ prompt_attention_mask = text_inputs.attention_mask
245
+ prompt_attention_mask = prompt_attention_mask.to(device)
246
+
247
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=prompt_attention_mask)
248
+ prompt_embeds = prompt_embeds[0]
249
+
250
+ if self.text_encoder is not None:
251
+ dtype = self.text_encoder.dtype
252
+ elif self.transformer is not None:
253
+ dtype = self.transformer.dtype
254
+ else:
255
+ dtype = None
256
+
257
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
258
+
259
+ bs_embed, seq_len, _ = prompt_embeds.shape
260
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
261
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
262
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
263
+ prompt_attention_mask = prompt_attention_mask.repeat(1, num_images_per_prompt)
264
+ prompt_attention_mask = prompt_attention_mask.view(bs_embed * num_images_per_prompt, -1)
265
+
266
+ # get unconditional embeddings for classifier free guidance
267
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
268
+ uncond_tokens = [negative_prompt] * batch_size
269
+ uncond_tokens = self._text_preprocessing(uncond_tokens, clean_caption=clean_caption)
270
+ max_length = prompt_embeds.shape[1]
271
+ uncond_input = self.tokenizer(
272
+ uncond_tokens,
273
+ padding="max_length",
274
+ max_length=max_length,
275
+ truncation=True,
276
+ return_attention_mask=True,
277
+ add_special_tokens=True,
278
+ return_tensors="pt",
279
+ )
280
+ negative_prompt_attention_mask = uncond_input.attention_mask
281
+ negative_prompt_attention_mask = negative_prompt_attention_mask.to(device)
282
+
283
+ negative_prompt_embeds = self.text_encoder(
284
+ uncond_input.input_ids.to(device), attention_mask=negative_prompt_attention_mask
285
+ )
286
+ negative_prompt_embeds = negative_prompt_embeds[0]
287
+
288
+ if do_classifier_free_guidance:
289
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
290
+ seq_len = negative_prompt_embeds.shape[1]
291
+
292
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=dtype, device=device)
293
+
294
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
295
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
296
+
297
+ negative_prompt_attention_mask = negative_prompt_attention_mask.repeat(1, num_images_per_prompt)
298
+ negative_prompt_attention_mask = negative_prompt_attention_mask.view(bs_embed * num_images_per_prompt, -1)
299
+ else:
300
+ negative_prompt_embeds = None
301
+ negative_prompt_attention_mask = None
302
+
303
+ return prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask
304
+
305
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
306
+ def prepare_extra_step_kwargs(self, generator, eta):
307
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
308
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
309
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
310
+ # and should be between [0, 1]
311
+
312
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
313
+ extra_step_kwargs = {}
314
+ if accepts_eta:
315
+ extra_step_kwargs["eta"] = eta
316
+
317
+ # check if the scheduler accepts generator
318
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
319
+ if accepts_generator:
320
+ extra_step_kwargs["generator"] = generator
321
+ return extra_step_kwargs
322
+
323
+ def check_inputs(
324
+ self,
325
+ prompt,
326
+ height,
327
+ width,
328
+ negative_prompt,
329
+ prompt_embeds=None,
330
+ negative_prompt_embeds=None,
331
+ prompt_attention_mask=None,
332
+ negative_prompt_attention_mask=None,
333
+ ):
334
+ if height % 8 != 0 or width % 8 != 0:
335
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
336
+
337
+ if prompt is not None and prompt_embeds is not None:
338
+ raise ValueError(
339
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
340
+ " only forward one of the two."
341
+ )
342
+ elif prompt is None and prompt_embeds is None:
343
+ raise ValueError(
344
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
345
+ )
346
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
347
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
348
+
349
+ if prompt is not None and negative_prompt_embeds is not None:
350
+ raise ValueError(
351
+ f"Cannot forward both `prompt`: {prompt} and `negative_prompt_embeds`:"
352
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
353
+ )
354
+
355
+ if negative_prompt is not None and negative_prompt_embeds is not None:
356
+ raise ValueError(
357
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
358
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
359
+ )
360
+
361
+ if prompt_embeds is not None and prompt_attention_mask is None:
362
+ raise ValueError("Must provide `prompt_attention_mask` when specifying `prompt_embeds`.")
363
+
364
+ if negative_prompt_embeds is not None and negative_prompt_attention_mask is None:
365
+ raise ValueError("Must provide `negative_prompt_attention_mask` when specifying `negative_prompt_embeds`.")
366
+
367
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
368
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
369
+ raise ValueError(
370
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
371
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
372
+ f" {negative_prompt_embeds.shape}."
373
+ )
374
+ if prompt_attention_mask.shape != negative_prompt_attention_mask.shape:
375
+ raise ValueError(
376
+ "`prompt_attention_mask` and `negative_prompt_attention_mask` must have the same shape when passed directly, but"
377
+ f" got: `prompt_attention_mask` {prompt_attention_mask.shape} != `negative_prompt_attention_mask`"
378
+ f" {negative_prompt_attention_mask.shape}."
379
+ )
380
+
381
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._text_preprocessing
382
+ def _text_preprocessing(self, text, clean_caption=False):
383
+ if clean_caption and not is_bs4_available():
384
+ logger.warn(BACKENDS_MAPPING["bs4"][-1].format("Setting `clean_caption=True`"))
385
+ logger.warn("Setting `clean_caption` to False...")
386
+ clean_caption = False
387
+
388
+ if clean_caption and not is_ftfy_available():
389
+ logger.warn(BACKENDS_MAPPING["ftfy"][-1].format("Setting `clean_caption=True`"))
390
+ logger.warn("Setting `clean_caption` to False...")
391
+ clean_caption = False
392
+
393
+ if not isinstance(text, (tuple, list)):
394
+ text = [text]
395
+
396
+ def process(text: str):
397
+ if clean_caption:
398
+ text = self._clean_caption(text)
399
+ text = self._clean_caption(text)
400
+ else:
401
+ text = text.lower().strip()
402
+ return text
403
+
404
+ return [process(t) for t in text]
405
+
406
+ # Copied from diffusers.pipelines.deepfloyd_if.pipeline_if.IFPipeline._clean_caption
407
+ def _clean_caption(self, caption):
408
+ caption = str(caption)
409
+ caption = ul.unquote_plus(caption)
410
+ caption = caption.strip().lower()
411
+ caption = re.sub("<person>", "person", caption)
412
+ # urls:
413
+ caption = re.sub(
414
+ r"\b((?:https?:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
415
+ "",
416
+ caption,
417
+ ) # regex for urls
418
+ caption = re.sub(
419
+ r"\b((?:www:(?:\/{1,3}|[a-zA-Z0-9%])|[a-zA-Z0-9.\-]+[.](?:com|co|ru|net|org|edu|gov|it)[\w/-]*\b\/?(?!@)))", # noqa
420
+ "",
421
+ caption,
422
+ ) # regex for urls
423
+ # html:
424
+ caption = BeautifulSoup(caption, features="html.parser").text
425
+
426
+ # @<nickname>
427
+ caption = re.sub(r"@[\w\d]+\b", "", caption)
428
+
429
+ # 31C0—31EF CJK Strokes
430
+ # 31F0—31FF Katakana Phonetic Extensions
431
+ # 3200—32FF Enclosed CJK Letters and Months
432
+ # 3300—33FF CJK Compatibility
433
+ # 3400—4DBF CJK Unified Ideographs Extension A
434
+ # 4DC0—4DFF Yijing Hexagram Symbols
435
+ # 4E00—9FFF CJK Unified Ideographs
436
+ caption = re.sub(r"[\u31c0-\u31ef]+", "", caption)
437
+ caption = re.sub(r"[\u31f0-\u31ff]+", "", caption)
438
+ caption = re.sub(r"[\u3200-\u32ff]+", "", caption)
439
+ caption = re.sub(r"[\u3300-\u33ff]+", "", caption)
440
+ caption = re.sub(r"[\u3400-\u4dbf]+", "", caption)
441
+ caption = re.sub(r"[\u4dc0-\u4dff]+", "", caption)
442
+ caption = re.sub(r"[\u4e00-\u9fff]+", "", caption)
443
+ #######################################################
444
+
445
+ # все виды тире / all types of dash --> "-"
446
+ caption = re.sub(
447
+ r"[\u002D\u058A\u05BE\u1400\u1806\u2010-\u2015\u2E17\u2E1A\u2E3A\u2E3B\u2E40\u301C\u3030\u30A0\uFE31\uFE32\uFE58\uFE63\uFF0D]+", # noqa
448
+ "-",
449
+ caption,
450
+ )
451
+
452
+ # кавычки к одному стандарту
453
+ caption = re.sub(r"[`´«»“”¨]", '"', caption)
454
+ caption = re.sub(r"[‘’]", "'", caption)
455
+
456
+ # &quot;
457
+ caption = re.sub(r"&quot;?", "", caption)
458
+ # &amp
459
+ caption = re.sub(r"&amp", "", caption)
460
+
461
+ # ip adresses:
462
+ caption = re.sub(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}", " ", caption)
463
+
464
+ # article ids:
465
+ caption = re.sub(r"\d:\d\d\s+$", "", caption)
466
+
467
+ # \n
468
+ caption = re.sub(r"\\n", " ", caption)
469
+
470
+ # "#123"
471
+ caption = re.sub(r"#\d{1,3}\b", "", caption)
472
+ # "#12345.."
473
+ caption = re.sub(r"#\d{5,}\b", "", caption)
474
+ # "123456.."
475
+ caption = re.sub(r"\b\d{6,}\b", "", caption)
476
+ # filenames:
477
+ caption = re.sub(r"[\S]+\.(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)", "", caption)
478
+
479
+ #
480
+ caption = re.sub(r"[\"\']{2,}", r'"', caption) # """AUSVERKAUFT"""
481
+ caption = re.sub(r"[\.]{2,}", r" ", caption) # """AUSVERKAUFT"""
482
+
483
+ caption = re.sub(self.bad_punct_regex, r" ", caption) # ***AUSVERKAUFT***, #AUSVERKAUFT
484
+ caption = re.sub(r"\s+\.\s+", r" ", caption) # " . "
485
+
486
+ # this-is-my-cute-cat / this_is_my_cute_cat
487
+ regex2 = re.compile(r"(?:\-|\_)")
488
+ if len(re.findall(regex2, caption)) > 3:
489
+ caption = re.sub(regex2, " ", caption)
490
+
491
+ caption = ftfy.fix_text(caption)
492
+ caption = html.unescape(html.unescape(caption))
493
+
494
+ caption = re.sub(r"\b[a-zA-Z]{1,3}\d{3,15}\b", "", caption) # jc6640
495
+ caption = re.sub(r"\b[a-zA-Z]+\d+[a-zA-Z]+\b", "", caption) # jc6640vc
496
+ caption = re.sub(r"\b\d+[a-zA-Z]+\d+\b", "", caption) # 6640vc231
497
+
498
+ caption = re.sub(r"(worldwide\s+)?(free\s+)?shipping", "", caption)
499
+ caption = re.sub(r"(free\s)?download(\sfree)?", "", caption)
500
+ caption = re.sub(r"\bclick\b\s(?:for|on)\s\w+", "", caption)
501
+ caption = re.sub(r"\b(?:png|jpg|jpeg|bmp|webp|eps|pdf|apk|mp4)(\simage[s]?)?", "", caption)
502
+ caption = re.sub(r"\bpage\s+\d+\b", "", caption)
503
+
504
+ caption = re.sub(r"\b\d*[a-zA-Z]+\d+[a-zA-Z]+\d+[a-zA-Z\d]*\b", r" ", caption) # j2d1a2a...
505
+
506
+ caption = re.sub(r"\b\d+\.?\d*[xх×]\d+\.?\d*\b", "", caption)
507
+
508
+ caption = re.sub(r"\b\s+\:\s+", r": ", caption)
509
+ caption = re.sub(r"(\D[,\./])\b", r"\1 ", caption)
510
+ caption = re.sub(r"\s+", " ", caption)
511
+
512
+ caption = caption.strip()
513
+
514
+ caption = re.sub(r"^[\"\']([\w\W]+)[\"\']$", r"\1", caption)
515
+ caption = re.sub(r"^[\'\_,\-\:;]", r"", caption)
516
+ caption = re.sub(r"[\'\_,\-\:\-\+]$", r"", caption)
517
+ caption = re.sub(r"^\.\S+$", "", caption)
518
+
519
+ return caption.strip()
520
+
521
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
522
+ def prepare_latents(
523
+ self,
524
+ batch_size,
525
+ num_latent_channels,
526
+ num_patches,
527
+ dtype,
528
+ device,
529
+ generator,
530
+ latents=None,
531
+ ):
532
+ shape = (
533
+ batch_size,
534
+ num_patches // math.prod(self.patchifier.patch_size),
535
+ num_latent_channels,
536
+ )
537
+
538
+ if isinstance(generator, list) and len(generator) != batch_size:
539
+ raise ValueError(
540
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
541
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
542
+ )
543
+
544
+ if latents is None:
545
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
546
+ else:
547
+ latents = latents.to(device)
548
+
549
+ # scale the initial noise by the standard deviation required by the scheduler
550
+ latents = latents * self.scheduler.init_noise_sigma
551
+ return latents
552
+
553
+ @staticmethod
554
+ def classify_height_width_bin(height: int, width: int, ratios: dict) -> Tuple[int, int]:
555
+ """Returns binned height and width."""
556
+ ar = float(height / width)
557
+ closest_ratio = min(ratios.keys(), key=lambda ratio: abs(float(ratio) - ar))
558
+ default_hw = ratios[closest_ratio]
559
+ return int(default_hw[0]), int(default_hw[1])
560
+
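
For example, the binning helper just snaps the requested aspect ratio to the nearest key of a ratio table (the table below is hypothetical, not one shipped with the code):

ratios = {"0.5": (512, 1024), "1.0": (768, 768), "2.0": (1024, 512)}   # hypothetical bins
h, w = VideoPixArtAlphaPipeline.classify_height_width_bin(720, 1280, ratios=ratios)
assert (h, w) == (512, 1024)    # 720 / 1280 = 0.5625, closest to the 0.5 bin
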
561
+ @staticmethod
562
+ def resize_and_crop_tensor(samples: torch.Tensor, new_width: int, new_height: int) -> torch.Tensor:
563
+ n_frames, orig_height, orig_width = samples.shape[-3:]
564
+
565
+ # Check if resizing is needed
566
+ if orig_height != new_height or orig_width != new_width:
567
+ ratio = max(new_height / orig_height, new_width / orig_width)
568
+ resized_width = int(orig_width * ratio)
569
+ resized_height = int(orig_height * ratio)
570
+
571
+ # Resize
572
+ samples = rearrange(samples, "b c n h w -> (b n) c h w")
573
+ samples = F.interpolate(samples, size=(resized_height, resized_width), mode="bilinear", align_corners=False)
574
+ samples = rearrange(samples, "(b n) c h w -> b c n h w", n=n_frames)
575
+
576
+ # Center Crop
577
+ start_x = (resized_width - new_width) // 2
578
+ end_x = start_x + new_width
579
+ start_y = (resized_height - new_height) // 2
580
+ end_y = start_y + new_height
581
+ samples = samples[..., start_y:end_y, start_x:end_x]
582
+
583
+ return samples
584
+
585
+
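
Similarly, a small sketch of the resize-and-crop helper: it scales so that both target dimensions are covered, then center-crops, keeping the frame axis intact (sizes are illustrative).

import torch

samples = torch.randn(1, 3, 8, 480, 640)   # (batch, channels, frames, height, width)
out = VideoPixArtAlphaPipeline.resize_and_crop_tensor(samples, new_width=512, new_height=512)
assert out.shape == (1, 3, 8, 512, 512)    # upscaled to 512x682, then center-cropped to 512x512
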
586
+ @torch.no_grad()
587
+ def __call__(
588
+ self,
589
+ height: int,
590
+ width: int,
591
+ num_frames: int,
592
+ frame_rate: float,
593
+ prompt: Union[str, List[str]] = None,
594
+ negative_prompt: str = "",
595
+ num_inference_steps: int = 20,
596
+ timesteps: List[int] = None,
597
+ guidance_scale: float = 4.5,
598
+ num_images_per_prompt: Optional[int] = 1,
599
+ eta: float = 0.0,
600
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
601
+ latents: Optional[torch.FloatTensor] = None,
602
+ prompt_embeds: Optional[torch.FloatTensor] = None,
603
+ prompt_attention_mask: Optional[torch.FloatTensor] = None,
604
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
605
+ negative_prompt_attention_mask: Optional[torch.FloatTensor] = None,
606
+ output_type: Optional[str] = "pil",
607
+ return_dict: bool = True,
608
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
609
+ clean_caption: bool = True,
610
+ **kwargs,
611
+ ) -> Union[ImagePipelineOutput, Tuple]:
612
+ """
613
+ Function invoked when calling the pipeline for generation.
614
+
615
+ Args:
616
+ prompt (`str` or `List[str]`, *optional*):
617
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
618
+ instead.
619
+ negative_prompt (`str` or `List[str]`, *optional*):
620
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
621
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
622
+ less than `1`).
623
+ num_inference_steps (`int`, *optional*, defaults to 20):
624
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
625
+ expense of slower inference.
626
+ timesteps (`List[int]`, *optional*):
627
+ Custom timesteps to use for the denoising process. If not defined, equal spaced `num_inference_steps`
628
+ timesteps are used. Must be in descending order.
629
+ guidance_scale (`float`, *optional*, defaults to 4.5):
630
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
631
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
632
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
633
+ 1`. Higher guidance scale encourages generating images that are closely linked to the text `prompt`,
634
+ usually at the expense of lower image quality.
635
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
636
+ The number of images to generate per prompt.
637
+ height (`int`):
638
+ The height in pixels of the generated image.
639
+ width (`int`):
640
+ The width in pixels of the generated image.
641
+ eta (`float`, *optional*, defaults to 0.0):
642
+ Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
643
+ [`schedulers.DDIMScheduler`], will be ignored for others.
644
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
645
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
646
+ to make generation deterministic.
647
+ latents (`torch.FloatTensor`, *optional*):
648
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
649
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
650
+ tensor will be generated by sampling using the supplied random `generator`.
651
+ prompt_embeds (`torch.FloatTensor`, *optional*):
652
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
653
+ provided, text embeddings will be generated from `prompt` input argument.
654
+ prompt_attention_mask (`torch.FloatTensor`, *optional*): Pre-generated attention mask for text embeddings.
655
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
656
+ Pre-generated negative text embeddings. For PixArt-Alpha this negative prompt should be "". If not
657
+ provided, negative_prompt_embeds will be generated from `negative_prompt` input argument.
658
+ negative_prompt_attention_mask (`torch.FloatTensor`, *optional*):
659
+ Pre-generated attention mask for negative text embeddings.
660
+ output_type (`str`, *optional*, defaults to `"pil"`):
661
+ The output format of the generated image. Choose between
662
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
663
+ return_dict (`bool`, *optional*, defaults to `True`):
664
+ Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.
665
+ callback_on_step_end (`Callable`, *optional*):
666
+ A function called at the end of each denoising step during inference. The function is called
667
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
668
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
669
+ `callback_on_step_end_tensor_inputs`.
670
+ clean_caption (`bool`, *optional*, defaults to `True`):
671
+ Whether or not to clean the caption before creating embeddings. Requires `beautifulsoup4` and `ftfy` to
672
+ be installed. If the dependencies are not installed, the embeddings will be created from the raw
673
+ prompt.
674
+ use_resolution_binning (`bool` defaults to `True`):
675
+ If set to `True`, the requested height and width are first mapped to the closest resolutions using
676
+ `ASPECT_RATIO_1024_BIN`. After the produced latents are decoded into images, they are resized back to
677
+ the requested resolution. Useful for generating non-square images.
678
+
679
+ Examples:
680
+
681
+ Returns:
682
+ [`~pipelines.ImagePipelineOutput`] or `tuple`:
683
+ If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is
684
+ returned where the first element is a list with the generated images
685
+ """
686
+ if "mask_feature" in kwargs:
687
+ deprecation_message = "The use of `mask_feature` is deprecated. It is no longer used in any computation and that doesn't affect the end results. It will be removed in a future version."
688
+ deprecate("mask_feature", "1.0.0", deprecation_message, standard_warn=False)
689
+
690
+ is_video = kwargs.get("is_video", False)
691
+ self.check_inputs(
692
+ prompt,
693
+ height,
694
+ width,
695
+ negative_prompt,
696
+ prompt_embeds,
697
+ negative_prompt_embeds,
698
+ prompt_attention_mask,
699
+ negative_prompt_attention_mask,
700
+ )
701
+
702
+ # 2. Default height and width to transformer
703
+ if prompt is not None and isinstance(prompt, str):
704
+ batch_size = 1
705
+ elif prompt is not None and isinstance(prompt, list):
706
+ batch_size = len(prompt)
707
+ else:
708
+ batch_size = prompt_embeds.shape[0]
709
+
710
+ device = self._execution_device
711
+
712
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
713
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
714
+ # corresponds to doing no classifier free guidance.
715
+ do_classifier_free_guidance = guidance_scale > 1.0
716
+
717
+ # 3. Encode input prompt
718
+ (
719
+ prompt_embeds,
720
+ prompt_attention_mask,
721
+ negative_prompt_embeds,
722
+ negative_prompt_attention_mask,
723
+ ) = self.encode_prompt(
724
+ prompt,
725
+ do_classifier_free_guidance,
726
+ negative_prompt=negative_prompt,
727
+ num_images_per_prompt=num_images_per_prompt,
728
+ device=device,
729
+ prompt_embeds=prompt_embeds,
730
+ negative_prompt_embeds=negative_prompt_embeds,
731
+ prompt_attention_mask=prompt_attention_mask,
732
+ negative_prompt_attention_mask=negative_prompt_attention_mask,
733
+ clean_caption=clean_caption,
734
+ )
735
+ if do_classifier_free_guidance:
736
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
737
+ prompt_attention_mask = torch.cat([negative_prompt_attention_mask, prompt_attention_mask], dim=0)
738
+
739
+ # 4. Prepare latents.
740
+ self.video_scale_factor = self.video_scale_factor if is_video else 1
741
+ latent_height = height // self.vae_scale_factor
742
+ latent_width = width // self.vae_scale_factor
743
+ latent_num_frames = num_frames // self.video_scale_factor
744
+ if isinstance(self.vae, CausalVideoAutoencoder) and is_video:
745
+ latent_num_frames += 1
746
+ latent_frame_rate = frame_rate / self.video_scale_factor
747
+ num_latent_patches = latent_height * latent_width * latent_num_frames
748
+ latents = self.prepare_latents(
749
+ batch_size=batch_size * num_images_per_prompt,
750
+ num_latent_channels=self.transformer.config.in_channels,
751
+ num_patches=num_latent_patches,
752
+ dtype=prompt_embeds.dtype,
753
+ device=device,
754
+ generator=generator,
755
+ )
756
+
757
+ # 5. Prepare timesteps
758
+ retrieve_timesteps_kwargs = {}
759
+ if isinstance(self.scheduler, TimestepShifter):
760
+ retrieve_timesteps_kwargs["samples"] = latents
761
+ timesteps, num_inference_steps = retrieve_timesteps(
762
+ self.scheduler, num_inference_steps, device, timesteps, **retrieve_timesteps_kwargs
763
+ )
764
+
765
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
766
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
767
+
768
+ # 7. Denoising loop
769
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
770
+
771
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
772
+ for i, t in enumerate(timesteps):
773
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
774
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
775
+
776
+ latent_frame_rates = (
777
+ torch.ones(latent_model_input.shape[0], 1, device=latent_model_input.device) * latent_frame_rate
778
+ )
779
+
780
+ current_timestep = t
781
+ if not torch.is_tensor(current_timestep):
782
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
783
+ # This would be a good case for the `match` statement (Python 3.10+)
784
+ is_mps = latent_model_input.device.type == "mps"
785
+ if isinstance(current_timestep, float):
786
+ dtype = torch.float32 if is_mps else torch.float64
787
+ else:
788
+ dtype = torch.int32 if is_mps else torch.int64
789
+ current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device)
790
+ elif len(current_timestep.shape) == 0:
791
+ current_timestep = current_timestep[None].to(latent_model_input.device)
792
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
793
+ current_timestep = current_timestep.expand(latent_model_input.shape[0])
794
+ scale_grid = (
795
+ (1 / latent_frame_rates, self.vae_scale_factor, self.vae_scale_factor)
796
+ if self.transformer.use_rope
797
+ else None
798
+ )
799
+ indices_grid = self.patchifier.get_grid(
800
+ orig_num_frames=latent_num_frames,
801
+ orig_height=latent_height,
802
+ orig_width=latent_width,
803
+ batch_size=latent_model_input.shape[0],
804
+ scale_grid=scale_grid,
805
+ device=latents.device,
806
+ )
807
+
808
+ # predict noise model_output
809
+ noise_pred = self.transformer(
810
+ latent_model_input.to(self.transformer.dtype),
811
+ indices_grid,
812
+ encoder_hidden_states=prompt_embeds.to(self.transformer.dtype),
813
+ encoder_attention_mask=prompt_attention_mask,
814
+ timestep=current_timestep,
815
+ return_dict=False,
816
+ )[0]
817
+
818
+ # perform guidance
819
+ if do_classifier_free_guidance:
820
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
821
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
822
+
823
+ # learned sigma
824
+ if self.transformer.config.out_channels // 2 == self.transformer.config.in_channels:
825
+ noise_pred = noise_pred.chunk(2, dim=1)[0]
826
+
827
+ # compute previous image: x_t -> x_t-1
828
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
829
+
830
+ # call the callback, if provided
831
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
832
+ progress_bar.update()
833
+
834
+ if callback_on_step_end is not None:
835
+ callback_on_step_end(self, i, t, {})
836
+
837
+ latents = self.patchifier.unpatchify(
838
+ latents=latents,
839
+ output_height=latent_height,
840
+ output_width=latent_width,
841
+ output_num_frames=latent_num_frames,
842
+ out_channels=self.transformer.in_channels // math.prod(self.patchifier.patch_size),
843
+ )
844
+ if output_type != "latent":
845
+ image = vae_decode(
846
+ latents, self.vae, is_video, vae_per_channel_normalize=kwargs["vae_per_channel_normalize"]
847
+ )
848
+ image = self.image_processor.postprocess(image, output_type=output_type)
849
+
850
+ else:
851
+ image = latents
852
+
853
+ # Offload all models
854
+ self.maybe_free_model_hooks()
855
+
856
+ if not return_dict:
857
+ return (image,)
858
+
859
+ return ImagePipelineOutput(images=image)
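Below is a minimal, hypothetical usage sketch for this `__call__` method. The `pipeline` instance, the argument values and the frame count are placeholders (nothing in this diff constructs the pipeline); only the parameter names come from the code above.

# Hypothetical sketch: `pipeline` is assumed to be an already-constructed instance of this
# pipeline class with its VAE, text encoder, transformer, patchifier and scheduler loaded.
result = pipeline(
    prompt="A waterfall in a lush forest",
    negative_prompt="blurry, low quality",
    height=512,                        # latent size is height // vae_scale_factor
    width=768,
    num_frames=65,                     # illustrative; valid values depend on the video VAE
    frame_rate=25,
    num_inference_steps=40,
    guidance_scale=5.0,                # > 1.0 enables classifier-free guidance
    num_images_per_prompt=1,
    output_type="pil",
    is_video=True,                     # read from kwargs above
    vae_per_channel_normalize=True,    # read from kwargs when decoding
)
frames = result.images                 # `.images` on the ImagePipelineOutput returned when return_dict is True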
xora/schedulers/__init__.py ADDED
File without changes
xora/schedulers/rf.py ADDED
@@ -0,0 +1,222 @@
1
+ import math
2
+ from abc import ABC, abstractmethod
3
+ from dataclasses import dataclass
4
+ from typing import Optional, Tuple, Union
5
+
6
+ import torch
7
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
8
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
9
+ from diffusers.utils import BaseOutput
10
+ from torch import Tensor
11
+
12
+ from xora.utils.torch_utils import append_dims
13
+
14
+
15
+ def simple_diffusion_resolution_dependent_timestep_shift(
16
+ samples: Tensor,
17
+ timesteps: Tensor,
18
+ n: int = 32 * 32,
19
+ ) -> Tensor:
20
+ if len(samples.shape) == 3:
21
+ _, m, _ = samples.shape
22
+ elif len(samples.shape) in [4, 5]:
23
+ m = math.prod(samples.shape[2:])
24
+ else:
25
+ raise ValueError("Samples must have shape (b, t, c), (b, c, h, w) or (b, c, f, h, w)")
26
+ snr = (timesteps / (1 - timesteps)) ** 2
27
+ shift_snr = torch.log(snr) + 2 * math.log(m / n)
28
+ shifted_timesteps = torch.sigmoid(0.5 * shift_snr)
29
+
30
+ return shifted_timesteps
31
+
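For intuition, the shift above has a closed form: with a = m / n, sigmoid(0.5 * (log(snr) + 2 * log(a))) equals a * t / (1 + (a - 1) * t), so timesteps are pushed toward 1 when the sample has more tokens than the base resolution. A small sanity-check sketch (the token counts and timesteps are arbitrary example values):

import math
import torch

# Illustrative check that the sigmoid/log form equals the closed form a*t / (1 + (a - 1)*t).
t = torch.tensor([0.1, 0.5, 0.9])
m, n = 64 * 64, 32 * 32          # example token counts: sample vs. base resolution
a = m / n
snr = (t / (1 - t)) ** 2
shifted = torch.sigmoid(0.5 * (torch.log(snr) + 2 * math.log(a)))
closed_form = a * t / (1 + (a - 1) * t)
assert torch.allclose(shifted, closed_form)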
32
+
33
+ def time_shift(mu: float, sigma: float, t: Tensor):
34
+ return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
35
+
36
+
37
+ def get_normal_shift(
38
+ n_tokens: int,
39
+ min_tokens: int = 1024,
40
+ max_tokens: int = 4096,
41
+ min_shift: float = 0.95,
42
+ max_shift: float = 2.05,
43
+ ) -> float:
44
+ m = (max_shift - min_shift) / (max_tokens - min_tokens)
45
+ b = min_shift - m * min_tokens
46
+ return m * n_tokens + b
47
+
48
+
49
+ def sd3_resolution_dependent_timestep_shift(samples: Tensor, timesteps: Tensor) -> Tensor:
50
+ """
51
+ Shifts the timestep schedule as a function of the generated resolution.
52
+
53
+ In the SD3 paper, the authors empirically determine how to shift the timesteps based on the resolution of the target images.
54
+ For more details: https://arxiv.org/pdf/2403.03206
55
+
56
+ In Flux they later propose a more dynamic, resolution-dependent timestep shift; see:
57
+ https://github.com/black-forest-labs/flux/blob/87f6fff727a377ea1c378af692afb41ae84cbe04/src/flux/sampling.py#L66
58
+
59
+
60
+ Args:
61
+ samples (Tensor): A batch of samples with shape (batch_size, channels, height, width) or
62
+ (batch_size, channels, frame, height, width).
63
+ timesteps (Tensor): A batch of timesteps with shape (batch_size,).
64
+
65
+ Returns:
66
+ Tensor: The shifted timesteps.
67
+ """
68
+ if len(samples.shape) == 3:
69
+ _, m, _ = samples.shape
70
+ elif len(samples.shape) in [4, 5]:
71
+ m = math.prod(samples.shape[2:])
72
+ else:
73
+ raise ValueError("Samples must have shape (b, t, c), (b, c, h, w) or (b, c, f, h, w)")
74
+
75
+ shift = get_normal_shift(m)
76
+ return time_shift(shift, 1, timesteps)
77
+
78
+
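Read together, `get_normal_shift` maps the token count to a shift value mu, and `time_shift` applies exp(mu) / (exp(mu) + (1/t - 1)**sigma), so larger resolutions keep more of the schedule near the noisy end. An illustrative sketch (the token counts are examples only):

import torch
from xora.schedulers.rf import get_normal_shift, time_shift

# Compare the shifted schedule for a small and a large latent grid (illustrative values).
t = torch.linspace(1.0, 0.1, steps=5)
for n_tokens in (1024, 4096):              # e.g. 32x32 vs 64x64 latent tokens
    mu = get_normal_shift(n_tokens)        # linear in the token count, between 0.95 and 2.05
    print(n_tokens, time_shift(mu, 1, t))  # larger mu keeps t closer to 1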
79
+ class TimestepShifter(ABC):
80
+ @abstractmethod
81
+ def shift_timesteps(self, samples: Tensor, timesteps: Tensor) -> Tensor:
82
+ pass
83
+
84
+
85
+ @dataclass
86
+ class RectifiedFlowSchedulerOutput(BaseOutput):
87
+ """
88
+ Output class for the scheduler's step function output.
89
+
90
+ Args:
91
+ prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
92
+ Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
93
+ denoising loop.
94
+ pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
95
+ The predicted denoised sample (x_{0}) based on the model output from the current timestep.
96
+ `pred_original_sample` can be used to preview progress or for guidance.
97
+ """
98
+
99
+ prev_sample: torch.FloatTensor
100
+ pred_original_sample: Optional[torch.FloatTensor] = None
101
+
102
+
103
+ class RectifiedFlowScheduler(SchedulerMixin, ConfigMixin, TimestepShifter):
104
+ order = 1
105
+
106
+ @register_to_config
107
+ def __init__(self, num_train_timesteps=1000, shifting: Optional[str] = None, base_resolution: int = 32**2):
108
+ super().__init__()
109
+ self.init_noise_sigma = 1.0
110
+ self.num_inference_steps = None
111
+ self.timesteps = self.sigmas = torch.linspace(1, 1 / num_train_timesteps, num_train_timesteps)
112
+ self.delta_timesteps = self.timesteps - torch.cat([self.timesteps[1:], torch.zeros_like(self.timesteps[-1:])])
113
+ self.shifting = shifting
114
+ self.base_resolution = base_resolution
115
+
116
+ def shift_timesteps(self, samples: Tensor, timesteps: Tensor) -> Tensor:
117
+ if self.shifting == "SD3":
118
+ return sd3_resolution_dependent_timestep_shift(samples, timesteps)
119
+ elif self.shifting == "SimpleDiffusion":
120
+ return simple_diffusion_resolution_dependent_timestep_shift(samples, timesteps, self.base_resolution)
121
+ return timesteps
122
+
123
+ def set_timesteps(self, num_inference_steps: int, samples: Tensor, device: Union[str, torch.device] = None):
124
+ """
125
+ Sets the discrete timesteps used for the diffusion chain. Supporting function to be run before inference.
126
+
127
+ Args:
128
+ num_inference_steps (`int`): The number of diffusion steps used when generating samples.
129
+ samples (`Tensor`): A batch of samples whose shape is used to infer the token count for resolution-dependent timestep shifting.
130
+ device (`Union[str, torch.device]`, *optional*): The device to which the timesteps tensor will be moved.
131
+ """
132
+ num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps)
133
+ timesteps = torch.linspace(1, 1 / num_inference_steps, num_inference_steps).to(device)
134
+ self.timesteps = self.shift_timesteps(samples, timesteps)
135
+ self.delta_timesteps = self.timesteps - torch.cat([self.timesteps[1:], torch.zeros_like(self.timesteps[-1:])])
136
+ self.num_inference_steps = num_inference_steps
137
+ self.sigmas = self.timesteps
138
+
139
+ def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
140
+ # pylint: disable=unused-argument
141
+ """
142
+ Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
143
+ current timestep.
144
+
145
+ Args:
146
+ sample (`torch.FloatTensor`): input sample
147
+ timestep (`int`, optional): current timestep
148
+
149
+ Returns:
150
+ `torch.FloatTensor`: scaled input sample
151
+ """
152
+ return sample
153
+
154
+ def step(
155
+ self,
156
+ model_output: torch.FloatTensor,
157
+ timestep: torch.FloatTensor,
158
+ sample: torch.FloatTensor,
159
+ eta: float = 0.0,
160
+ use_clipped_model_output: bool = False,
161
+ generator=None,
162
+ variance_noise: Optional[torch.FloatTensor] = None,
163
+ return_dict: bool = True,
164
+ ) -> Union[RectifiedFlowSchedulerOutput, Tuple]:
165
+ # pylint: disable=unused-argument
166
+ """
167
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
168
+ process from the learned model outputs (most often the predicted noise).
169
+
170
+ Args:
171
+ model_output (`torch.FloatTensor`):
172
+ The direct output from learned diffusion model.
173
+ timestep (`float`):
174
+ The current discrete timestep in the diffusion chain.
175
+ sample (`torch.FloatTensor`):
176
+ A current instance of a sample created by the diffusion process.
177
+ eta (`float`):
178
+ The weight of noise for added noise in diffusion step.
179
+ use_clipped_model_output (`bool`, defaults to `False`):
180
+ If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
181
+ because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
182
+ clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
183
+ `use_clipped_model_output` has no effect.
184
+ generator (`torch.Generator`, *optional*):
185
+ A random number generator.
186
+ variance_noise (`torch.FloatTensor`):
187
+ Alternative to generating noise with `generator` by directly providing the noise for the variance
188
+ itself. Useful for methods such as [`CycleDiffusion`].
189
+ return_dict (`bool`, *optional*, defaults to `True`):
190
+ Whether or not to return a [`RectifiedFlowSchedulerOutput`] or `tuple`.
191
+
192
+ Returns:
193
+ [`RectifiedFlowSchedulerOutput`] or `tuple`:
194
+ If `return_dict` is `True`, [`RectifiedFlowSchedulerOutput`] is returned,
195
+ otherwise a tuple is returned where the first element is the sample tensor.
196
+ """
197
+ if self.num_inference_steps is None:
198
+ raise ValueError(
199
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
200
+ )
201
+
202
+ current_index = (self.timesteps - timestep).abs().argmin()
203
+ dt = self.delta_timesteps.gather(0, current_index.unsqueeze(0))
204
+
205
+ prev_sample = sample - dt * model_output
206
+
207
+ if not return_dict:
208
+ return (prev_sample,)
209
+
210
+ return RectifiedFlowSchedulerOutput(prev_sample=prev_sample)
211
+
212
+ def add_noise(
213
+ self,
214
+ original_samples: torch.FloatTensor,
215
+ noise: torch.FloatTensor,
216
+ timesteps: torch.FloatTensor,
217
+ ) -> torch.FloatTensor:
218
+ sigmas = timesteps
219
+ sigmas = append_dims(sigmas, original_samples.ndim)
220
+ alphas = 1 - sigmas
221
+ noisy_samples = alphas * original_samples + sigmas * noise
222
+ return noisy_samples
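A minimal sketch of driving this scheduler on its own, with a zero "velocity" model standing in for the transformer; the shapes, the dummy model and the shifting choice are illustrative placeholders, not part of this diff:

import torch
from xora.schedulers.rf import RectifiedFlowScheduler

scheduler = RectifiedFlowScheduler(num_train_timesteps=1000, shifting="SimpleDiffusion")

# Sampling: Euler steps x_{t-dt} = x_t - dt * v, with a dummy velocity prediction.
sample = torch.randn(1, 128, 64)                      # placeholder (batch, tokens, channels) latents
scheduler.set_timesteps(num_inference_steps=20, samples=sample, device=sample.device)
for t in scheduler.timesteps:
    velocity = torch.zeros_like(sample)               # stand-in for transformer(sample, t, ...)
    sample = scheduler.step(velocity, t, sample, return_dict=False)[0]

# Training-side noising: x_t = (1 - t) * x_0 + t * noise
x0, noise = torch.randn(1, 128, 64), torch.randn(1, 128, 64)
t = torch.rand(1)
x_t = scheduler.add_noise(x0, noise, t)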
xora/utils/__init__.py ADDED
File without changes
xora/utils/torch_utils.py ADDED
@@ -0,0 +1,10 @@
1
+ import torch
2
+
3
+ def append_dims(x: torch.Tensor, target_dims: int) -> torch.Tensor:
4
+ """Appends dimensions to the end of a tensor until it has target_dims dimensions."""
5
+ dims_to_append = target_dims - x.ndim
6
+ if dims_to_append < 0:
7
+ raise ValueError(f"input has {x.ndim} dims but target_dims is {target_dims}, which is less")
8
+ elif dims_to_append == 0:
9
+ return x
10
+ return x[(...,) + (None,) * dims_to_append]
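`append_dims` is what lets the scheduler broadcast a per-sample sigma over image- or video-shaped latents (as in `add_noise` above); a short illustrative example:

import torch
from xora.utils.torch_utils import append_dims

sigmas = torch.tensor([0.3, 0.7])           # one sigma per sample, shape (2,)
video = torch.randn(2, 4, 8, 32, 32)        # (batch, channels, frames, height, width)
sigmas = append_dims(sigmas, video.ndim)    # shape (2, 1, 1, 1, 1), ready to broadcast
noisy = (1 - sigmas) * video + sigmas * torch.randn_like(video)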