repo_id
stringlengths
15
89
file_path
stringlengths
27
180
content
stringlengths
1
2.23M
__index_level_0__
int64
0
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_decoder.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenDecoderPipeline from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class WuerstchenDecoderPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenDecoderPipeline params = ["prompt"] batch_params = ["image_embeddings", "prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeddings", "text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "c_cond": self.text_embedder_hidden_size, "c_hidden": [320], "nhead": [-1], "blocks": [4], "level_config": ["CT"], "clip_embd": self.text_embedder_hidden_size, "inject_effnet": [False], } model = WuerstchenDiffNeXt(**model_kwargs) return model.eval() def get_dummy_components(self): decoder = self.dummy_decoder text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer vqgan = self.dummy_vqgan scheduler = DDPMWuerstchenScheduler() components = { "decoder": decoder, "vqgan": vqgan, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "latent_dim_scale": 4.0, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeddings": torch.ones((1, 4, 4, 4), device=device), "prompt": "horse", "generator": generator, "guidance_scale": 1.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_decoder(self): device = "cpu" 
components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False) image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.0000, 0.0000, 0.0089, 1.0000, 1.0000, 0.3927, 1.0000, 1.0000, 1.0000]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-5) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="bf16 not supported and requires CUDA") def test_float16_inference(self): super().test_float16_inference()
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_combined.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenCombinedPipeline from diffusers.pipelines.wuerstchen import PaellaVQModel, WuerstchenDiffNeXt, WuerstchenPrior from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class WuerstchenCombinedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenCombinedPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "prior_guidance_scale", "decoder_guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "prior_num_inference_steps", "output_type", "return_dict", ] test_xformers_attention = True @property def text_embedder_hidden_size(self): return 32 @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = {"c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2} model = WuerstchenPrior(**model_kwargs) return model.eval() @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_prior_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, projection_dim=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_vqgan(self): torch.manual_seed(0) model_kwargs = { "bottleneck_blocks": 1, "num_vq_embeddings": 2, } model = PaellaVQModel(**model_kwargs) return model.eval() @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "c_cond": self.text_embedder_hidden_size, "c_hidden": [320], "nhead": [-1], "blocks": [4], "level_config": ["CT"], "clip_embd": self.text_embedder_hidden_size, "inject_effnet": [False], } model = WuerstchenDiffNeXt(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior prior_text_encoder = self.dummy_prior_text_encoder scheduler = DDPMWuerstchenScheduler() tokenizer = self.dummy_tokenizer text_encoder = self.dummy_text_encoder decoder = self.dummy_decoder vqgan = self.dummy_vqgan components = { "tokenizer": tokenizer, "text_encoder": text_encoder, "decoder": decoder, "vqgan": vqgan, "scheduler": scheduler, "prior_prior": prior, "prior_text_encoder": 
prior_text_encoder, "prior_tokenizer": tokenizer, "prior_scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_guidance_scale": 4.0, "decoder_guidance_scale": 4.0, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "np", "height": 128, "width": 128, } return inputs def test_wuerstchen(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[-3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.7616304, 0.0, 1.0, 0.0, 1.0, 0.0, 0.05925313, 0.0, 0.951898]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) @unittest.skip(reason="flakey and float16 requires CUDA") def test_float16_inference(self): super().test_float16_inference() def test_callback_inputs(self): pass def test_callback_cfg(self): pass
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/wuerstchen/test_wuerstchen_prior.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch import torch.nn as nn import torch.nn.functional as F from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import DDPMWuerstchenScheduler, WuerstchenPriorPipeline from diffusers.loaders import AttnProcsLayers from diffusers.models.attention_processor import ( LoRAAttnProcessor, LoRAAttnProcessor2_0, ) from diffusers.pipelines.wuerstchen import WuerstchenPrior from diffusers.utils.import_utils import is_peft_available from diffusers.utils.testing_utils import enable_full_determinism, require_peft_backend, skip_mps, torch_device if is_peft_available(): from peft import LoraConfig from peft.tuners.tuners_utils import BaseTunerLayer from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() def create_prior_lora_layers(unet: nn.Module): lora_attn_procs = {} for name in unet.attn_processors.keys(): lora_attn_processor_class = ( LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor ) lora_attn_procs[name] = lora_attn_processor_class( hidden_size=unet.config.c, ) unet_lora_layers = AttnProcsLayers(lora_attn_procs) return lora_attn_procs, unet_lora_layers class WuerstchenPriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = WuerstchenPriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["text_encoder_hidden_states"] @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config).eval() @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "c_in": 2, "c": 8, "depth": 2, "c_cond": 32, "c_r": 8, "nhead": 2, } model = WuerstchenPrior(**model_kwargs) return model.eval() def get_dummy_components(self): prior = self.dummy_prior text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer scheduler = DDPMWuerstchenScheduler() components = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: 
generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_wuerstchen_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeddings image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0] image_slice = image[0, 0, 0, -10:] image_from_tuple_slice = image_from_tuple[0, 0, 0, -10:] assert image.shape == (1, 2, 24, 24) expected_slice = np.array( [ -7172.837, -3438.855, -1093.312, 388.8835, -7471.467, -7998.1206, -5328.259, 218.00089, -2731.5745, -8056.734, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( expected_max_diff=2e-1, ) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) @unittest.skip(reason="flaky for now") def test_float16_inference(self): super().test_float16_inference() # override because we need to make sure latent_mean and latent_std to be 0 def test_callback_inputs(self): components = self.get_dummy_components() components["latent_mean"] = 0 components["latent_std"] = 0 pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 def check_if_lora_correctly_set(self, model) -> bool: """ Checks if the LoRA layers are correctly set with peft """ for module in model.modules(): if isinstance(module, BaseTunerLayer): return True return False def get_lora_components(self): prior = self.dummy_prior prior_lora_config = LoraConfig( r=4, lora_alpha=4, target_modules=["to_q", "to_k", "to_v", "to_out.0"], init_lora_weights=False ) prior_lora_attn_procs, prior_lora_layers = create_prior_lora_layers(prior) lora_components = { "prior_lora_layers": prior_lora_layers, "prior_lora_attn_procs": prior_lora_attn_procs, } return prior, prior_lora_config, lora_components @require_peft_backend def test_inference_with_prior_lora(self): _, prior_lora_config, _ = self.get_lora_components() device = "cpu" components = self.get_dummy_components() pipe = 
self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output_no_lora = pipe(**self.get_dummy_inputs(device)) image_embed = output_no_lora.image_embeddings self.assertTrue(image_embed.shape == (1, 2, 24, 24)) pipe.prior.add_adapter(prior_lora_config) self.assertTrue(self.check_if_lora_correctly_set(pipe.prior), "Lora not correctly set in prior") output_lora = pipe(**self.get_dummy_inputs(device)) lora_image_embed = output_lora.image_embeddings self.assertTrue(image_embed.shape == lora_image_embed.shape)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/unclip/test_unclip.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import PriorTransformer, UnCLIPPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UnCLIPPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = UnCLIPPipeline params = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", "guidance_scale", "prompt_embeds", "cross_attention_kwargs", } batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = [ "generator", "return_dict", "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) return model @property def dummy_text_proj(self): torch.manual_seed(0) model_kwargs = { "clip_embeddings_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "cross_attention_dim": self.cross_attention_dim, } model = UnCLIPTextProjModel(**model_kwargs) return model @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "sample_size": 32, # RGB in channels "in_channels": 3, # Out channels is double in channels because predicts mean and variance "out_channels": 6, "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), 
"layers_per_block": 1, "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": "identity", } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_super_res_kwargs(self): return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "in_channels": 6, "out_channels": 3, } @property def dummy_super_res_first(self): torch.manual_seed(0) model = UNet2DModel(**self.dummy_super_res_kwargs) return model @property def dummy_super_res_last(self): # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1) model = UNet2DModel(**self.dummy_super_res_kwargs) return model def get_dummy_components(self): prior = self.dummy_prior decoder = self.dummy_decoder text_proj = self.dummy_text_proj text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer super_res_first = self.dummy_super_res_first super_res_last = self.dummy_super_res_last prior_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample_range=5.0, ) decoder_scheduler = UnCLIPScheduler( variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, ) super_res_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, ) components = { "prior": prior, "decoder": decoder, "text_proj": text_proj, "text_encoder": text_encoder, "tokenizer": tokenizer, "super_res_first": super_res_first, "super_res_last": super_res_last, "prior_scheduler": prior_scheduler, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "prior_num_inference_steps": 2, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "numpy", } return inputs def test_unclip(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [ 0.9997, 0.9988, 0.0028, 0.9997, 0.9984, 0.9965, 0.0029, 0.9986, 0.0025, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_unclip_passed_text_embed(self): device = torch.device("cpu") class DummyScheduler: init_noise_sigma = 1 components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) prior = components["prior"] decoder = components["decoder"] super_res_first = components["super_res_first"] tokenizer = components["tokenizer"] text_encoder = components["text_encoder"] generator = torch.Generator(device=device).manual_seed(0) dtype = prior.dtype batch_size = 1 shape = (batch_size, 
prior.config.embedding_dim) prior_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) shape = (batch_size, decoder.config.in_channels, decoder.config.sample_size, decoder.config.sample_size) decoder_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) shape = ( batch_size, super_res_first.config.in_channels // 2, super_res_first.config.sample_size, super_res_first.config.sample_size, ) super_res_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) pipe.set_progress_bar_config(disable=None) prompt = "this is a prompt example" generator = torch.Generator(device=device).manual_seed(0) output = pipe( [prompt], generator=generator, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, prior_latents=prior_latents, decoder_latents=decoder_latents, super_res_latents=super_res_latents, output_type="np", ) image = output.images text_inputs = tokenizer( prompt, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ) text_model_output = text_encoder(text_inputs.input_ids) text_attention_mask = text_inputs.attention_mask generator = torch.Generator(device=device).manual_seed(0) image_from_text = pipe( generator=generator, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, prior_latents=prior_latents, decoder_latents=decoder_latents, super_res_latents=super_res_latents, text_model_output=text_model_output, text_attention_mask=text_attention_mask, output_type="np", )[0] # make sure passing text embeddings manually is identical assert np.abs(image - image_from_text).max() < 1e-4 # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because UnCLIP GPU undeterminism requires a looser check. @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference, expected_max_diff=0.01) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because UnCLIP undeterminism requires a looser check. 
@skip_mps def test_inference_batch_single_identical(self): additional_params_copy_to_batched_inputs = [ "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] self._test_inference_batch_single_identical( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, expected_max_diff=5e-3 ) def test_inference_batch_consistent(self): additional_params_copy_to_batched_inputs = [ "prior_num_inference_steps", "decoder_num_inference_steps", "super_res_num_inference_steps", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes batch_sizes = [2, 3] self._test_inference_batch_consistent( batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs ) @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_local(self): return super().test_save_load_local(expected_max_difference=5e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @unittest.skip("UnCLIP produces very large differences in fp16 vs fp32. Test is not useful.") def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1.0) @nightly class UnCLIPPipelineCPUIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_unclip_karlo_cpu_fp32(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_horse_cpu.npy" ) pipeline = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha") pipeline.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipeline( "horse", num_images_per_prompt=1, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image).max() < 1e-1 @nightly @require_torch_gpu class UnCLIPPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_unclip_karlo(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_horse_fp16.npy" ) pipeline = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( "horse", generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(image, expected_image) def test_unclip_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = UnCLIPPipeline.from_pretrained("kakaobrain/karlo-v1-alpha", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _ = pipe( "horse", num_images_per_prompt=1, prior_num_inference_steps=2, decoder_num_inference_steps=2, super_res_num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make 
sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/unclip/test_unclip_image_variation.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( DiffusionPipeline, UnCLIPImageVariationPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel, ) from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UnCLIPImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = UnCLIPImageVariationPipeline params = IMAGE_VARIATION_PARAMS - {"height", "width", "guidance_scale"} batch_params = IMAGE_VARIATION_BATCH_PARAMS required_optional_params = [ "generator", "return_dict", "decoder_num_inference_steps", "super_res_num_inference_steps", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) return CLIPVisionModelWithProjection(config) @property def dummy_text_proj(self): torch.manual_seed(0) model_kwargs = { "clip_embeddings_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "cross_attention_dim": self.cross_attention_dim, } model = UnCLIPTextProjModel(**model_kwargs) return model @property def dummy_decoder(self): torch.manual_seed(0) model_kwargs = { "sample_size": 32, # RGB in channels "in_channels": 3, # Out channels is double in channels because predicts mean and variance "out_channels": 6, "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), 
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": "identity", } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_super_res_kwargs(self): return { "sample_size": 64, "layers_per_block": 1, "down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"), "up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"), "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "in_channels": 6, "out_channels": 3, } @property def dummy_super_res_first(self): torch.manual_seed(0) model = UNet2DModel(**self.dummy_super_res_kwargs) return model @property def dummy_super_res_last(self): # seeded differently to get different unet than `self.dummy_super_res_first` torch.manual_seed(1) model = UNet2DModel(**self.dummy_super_res_kwargs) return model def get_dummy_components(self): decoder = self.dummy_decoder text_proj = self.dummy_text_proj text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer super_res_first = self.dummy_super_res_first super_res_last = self.dummy_super_res_last decoder_scheduler = UnCLIPScheduler( variance_type="learned_range", prediction_type="epsilon", num_train_timesteps=1000, ) super_res_scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="epsilon", num_train_timesteps=1000, ) feature_extractor = CLIPImageProcessor(crop_size=32, size=32) image_encoder = self.dummy_image_encoder return { "decoder": decoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_proj": text_proj, "feature_extractor": feature_extractor, "image_encoder": image_encoder, "super_res_first": super_res_first, "super_res_last": super_res_last, "decoder_scheduler": decoder_scheduler, "super_res_scheduler": super_res_scheduler, } def get_dummy_inputs(self, device, seed=0, pil_image=True): input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) if pil_image: input_image = input_image * 0.5 + 0.5 input_image = input_image.clamp(0, 1) input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy() input_image = DiffusionPipeline.numpy_to_pil(input_image)[0] return { "image": input_image, "generator": generator, "decoder_num_inference_steps": 2, "super_res_num_inference_steps": 2, "output_type": "np", } def test_unclip_image_variation_input_tensor(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) pipeline_inputs = self.get_dummy_inputs(device, pil_image=False) output = pipe(**pipeline_inputs) image = output.images tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=False) image_from_tuple = pipe( **tuple_pipeline_inputs, return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [ 0.9997, 0.0002, 0.9997, 0.9997, 0.9969, 0.0023, 0.9997, 0.9969, 0.9970, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def 
test_unclip_image_variation_input_image(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) pipeline_inputs = self.get_dummy_inputs(device, pil_image=True) output = pipe(**pipeline_inputs) image = output.images tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True) image_from_tuple = pipe( **tuple_pipeline_inputs, return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_unclip_image_variation_input_list_images(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) pipeline_inputs = self.get_dummy_inputs(device, pil_image=True) pipeline_inputs["image"] = [ pipeline_inputs["image"], pipeline_inputs["image"], ] output = pipe(**pipeline_inputs) image = output.images tuple_pipeline_inputs = self.get_dummy_inputs(device, pil_image=True) tuple_pipeline_inputs["image"] = [ tuple_pipeline_inputs["image"], tuple_pipeline_inputs["image"], ] image_from_tuple = pipe( **tuple_pipeline_inputs, return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) expected_slice = np.array( [ 0.9997, 0.9989, 0.0008, 0.0021, 0.9960, 0.0018, 0.0014, 0.0002, 0.9933, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_unclip_passed_image_embed(self): device = torch.device("cpu") class DummyScheduler: init_noise_sigma = 1 components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=device).manual_seed(0) dtype = pipe.decoder.dtype batch_size = 1 shape = ( batch_size, pipe.decoder.config.in_channels, pipe.decoder.config.sample_size, pipe.decoder.config.sample_size, ) decoder_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) shape = ( batch_size, pipe.super_res_first.config.in_channels // 2, pipe.super_res_first.config.sample_size, pipe.super_res_first.config.sample_size, ) super_res_latents = pipe.prepare_latents( shape, dtype=dtype, device=device, generator=generator, latents=None, scheduler=DummyScheduler() ) pipeline_inputs = self.get_dummy_inputs(device, pil_image=False) img_out_1 = pipe( **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents ).images pipeline_inputs = self.get_dummy_inputs(device, pil_image=False) # Don't pass image, instead pass embedding image = pipeline_inputs.pop("image") image_embeddings = pipe.image_encoder(image).image_embeds img_out_2 = pipe( **pipeline_inputs, decoder_latents=decoder_latents, super_res_latents=super_res_latents, image_embeddings=image_embeddings, ).images # make sure passing text embeddings manually is identical assert np.abs(img_out_1 - img_out_2).max() < 1e-4 # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because UnCLIP GPU 
undeterminism requires a looser check. @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" # Check is relaxed because there is not a torch 2.0 sliced attention added kv processor expected_max_diff = 1e-2 self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, expected_max_diff=expected_max_diff ) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because UnCLIP undeterminism requires a looser check. @unittest.skip("UnCLIP produces very large differences. Test is not useful.") @skip_mps def test_inference_batch_single_identical(self): additional_params_copy_to_batched_inputs = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] self._test_inference_batch_single_identical( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, expected_max_diff=5e-3 ) def test_inference_batch_consistent(self): additional_params_copy_to_batched_inputs = [ "decoder_num_inference_steps", "super_res_num_inference_steps", ] if torch_device == "mps": # TODO: MPS errors with larger batch sizes batch_sizes = [2, 3] self._test_inference_batch_consistent( batch_sizes=batch_sizes, additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs, ) else: self._test_inference_batch_consistent( additional_params_copy_to_batched_inputs=additional_params_copy_to_batched_inputs ) @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @unittest.skip("UnCLIP produces very large difference. Test is not useful.") @skip_mps def test_save_load_local(self): return super().test_save_load_local(expected_max_difference=4e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @unittest.skip("UnCLIP produces very large difference in fp16 vs fp32. Test is not useful.") def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1.0) @nightly @require_torch_gpu class UnCLIPImageVariationPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_unclip_image_variation_karlo(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/unclip/karlo_v1_alpha_cat_variation_fp16.npy" ) pipeline = UnCLIPImageVariationPipeline.from_pretrained( "kakaobrain/karlo-v1-alpha-image-variations", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) output = pipeline( input_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (256, 256, 3) assert_mean_pixel_difference(image, expected_image, 15)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/shap_e/test_shap_e.py
# Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils.testing_utils import load_numpy, nightly, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class ShapEPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = ShapEPipeline params = ["prompt"] batch_params = ["prompt"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 16 @property def time_input_dim(self): return 16 @property def time_embed_dim(self): return self.time_input_dim * 4 @property def renderer_dim(self): return 8 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } model = PriorTransformer(**model_kwargs) return model @property def dummy_renderer(self): torch.manual_seed(0) model_kwargs = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } model = ShapERenderer(**model_kwargs) return model def get_dummy_components(self): prior = self.dummy_prior text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer shap_e_renderer = self.dummy_renderer scheduler = HeunDiscreteScheduler( beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, ) components = { "prior": prior, "text_encoder": text_encoder, "tokenizer": tokenizer, "shap_e_renderer": shap_e_renderer, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if 
str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "latent", } return inputs def test_shap_e(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images[0] image = image.cpu().numpy() image_slice = image[-3:, -3:] assert image.shape == (32, 16) expected_slice = np.array([-1.0000, -0.6241, 1.0000, -0.8978, -0.6866, 0.7876, -0.7473, -0.2874, 0.6103]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_consistent(self): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2]) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=6e-3) def test_num_images_per_prompt(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_size = 1 num_images_per_prompt = 2 inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) @unittest.skip("Key error is raised with accelerate") def test_sequential_cpu_offload_forward_pass(self): pass @nightly @require_torch_gpu class ShapEPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_shap_e(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_np_out.npy" ) pipe = ShapEPipeline.from_pretrained("openai/shap-e") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=torch_device).manual_seed(0) images = pipe( "a shark", generator=generator, guidance_scale=15.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(images, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/shap_e/test_shap_e_img2img.py
# Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils.testing_utils import ( floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = ShapEImg2ImgPipeline params = ["image"] batch_params = ["image"] required_optional_params = [ "num_images_per_prompt", "num_inference_steps", "generator", "latents", "guidance_scale", "frame_size", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 16 @property def time_input_dim(self): return 16 @property def time_embed_dim(self): return self.time_input_dim * 4 @property def renderer_dim(self): return 8 @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=32, projection_dim=self.text_embedder_hidden_size, intermediate_size=24, num_attention_heads=2, num_channels=3, num_hidden_layers=5, patch_size=1, ) model = CLIPVisionModel(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 16, "embedding_dim": self.time_input_dim, "num_embeddings": 32, "embedding_proj_dim": self.text_embedder_hidden_size, "time_embed_dim": self.time_embed_dim, "num_layers": 1, "clip_embed_dim": self.time_input_dim * 2, "additional_embeddings": 0, "time_embed_act_fn": "gelu", "norm_in_type": "layer", "embedding_proj_norm_type": "layer", "encoder_hid_proj_type": None, "added_emb_type": None, } model = PriorTransformer(**model_kwargs) return model @property def dummy_renderer(self): torch.manual_seed(0) model_kwargs = { "param_shapes": ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), "d_latent": self.time_input_dim, "d_hidden": self.renderer_dim, "n_output": 12, "background": ( 0.1, 0.1, 0.1, ), } model = ShapERenderer(**model_kwargs) return model def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder image_processor = self.dummy_image_processor shap_e_renderer = self.dummy_renderer scheduler = HeunDiscreteScheduler( beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, 
clip_sample_range=1.0, ) components = { "prior": prior, "image_encoder": image_encoder, "image_processor": image_processor, "shap_e_renderer": shap_e_renderer, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": input_image, "generator": generator, "num_inference_steps": 1, "frame_size": 32, "output_type": "latent", } return inputs def test_shap_e(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images[0] image_slice = image[-3:, -3:].cpu().numpy() assert image.shape == (32, 16) expected_slice = np.array( [-1.0, 0.40668195, 0.57322013, -0.9469888, 0.4283227, 0.30348337, -0.81094897, 0.74555075, 0.15342723] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_consistent(self): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[2]) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical( batch_size=2, expected_max_diff=6e-3, ) def test_num_images_per_prompt(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_size = 1 num_images_per_prompt = 2 inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) @unittest.skip("Key error is raised with accelerate") def test_sequential_cpu_offload_forward_pass(self): pass @nightly @require_torch_gpu class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_shap_e_img2img(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/test_shap_e_img2img_out.npy" ) pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device=torch_device).manual_seed(0) images = pipe( input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(images, expected_image)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_unclip/test_stable_unclip_img2img.py
import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImg2ImgPipeline, UNet2DConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class StableUnCLIPImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableUnCLIPImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): embedder_hidden_size = 32 embedder_projection_dim = embedder_hidden_size # image encoding components feature_extractor = CLIPImageProcessor(crop_size=32, size=32) torch.manual_seed(0) image_encoder = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, num_hidden_layers=5, num_attention_heads=4, image_size=32, intermediate_size=37, patch_size=1, ) ) # regular denoising components torch.manual_seed(0) image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size) image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2") torch.manual_seed(0) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) text_encoder = CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", # The class embeddings are the noise augmented image embeddings. # I.e. 
the image embeddings concated with the noised embeddings of the same dimension projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, ) torch.manual_seed(0) vae = AutoencoderKL() components = { # image encoding components "feature_extractor": feature_extractor, "image_encoder": image_encoder.eval(), # image noising components "image_normalizer": image_normalizer.eval(), "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder.eval(), "unet": unet.eval(), "scheduler": scheduler, "vae": vae.eval(), } return components def get_dummy_inputs(self, device, seed=0, pil_image=True): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) input_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) if pil_image: input_image = input_image * 0.5 + 0.5 input_image = input_image.clamp(0, 1) input_image = input_image.cpu().permute(0, 2, 3, 1).float().numpy() input_image = DiffusionPipeline.numpy_to_pil(input_image)[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def test_image_embeds_none(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableUnCLIPImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs.update({"image_embeds": None}) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because GPU undeterminism requires a looser check. def test_attention_slicing_forward_pass(self): test_max_difference = torch_device in ["cpu", "mps"] self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because undeterminism requires a looser check. 
def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_max_difference=False) @nightly @require_torch_gpu class StableUnCLIPImg2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_unclip_l_img2img(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16 ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe(input_image, "anime turle", generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image) def test_stable_unclip_h_img2img(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe(input_image, "anime turle", generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image) def test_stable_unclip_img2img_pipeline_with_sequential_cpu_offloading(self): input_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableUnCLIPImg2ImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img", torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _ = pipe( input_image, "anime turtle", num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_unclip/test_stable_unclip.py
import gc import unittest import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DDPMScheduler, PriorTransformer, StableUnCLIPPipeline, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class StableUnCLIPPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableUnCLIPPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false test_xformers_attention = False def get_dummy_components(self): embedder_hidden_size = 32 embedder_projection_dim = embedder_hidden_size # prior components torch.manual_seed(0) prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) prior_text_encoder = CLIPTextModelWithProjection( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0) prior = PriorTransformer( num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1, ) torch.manual_seed(0) prior_scheduler = DDPMScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2", ) # regular denoising components torch.manual_seed(0) image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size) image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2") torch.manual_seed(0) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) text_encoder = CLIPTextModel( CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) ) torch.manual_seed(0) unet = UNet2DConditionModel( sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"), block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection", # The class embeddings are the noise augmented image embeddings. # I.e. 
the image embeddings concated with the noised embeddings of the same dimension projection_class_embeddings_input_dim=embedder_projection_dim * 2, cross_attention_dim=embedder_hidden_size, layers_per_block=1, upcast_attention=True, use_linear_projection=True, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012, prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1, ) torch.manual_seed(0) vae = AutoencoderKL() components = { # prior components "prior_tokenizer": prior_tokenizer, "prior_text_encoder": prior_text_encoder, "prior": prior, "prior_scheduler": prior_scheduler, # image noising components "image_normalizer": image_normalizer, "image_noising_scheduler": image_noising_scheduler, # regular denoising components "tokenizer": tokenizer, "text_encoder": text_encoder, "unet": unet, "scheduler": scheduler, "vae": vae, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "prior_num_inference_steps": 2, "output_type": "numpy", } return inputs # Overriding PipelineTesterMixin::test_attention_slicing_forward_pass # because UnCLIP GPU undeterminism requires a looser check. def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference) # Overriding PipelineTesterMixin::test_inference_batch_single_identical # because UnCLIP undeterminism requires a looser check. def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @nightly @require_torch_gpu class StableUnCLIPPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_unclip(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" ) pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe("anime turle", generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image) def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() _ = pipe( "anime turtle", prior_num_inference_steps=2, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/audioldm2/test_audioldm2.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, GPT2Config, GPT2Model, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, T5Config, T5EncoderModel, T5Tokenizer, ) from diffusers import ( AudioLDM2Pipeline, AudioLDM2ProjectionModel, AudioLDM2UNet2DConditionModel, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AudioLDM2PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AudioLDM2Pipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = AudioLDM2UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=([None, 16, 32], [None, 16, 32]), ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, projection_dim=16, ) audio_branch_config = ClapAudioConfig( spec_size=64, window_size=4, num_mel_bins=64, intermediate_size=37, layer_norm_eps=1e-05, depths=[2, 2], num_attention_heads=[2, 2], num_hidden_layers=2, hidden_size=192, projection_dim=16, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=16 ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) text_encoder_2_config = T5Config( vocab_size=32100, d_model=32, d_ff=37, d_kv=8, num_heads=2, num_layers=2, ) text_encoder_2 = T5EncoderModel(text_encoder_2_config) tokenizer_2 = 
T5Tokenizer.from_pretrained("hf-internal-testing/tiny-random-T5Model", model_max_length=77) torch.manual_seed(0) language_model_config = GPT2Config( n_embd=16, n_head=2, n_layer=2, vocab_size=1000, n_ctx=99, n_positions=99, ) language_model = GPT2Model(language_model_config) language_model.config.max_new_tokens = 8 torch.manual_seed(0) projection_model = AudioLDM2ProjectionModel(text_encoder_dim=16, text_encoder_1_dim=32, langauge_model_dim=16) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "text_encoder_2": text_encoder_2, "tokenizer": tokenizer, "tokenizer_2": tokenizer_2, "feature_extractor": feature_extractor, "language_model": language_model, "projection_model": projection_model, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_audioldm2_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = audioldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0025, 0.0018, 0.0018, -0.0023, -0.0026, -0.0020, -0.0026, -0.0021, -0.0027, -0.0020] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = audioldm_pipe.tokenizer( prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) inputs["prompt_embeds"] = t5_prompt_embeds inputs["generated_prompt_embeds"] = generated_prompt_embeds # forward output = audioldm_pipe(**inputs) audio_2 = 
output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] generated_embeds = [] for p in [prompt, negative_prompt]: text_inputs = audioldm_pipe.tokenizer( p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) clap_prompt_embeds = audioldm_pipe.text_encoder.get_text_features(text_inputs) clap_prompt_embeds = clap_prompt_embeds[:, None, :] text_inputs = audioldm_pipe.tokenizer_2( prompt, padding="max_length", max_length=True if len(embeds) == 0 else embeds[0].shape[1], truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) t5_prompt_embeds = audioldm_pipe.text_encoder_2( text_inputs, ) t5_prompt_embeds = t5_prompt_embeds[0] projection_embeds = audioldm_pipe.projection_model(clap_prompt_embeds, t5_prompt_embeds)[0] generated_prompt_embeds = audioldm_pipe.generate_language_model(projection_embeds, max_new_tokens=8) embeds.append(t5_prompt_embeds) generated_embeds.append(generated_prompt_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds inputs["generated_prompt_embeds"], inputs["negative_generated_prompt_embeds"] = generated_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm2_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = audioldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [0.0025, 0.0018, 0.0018, -0.0023, -0.0026, -0.0020, -0.0026, -0.0021, -0.0027, -0.0020] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_audioldm2_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = audioldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt 
num_waveforms_per_prompt = 2 audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = audioldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_audioldm2_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = audioldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = audioldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_audioldm2_vocoder_model_in_dim(self): components = self.get_dummy_components() audioldm_pipe = AudioLDM2Pipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = audioldm_pipe.vocoder.config config.model_in_dim *= 2 audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) @unittest.skip("Raises a not implemented error in AudioLDM2") def test_xformers_attention_forwardGenerator_pass(self): pass def test_dict_tuple_outputs_equivalent(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_dict_tuple_outputs_equivalent(expected_max_difference=2e-4) def test_inference_batch_single_identical(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model self._test_inference_batch_single_identical(expected_max_diff=2e-4) def test_save_load_local(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_local(expected_max_difference=2e-4) def test_save_load_optional_components(self): # increase tolerance from 1e-4 -> 2e-4 to account for large composite model super().test_save_load_optional_components(expected_max_difference=2e-4) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. 
In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(torch_dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) def test_sequential_cpu_offload_forward_pass(self): pass @nightly class AudioLDM2PipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm2(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[17275:17285] expected_slice = np.array([0.0791, 0.0666, 0.1158, 0.1227, 0.1171, -0.2880, -0.1940, -0.0283, -0.0126, 0.1127]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_lms(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2") audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[31390:31400] expected_slice = np.array( [-0.1318, -0.0577, 0.0446, -0.0573, 0.0659, 0.1074, -0.2600, 0.0080, -0.2190, -0.4301] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_audioldm2_large(self): audioldm_pipe = AudioLDM2Pipeline.from_pretrained("cvssp/audioldm2-large") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8825:8835] expected_slice = np.array( [-0.1829, -0.1461, 0.0759, -0.1493, -0.1396, 0.5783, 0.3001, -0.3038, -0.0639, -0.2244] ) max_diff = np.abs(expected_slice - 
audio_slice).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/unidiffuser/test_unidiffuser.py
import gc import random import traceback import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, GPT2Tokenizer, ) from diffusers import ( AutoencoderKL, DPMSolverMultistepScheduler, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, require_torch_2, require_torch_gpu, run_test_in_subprocess, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_unidiffuser_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") # pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.set_progress_bar_config(disable=None) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520]) assert np.abs(image_slice - expected_slice).max() < 1e-1 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class UniDiffuserPipelineFastTests( PipelineTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = UniDiffuserPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS # vae_latents, not latents, is the argument that corresponds to VAE latent inputs image_latents_params = frozenset(["vae_latents"]) def get_dummy_components(self): unet = UniDiffuserModel.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="unet", ) scheduler = DPMSolverMultistepScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", solver_order=3, ) vae = AutoencoderKL.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="vae", ) text_encoder = CLIPTextModel.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="text_encoder", ) clip_tokenizer = CLIPTokenizer.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="clip_tokenizer", ) image_encoder = CLIPVisionModelWithProjection.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="image_encoder", ) # From the Stable Diffusion Image Variation pipeline tests clip_image_processor = CLIPImageProcessor(crop_size=32, size=32) # image_processor = CLIPImageProcessor.from_pretrained("hf-internal-testing/tiny-random-clip") text_tokenizer = GPT2Tokenizer.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="text_tokenizer", ) text_decoder = 
UniDiffuserTextDecoder.from_pretrained( "hf-internal-testing/unidiffuser-diffusers-test", subfolder="text_decoder", ) components = { "vae": vae, "text_encoder": text_encoder, "image_encoder": image_encoder, "clip_image_processor": clip_image_processor, "clip_tokenizer": clip_tokenizer, "text_decoder": text_decoder, "text_tokenizer": text_tokenizer, "unet": unet, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "an elephant under the sea", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_fixed_latents(self, device, seed=0): if isinstance(device, str): device = torch.device(device) generator = torch.Generator(device=device).manual_seed(seed) # Hardcode the shapes for now. prompt_latents = randn_tensor((1, 77, 32), generator=generator, device=device, dtype=torch.float32) vae_latents = randn_tensor((1, 4, 16, 16), generator=generator, device=device, dtype=torch.float32) clip_latents = randn_tensor((1, 1, 32), generator=generator, device=device, dtype=torch.float32) latents = { "prompt_latents": prompt_latents, "vae_latents": vae_latents, "clip_latents": clip_latents, } return latents def get_dummy_inputs_with_latents(self, device, seed=0): # image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) # image = image.cpu().permute(0, 2, 3, 1)[0] # image = Image.fromarray(np.uint8(image)).convert("RGB") image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg", ) image = image.resize((32, 32)) latents = self.get_fixed_latents(device, seed=seed) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "an elephant under the sea", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "prompt_latents": latents.get("prompt_latents"), "vae_latents": latents.get("vae_latents"), "clip_latents": latents.get("clip_latents"), } return inputs def test_unidiffuser_default_joint_v0(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'joint' unidiffuser_pipe.set_joint_mode() assert unidiffuser_pipe.mode == "joint" # inputs = self.get_dummy_inputs(device) inputs = self.get_dummy_inputs_with_latents(device) # Delete prompt and image for joint inference. 
del inputs["prompt"] del inputs["image"] sample = unidiffuser_pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 expected_text_prefix = " no no no " assert text[0][:10] == expected_text_prefix def test_unidiffuser_default_joint_no_cfg_v0(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'joint' unidiffuser_pipe.set_joint_mode() assert unidiffuser_pipe.mode == "joint" # inputs = self.get_dummy_inputs(device) inputs = self.get_dummy_inputs_with_latents(device) # Delete prompt and image for joint inference. del inputs["prompt"] del inputs["image"] # Set guidance scale to 1.0 to turn off CFG inputs["guidance_scale"] = 1.0 sample = unidiffuser_pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 expected_text_prefix = " no no no " assert text[0][:10] == expected_text_prefix def test_unidiffuser_default_text2img_v0(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'text2img' unidiffuser_pipe.set_text_to_image_mode() assert unidiffuser_pipe.mode == "text2img" inputs = self.get_dummy_inputs_with_latents(device) # Delete image for text-conditioned image generation del inputs["image"] image = unidiffuser_pipe(**inputs).images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_unidiffuser_default_image_0(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img' unidiffuser_pipe.set_image_mode() assert unidiffuser_pipe.mode == "img" inputs = self.get_dummy_inputs(device) # Delete prompt and image for unconditional ("marginal") text generation. 
del inputs["prompt"] del inputs["image"] image = unidiffuser_pipe(**inputs).images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.5760, 0.6270, 0.6571, 0.4966, 0.4638, 0.5663, 0.5254, 0.5068, 0.5715]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_unidiffuser_default_text_v0(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img' unidiffuser_pipe.set_text_mode() assert unidiffuser_pipe.mode == "text" inputs = self.get_dummy_inputs(device) # Delete prompt and image for unconditional ("marginal") text generation. del inputs["prompt"] del inputs["image"] text = unidiffuser_pipe(**inputs).text expected_text_prefix = " no no no " assert text[0][:10] == expected_text_prefix def test_unidiffuser_default_img2text_v0(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img2text' unidiffuser_pipe.set_image_to_text_mode() assert unidiffuser_pipe.mode == "img2text" inputs = self.get_dummy_inputs_with_latents(device) # Delete text for image-conditioned text generation del inputs["prompt"] text = unidiffuser_pipe(**inputs).text expected_text_prefix = " no no no " assert text[0][:10] == expected_text_prefix def test_unidiffuser_default_joint_v1(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1") unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'joint' unidiffuser_pipe.set_joint_mode() assert unidiffuser_pipe.mode == "joint" # inputs = self.get_dummy_inputs(device) inputs = self.get_dummy_inputs_with_latents(device) # Delete prompt and image for joint inference. 
del inputs["prompt"] del inputs["image"] inputs["data_type"] = 1 sample = unidiffuser_pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.5760, 0.6270, 0.6571, 0.4965, 0.4638, 0.5663, 0.5254, 0.5068, 0.5716]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 expected_text_prefix = " no no no " assert text[0][:10] == expected_text_prefix def test_unidiffuser_default_text2img_v1(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1") unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'text2img' unidiffuser_pipe.set_text_to_image_mode() assert unidiffuser_pipe.mode == "text2img" inputs = self.get_dummy_inputs_with_latents(device) # Delete image for text-conditioned image generation del inputs["image"] image = unidiffuser_pipe(**inputs).images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.5758, 0.6269, 0.6570, 0.4967, 0.4639, 0.5664, 0.5257, 0.5067, 0.5715]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_unidiffuser_default_img2text_v1(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unidiffuser_pipe = UniDiffuserPipeline.from_pretrained("hf-internal-testing/unidiffuser-test-v1") unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img2text' unidiffuser_pipe.set_image_to_text_mode() assert unidiffuser_pipe.mode == "img2text" inputs = self.get_dummy_inputs_with_latents(device) # Delete text for image-conditioned text generation del inputs["prompt"] text = unidiffuser_pipe(**inputs).text expected_text_prefix = " no no no " assert text[0][:10] == expected_text_prefix def test_unidiffuser_text2img_multiple_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'text2img' unidiffuser_pipe.set_text_to_image_mode() assert unidiffuser_pipe.mode == "text2img" inputs = self.get_dummy_inputs(device) # Delete image for text-conditioned image generation del inputs["image"] inputs["num_images_per_prompt"] = 2 inputs["num_prompts_per_image"] = 3 image = unidiffuser_pipe(**inputs).images assert image.shape == (2, 32, 32, 3) def test_unidiffuser_img2text_multiple_prompts(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img2text' unidiffuser_pipe.set_image_to_text_mode() assert unidiffuser_pipe.mode == "img2text" inputs = self.get_dummy_inputs(device) # Delete text for image-conditioned text generation del inputs["prompt"] inputs["num_images_per_prompt"] = 2 inputs["num_prompts_per_image"] = 3 text = unidiffuser_pipe(**inputs).text assert len(text) == 3 def test_unidiffuser_text2img_multiple_images_with_latents(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = 
self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'text2img' unidiffuser_pipe.set_text_to_image_mode() assert unidiffuser_pipe.mode == "text2img" inputs = self.get_dummy_inputs_with_latents(device) # Delete image for text-conditioned image generation del inputs["image"] inputs["num_images_per_prompt"] = 2 inputs["num_prompts_per_image"] = 3 image = unidiffuser_pipe(**inputs).images assert image.shape == (2, 32, 32, 3) def test_unidiffuser_img2text_multiple_prompts_with_latents(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() unidiffuser_pipe = UniDiffuserPipeline(**components) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img2text' unidiffuser_pipe.set_image_to_text_mode() assert unidiffuser_pipe.mode == "img2text" inputs = self.get_dummy_inputs_with_latents(device) # Delete text for image-conditioned text generation del inputs["prompt"] inputs["num_images_per_prompt"] = 2 inputs["num_prompts_per_image"] = 3 text = unidiffuser_pipe(**inputs).text assert len(text) == 3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=2e-4) @require_torch_gpu def test_unidiffuser_default_joint_v1_cuda_fp16(self): device = "cuda" unidiffuser_pipe = UniDiffuserPipeline.from_pretrained( "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16 ) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'joint' unidiffuser_pipe.set_joint_mode() assert unidiffuser_pipe.mode == "joint" inputs = self.get_dummy_inputs_with_latents(device) # Delete prompt and image for joint inference. del inputs["prompt"] del inputs["image"] inputs["data_type"] = 1 sample = unidiffuser_pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.5049, 0.5498, 0.5854, 0.3052, 0.4460, 0.6489, 0.5122, 0.4810, 0.6138]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 expected_text_prefix = '" This This' assert text[0][: len(expected_text_prefix)] == expected_text_prefix @require_torch_gpu def test_unidiffuser_default_text2img_v1_cuda_fp16(self): device = "cuda" unidiffuser_pipe = UniDiffuserPipeline.from_pretrained( "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16 ) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'text2img' unidiffuser_pipe.set_text_to_image_mode() assert unidiffuser_pipe.mode == "text2img" inputs = self.get_dummy_inputs_with_latents(device) # Delete prompt and image for joint inference. 
del inputs["image"] inputs["data_type"] = 1 sample = unidiffuser_pipe(**inputs) image = sample.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.5054, 0.5498, 0.5854, 0.3052, 0.4458, 0.6489, 0.5122, 0.4810, 0.6138]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-3 @require_torch_gpu def test_unidiffuser_default_img2text_v1_cuda_fp16(self): device = "cuda" unidiffuser_pipe = UniDiffuserPipeline.from_pretrained( "hf-internal-testing/unidiffuser-test-v1", torch_dtype=torch.float16 ) unidiffuser_pipe = unidiffuser_pipe.to(device) unidiffuser_pipe.set_progress_bar_config(disable=None) # Set mode to 'img2text' unidiffuser_pipe.set_image_to_text_mode() assert unidiffuser_pipe.mode == "img2text" inputs = self.get_dummy_inputs_with_latents(device) # Delete prompt and image for joint inference. del inputs["prompt"] inputs["data_type"] = 1 text = unidiffuser_pipe(**inputs).text expected_text_prefix = '" This This' assert text[0][: len(expected_text_prefix)] == expected_text_prefix @nightly @require_torch_gpu class UniDiffuserPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, seed=0, generate_latents=False): generator = torch.manual_seed(seed) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg" ) inputs = { "prompt": "an elephant under the sea", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 8.0, "output_type": "numpy", } if generate_latents: latents = self.get_fixed_latents(device, seed=seed) for latent_name, latent_tensor in latents.items(): inputs[latent_name] = latent_tensor return inputs def get_fixed_latents(self, device, seed=0): if isinstance(device, str): device = torch.device(device) latent_device = torch.device("cpu") generator = torch.Generator(device=latent_device).manual_seed(seed) # Hardcode the shapes for now. prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32) vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32) clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32) # Move latents onto desired device. prompt_latents = prompt_latents.to(device) vae_latents = vae_latents.to(device) clip_latents = clip_latents.to(device) latents = { "prompt_latents": prompt_latents, "vae_latents": vae_latents, "clip_latents": clip_latents, } return latents def test_unidiffuser_default_joint_v1(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() # inputs = self.get_dummy_inputs(device) inputs = self.get_inputs(device=torch_device, generate_latents=True) # Delete prompt and image for joint inference. 
del inputs["prompt"] del inputs["image"] sample = pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 1e-1 expected_text_prefix = "a living room" assert text[0][: len(expected_text_prefix)] == expected_text_prefix def test_unidiffuser_default_text2img_v1(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(device=torch_device, generate_latents=True) del inputs["image"] sample = pipe(**inputs) image = sample.images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_unidiffuser_default_img2text_v1(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(device=torch_device, generate_latents=True) del inputs["prompt"] sample = pipe(**inputs) text = sample.text expected_text_prefix = "An astronaut" assert text[0][: len(expected_text_prefix)] == expected_text_prefix @unittest.skip(reason="Skip torch.compile test to speed up the slow test suite.") @require_torch_2 def test_unidiffuser_compile(self, seed=0): inputs = self.get_inputs(torch_device, seed=seed, generate_latents=True) # Delete prompt and image for joint inference. del inputs["prompt"] del inputs["image"] # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_unidiffuser_compile, inputs=inputs) @nightly @require_torch_gpu class UniDiffuserPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, seed=0, generate_latents=False): generator = torch.manual_seed(seed) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unidiffuser/unidiffuser_example_image.jpg" ) inputs = { "prompt": "an elephant under the sea", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 8.0, "output_type": "numpy", } if generate_latents: latents = self.get_fixed_latents(device, seed=seed) for latent_name, latent_tensor in latents.items(): inputs[latent_name] = latent_tensor return inputs def get_fixed_latents(self, device, seed=0): if isinstance(device, str): device = torch.device(device) latent_device = torch.device("cpu") generator = torch.Generator(device=latent_device).manual_seed(seed) # Hardcode the shapes for now. prompt_latents = randn_tensor((1, 77, 768), generator=generator, device=device, dtype=torch.float32) vae_latents = randn_tensor((1, 4, 64, 64), generator=generator, device=device, dtype=torch.float32) clip_latents = randn_tensor((1, 1, 512), generator=generator, device=device, dtype=torch.float32) # Move latents onto desired device. 
prompt_latents = prompt_latents.to(device) vae_latents = vae_latents.to(device) clip_latents = clip_latents.to(device) latents = { "prompt_latents": prompt_latents, "vae_latents": vae_latents, "clip_latents": clip_latents, } return latents def test_unidiffuser_default_joint_v1_fp16(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() # inputs = self.get_dummy_inputs(device) inputs = self.get_inputs(device=torch_device, generate_latents=True) # Delete prompt and image for joint inference. del inputs["prompt"] del inputs["image"] sample = pipe(**inputs) image = sample.images text = sample.text assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_img_slice = np.array([0.2402, 0.2375, 0.2285, 0.2378, 0.2407, 0.2263, 0.2354, 0.2307, 0.2520]) assert np.abs(image_slice.flatten() - expected_img_slice).max() < 2e-1 expected_text_prefix = "a living room" assert text[0][: len(expected_text_prefix)] == expected_text_prefix def test_unidiffuser_default_text2img_v1_fp16(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(device=torch_device, generate_latents=True) del inputs["image"] sample = pipe(**inputs) image = sample.images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0242, 0.0103, 0.0022, 0.0129, 0.0000, 0.0090, 0.0376, 0.0508, 0.0005]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_unidiffuser_default_img2text_v1_fp16(self): pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(device=torch_device, generate_latents=True) del inputs["prompt"] sample = pipe(**inputs) text = sample.text expected_text_prefix = "An astronaut" assert text[0][: len(expected_text_prefix)] == expected_text_prefix
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/paint_by_example/test_paint_by_example.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPImageProcessor, CLIPVisionConfig from diffusers import AutoencoderKL, PaintByExamplePipeline, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.paint_by_example import PaintByExampleImageEncoder from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, require_torch_gpu, torch_device, ) from ..pipeline_params import IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class PaintByExamplePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = PaintByExamplePipeline params = IMAGE_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = IMAGE_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO_DO: update the image_prams once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, image_size=32, patch_size=4, ) image_encoder = PaintByExampleImageEncoder(config, proj_size=32) feature_extractor = CLIPImageProcessor(crop_size=32, size=32) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "image_encoder": image_encoder, "safety_checker": None, "feature_extractor": feature_extractor, } return components def convert_to_pt(self, image): image = np.array(image.convert("RGB")) image = image[None].transpose(0, 3, 1, 2) image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0 return image def get_dummy_inputs(self, device="cpu", seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) example_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { 
"example_image": example_image, "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_paint_by_example_inpaint(self): components = self.get_dummy_components() # make sure here that pndm scheduler skips prk pipe = PaintByExamplePipeline(**components) pipe = pipe.to("cpu") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() output = pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4686, 0.5687, 0.4007, 0.5218, 0.5741, 0.4482, 0.4940, 0.4629, 0.4503]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_paint_by_example_image_tensor(self): device = "cpu" inputs = self.get_dummy_inputs() inputs.pop("mask_image") image = self.convert_to_pt(inputs.pop("image")) mask_image = image.clamp(0, 1) / 2 # make sure here that pndm scheduler skips prk pipe = PaintByExamplePipeline(**self.get_dummy_components()) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(image=image, mask_image=mask_image[:, 0], **inputs) out_1 = output.images image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = mask_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB") output = pipe(**self.get_dummy_inputs()) out_2 = output.images assert out_1.shape == (1, 64, 64, 3) assert np.abs(out_1.flatten() - out_2.flatten()).max() < 5e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @nightly @require_torch_gpu class PaintByExamplePipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_paint_by_example(self): # make sure here that pndm scheduler skips prk init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/paint_by_example/dog_in_bucket.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/paint_by_example/mask.png" ) example_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/paint_by_example/panda.jpg" ) pipe = PaintByExamplePipeline.from_pretrained("Fantasy-Studio/Paint-by-Example") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(321) output = pipe( image=init_image, mask_image=mask_image, example_image=example_image, generator=generator, guidance_scale=5.0, num_inference_steps=50, output_type="np", ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.4834, 0.4811, 0.4874, 0.5122, 0.5081, 0.5144, 0.5291, 0.5290, 0.5374]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/ip_adapters/test_ip_adapter_stable_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPVisionModelWithProjection, ) from diffusers import ( StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionPipeline, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLInpaintPipeline, StableDiffusionXLPipeline, ) from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0 from diffusers.utils import load_image from diffusers.utils.testing_utils import ( enable_full_determinism, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class IPAdapterNightlyTestsMixin(unittest.TestCase): dtype = torch.float16 def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_image_encoder(self, repo_id, subfolder): image_encoder = CLIPVisionModelWithProjection.from_pretrained( repo_id, subfolder=subfolder, torch_dtype=self.dtype ).to(torch_device) return image_encoder def get_image_processor(self, repo_id): image_processor = CLIPImageProcessor.from_pretrained(repo_id) return image_processor def get_dummy_inputs(self, for_image_to_image=False, for_inpainting=False, for_sdxl=False): image = load_image( "https://user-images.githubusercontent.com/24734142/266492875-2d50d223-8475-44f0-a7c6-08b51cb53572.png" ) if for_sdxl: image = image.resize((1024, 1024)) input_kwargs = { "prompt": "best quality, high quality", "negative_prompt": "monochrome, lowres, bad anatomy, worst quality, low quality", "num_inference_steps": 5, "generator": torch.Generator(device="cpu").manual_seed(33), "ip_adapter_image": image, "output_type": "np", } if for_image_to_image: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/vermeer.jpg") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/river.png") if for_sdxl: image = image.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "ip_adapter_image": ip_image}) elif for_inpainting: image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/inpaint_image.png") mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/mask.png") ip_image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/girl.png") if for_sdxl: image = image.resize((1024, 1024)) mask = mask.resize((1024, 1024)) ip_image = ip_image.resize((1024, 1024)) input_kwargs.update({"image": image, "mask_image": mask, "ip_adapter_image": ip_image}) return input_kwargs @slow @require_torch_gpu class IPAdapterSDIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) 
pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.8110, 0.8843, 0.9326, 0.9224, 0.9878, 1.0, 0.9736, 1.0, 1.0]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.3013, 0.2615, 0.2202, 0.2722, 0.2510, 0.2023, 0.2498, 0.2415, 0.2139]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_image_to_image(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.2253, 0.2251, 0.2219, 0.2312, 0.2236, 0.2434, 0.2275, 0.2575, 0.2805]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.3550, 0.2600, 0.2520, 0.2412, 0.1870, 0.3831, 0.1453, 0.1880, 0.5371]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_inpainting(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.2700, 0.2388, 0.2202, 0.2304, 0.2095, 0.2097, 0.2173, 0.2058, 0.1987]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-plus_sd15.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.2744, 0.2410, 0.2202, 0.2334, 0.2090, 0.2053, 0.2175, 0.2033, 0.1934]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_text_to_image_model_cpu_offload(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipeline.to(torch_device) inputs = self.get_dummy_inputs() output_without_offload = 
pipeline(**inputs).images pipeline.enable_model_cpu_offload() inputs = self.get_dummy_inputs() output_with_offload = pipeline(**inputs).images max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-3, "CPU offloading should not affect the inference results") offloaded_modules = [ v for k, v in pipeline.components.items() if isinstance(v, torch.nn.Module) and k not in pipeline._exclude_from_cpu_offload ] self.assertTrue( all(v.device.type == "cpu" for v in offloaded_modules), f"Not offloaded: {[v for v in offloaded_modules if v.device.type != 'cpu']}", ) def test_text_to_image_full_face(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter-full-face_sd15.bin") pipeline.set_ip_adapter_scale(0.7) inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array( [0.1706543, 0.1303711, 0.12573242, 0.21777344, 0.14550781, 0.14038086, 0.40820312, 0.41455078, 0.42529297] ) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_unload(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", image_encoder=image_encoder, safety_checker=None, torch_dtype=self.dtype ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin") pipeline.set_ip_adapter_scale(0.7) pipeline.unload_ip_adapter() assert getattr(pipeline, "image_encoder") is None assert getattr(pipeline, "feature_extractor") is None processors = [ isinstance(attn_proc, (AttnProcessor, AttnProcessor2_0)) for name, attn_proc in pipeline.unet.attn_processors.items() ] assert processors == [True] * len(processors) @slow @require_torch_gpu class IPAdapterSDXLIntegrationTests(IPAdapterNightlyTestsMixin): def test_text_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs() images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.0965, 0.0956, 0.0849, 0.0908, 0.0944, 0.0927, 0.0888, 0.0929, 0.0920]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") pipeline = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs() images =
pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.0592, 0.0573, 0.0459, 0.0542, 0.0559, 0.0523, 0.0500, 0.0540, 0.0501]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_image_to_image_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.0652, 0.0698, 0.0723, 0.0744, 0.0699, 0.0636, 0.0784, 0.0803, 0.0742]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLImg2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs(for_image_to_image=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() expected_slice = np.array([0.0708, 0.0701, 0.0735, 0.0760, 0.0739, 0.0679, 0.0756, 0.0824, 0.0837]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) def test_inpainting_sdxl(self): image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="sdxl_models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter_sdxl.bin") inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array([0.1420, 0.1495, 0.1430, 0.1462, 0.1493, 0.1502, 0.1474, 0.1502, 0.1517]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4) image_encoder = self.get_image_encoder(repo_id="h94/IP-Adapter", subfolder="models/image_encoder") feature_extractor = self.get_image_processor("laion/CLIP-ViT-bigG-14-laion2B-39B-b160k") pipeline = StableDiffusionXLInpaintPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", image_encoder=image_encoder, feature_extractor=feature_extractor, torch_dtype=self.dtype, ) pipeline.to(torch_device) pipeline.load_ip_adapter( "h94/IP-Adapter", subfolder="sdxl_models", weight_name="ip-adapter-plus_sdxl_vit-h.bin", ) inputs = self.get_dummy_inputs(for_inpainting=True) images = pipeline(**inputs).images image_slice = images[0, :3, :3, -1].flatten() image_slice.tolist() expected_slice = np.array([0.1398, 0.1476, 0.1407, 0.1442, 
0.1470, 0.1480, 0.1449, 0.1481, 0.1494]) assert np.allclose(image_slice, expected_slice, atol=1e-4, rtol=1e-4)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_sag.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DEISMultistepScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, StableDiffusionSAGPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionSAGPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=8, norm_num_groups=1, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=8, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], norm_num_groups=1, in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=8, num_hidden_layers=2, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ".", "generator": generator, "num_inference_steps": 2, "guidance_scale": 1.0, "sag_scale": 1.0, "output_type": "np", } return inputs def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @unittest.skip("Not necessary to test here.") def test_xformers_attention_forwardGenerator_pass(self): pass def test_pipeline_different_schedulers(self): pipeline = self.pipeline_class(**self.get_dummy_components()) inputs = self.get_dummy_inputs("cpu") 
expected_image_size = (16, 16, 3) for scheduler_cls in [DDIMScheduler, DEISMultistepScheduler, DPMSolverMultistepScheduler]: pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config) image = pipeline(**inputs).images[0] shape = image.shape assert shape == expected_image_size pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config) with self.assertRaises(ValueError): # Karras schedulers are not supported image = pipeline(**inputs).images[0] @nightly @require_torch_gpu class StableDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_1(self): sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sag_pipe = sag_pipe.to(torch_device) sag_pipe.set_progress_bar_config(disable=None) prompt = "." generator = torch.manual_seed(0) output = sag_pipe( [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np" ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def test_stable_diffusion_2(self): sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sag_pipe = sag_pipe.to(torch_device) sag_pipe.set_progress_bar_config(disable=None) prompt = "." generator = torch.manual_seed(0) output = sag_pipe( [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np" ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def test_stable_diffusion_2_non_square(self): sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base") sag_pipe = sag_pipe.to(torch_device) sag_pipe.set_progress_bar_config(disable=None) prompt = "." generator = torch.manual_seed(0) output = sag_pipe( [prompt], width=768, height=512, generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np", ) image = output.images assert image.shape == (1, 512, 768, 3)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_adapter.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from parameterized import parameterized from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, LCMScheduler, MultiAdapter, PNDMScheduler, StableDiffusionAdapterPipeline, T2IAdapter, UNet2DConditionModel, ) from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class AdapterTests: pipeline_class = StableDiffusionAdapterPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS def get_dummy_components(self, adapter_type, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, time_cond_proj_dim=time_cond_proj_dim, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) if adapter_type == "full_adapter" or adapter_type == "light_adapter": adapter = T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=2, adapter_type=adapter_type, ) elif adapter_type == "multi_adapter": adapter = MultiAdapter( [ T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=2, adapter_type="full_adapter", ), T2IAdapter( in_channels=3, channels=[32, 64], num_res_blocks=2, downscale_factor=2, adapter_type="full_adapter", ), ] ) else: raise ValueError( f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter', 'light_adapter', or 'multi_adapter''" ) components = { "adapter": adapter, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def 
get_dummy_components_with_full_downscaling(self, adapter_type): """Get dummy components with x8 VAE downscaling and 4 UNet down blocks. These dummy components are intended to fully-exercise the T2I-Adapter downscaling behavior. """ torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 32, 32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") torch.manual_seed(0) if adapter_type == "full_adapter" or adapter_type == "light_adapter": adapter = T2IAdapter( in_channels=3, channels=[32, 32, 32, 64], num_res_blocks=2, downscale_factor=8, adapter_type=adapter_type, ) elif adapter_type == "multi_adapter": adapter = MultiAdapter( [ T2IAdapter( in_channels=3, channels=[32, 32, 32, 64], num_res_blocks=2, downscale_factor=8, adapter_type="full_adapter", ), T2IAdapter( in_channels=3, channels=[32, 32, 32, 64], num_res_blocks=2, downscale_factor=8, adapter_type="full_adapter", ), ] ) else: raise ValueError( f"Unknown adapter type: {adapter_type}, must be one of 'full_adapter', 'light_adapter', or 'multi_adapter''" ) components = { "adapter": adapter, "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0, height=64, width=64, num_images=1): if num_images == 1: image = floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) else: image = [ floats_tensor((1, 3, height, width), rng=random.Random(seed)).to(device) for _ in range(num_images) ] if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) @parameterized.expand( [ # (dim=264) The internal feature map will be 33x33 after initial pixel unshuffling (downscaled x8). 
(((4 * 8 + 1) * 8),), # (dim=272) The internal feature map will be 17x17 after the first T2I down block (downscaled x16). (((4 * 4 + 1) * 16),), # (dim=288) The internal feature map will be 9x9 after the second T2I down block (downscaled x32). (((4 * 2 + 1) * 32),), # (dim=320) The internal feature map will be 5x5 after the third T2I down block (downscaled x64). (((4 * 1 + 1) * 64),), ] ) def test_multiple_image_dimensions(self, dim): """Test that the T2I-Adapter pipeline supports any input dimension that is divisible by the adapter's `downscale_factor`. This test was added in response to an issue where the T2I Adapter's downscaling padding behavior did not match the UNet's behavior. Note that we have selected `dim` values to produce odd resolutions at each downscaling level. """ components = self.get_dummy_components_with_full_downscaling() sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device, height=dim, width=dim) image = sd_pipe(**inputs).images assert image.shape == (1, dim, dim, 3) def test_adapter_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4535, 0.5493, 0.4359, 0.5452, 0.6086, 0.4441, 0.5544, 0.501, 0.4859]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_adapter_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4535, 0.5493, 0.4359, 0.5452, 0.6086, 0.4441, 0.5544, 0.501, 0.4859]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionFullAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): def get_dummy_components(self, time_cond_proj_dim=None): return super().get_dummy_components("full_adapter", time_cond_proj_dim=time_cond_proj_dim) def get_dummy_components_with_full_downscaling(self): return super().get_dummy_components_with_full_downscaling("full_adapter") def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4858, 0.5500, 0.4278, 0.4669, 0.6184, 0.4322, 0.5010, 0.5033, 0.4746]) 
assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 class StableDiffusionLightAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): def get_dummy_components(self, time_cond_proj_dim=None): return super().get_dummy_components("light_adapter", time_cond_proj_dim=time_cond_proj_dim) def get_dummy_components_with_full_downscaling(self): return super().get_dummy_components_with_full_downscaling("light_adapter") def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4965, 0.5548, 0.4330, 0.4771, 0.6226, 0.4382, 0.5037, 0.5071, 0.4782]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 class StableDiffusionMultiAdapterPipelineFastTests(AdapterTests, PipelineTesterMixin, unittest.TestCase): def get_dummy_components(self, time_cond_proj_dim=None): return super().get_dummy_components("multi_adapter", time_cond_proj_dim=time_cond_proj_dim) def get_dummy_components_with_full_downscaling(self): return super().get_dummy_components_with_full_downscaling("multi_adapter") def get_dummy_inputs(self, device, height=64, width=64, seed=0): inputs = super().get_dummy_inputs(device, seed, height=height, width=width, num_images=2) inputs["adapter_conditioning_scale"] = [0.5, 0.5] return inputs def test_stable_diffusion_adapter_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionAdapterPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4902, 0.5539, 0.4317, 0.4682, 0.6190, 0.4351, 0.5018, 0.5046, 0.4772]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 def test_inference_batch_consistent( self, batch_sizes=[2, 4, 13], additional_params_copy_to_batched_inputs=["num_inference_steps"] ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs for batch_size in batch_sizes: batched_inputs = {} for name, value in inputs.items(): if name in self.batch_params: # prompt is string if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_inputs[name][-1] = 100 * "very long" elif name == "image": batched_images = [] for image in value: batched_images.append(batch_size * [image]) batched_inputs[name] = batched_images else: batched_inputs[name] = batch_size * [value] elif name == "batch_size": batched_inputs[name] = batch_size else: batched_inputs[name] = value for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] batched_inputs["output_type"] = "np" if self.pipeline_class.__name__ == 
"DanceDiffusionPipeline": batched_inputs.pop("output_type") output = pipe(**batched_inputs) assert len(output[0]) == batch_size batched_inputs["output_type"] = "np" if self.pipeline_class.__name__ == "DanceDiffusionPipeline": batched_inputs.pop("output_type") output = pipe(**batched_inputs)[0] assert output.shape[0] == batch_size logger.setLevel(level=diffusers.logging.WARNING) def test_num_images_per_prompt(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) batch_sizes = [1, 2] num_images_per_prompts = [1, 2] for batch_size in batch_sizes: for num_images_per_prompt in num_images_per_prompts: inputs = self.get_dummy_inputs(torch_device) for key in inputs.keys(): if key in self.batch_params: if key == "image": batched_images = [] for image in inputs[key]: batched_images.append(batch_size * [image]) inputs[key] = batched_images else: inputs[key] = batch_size * [inputs[key]] images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0] assert images.shape[0] == batch_size * num_images_per_prompt def test_inference_batch_single_identical( self, batch_size=3, test_max_difference=None, test_mean_pixel_difference=None, relax_max_difference=False, expected_max_diff=2e-3, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): if test_max_difference is None: # TODO(Pedro) - not sure why, but not at all reproducible at the moment it seems # make sure that batched and non-batched is identical test_max_difference = torch_device != "mps" if test_mean_pixel_difference is None: # TODO same as above test_mean_pixel_difference = torch_device != "mps" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batch_size = batch_size for name, value in inputs.items(): if name in self.batch_params: # prompt is string if name == "prompt": len_prompt = len(value) # make unequal batch sizes batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] # make last batch super long batched_inputs[name][-1] = 100 * "very long" elif name == "image": batched_images = [] for image in value: batched_images.append(batch_size * [image]) batched_inputs[name] = batched_images else: batched_inputs[name] = batch_size * [value] elif name == "batch_size": batched_inputs[name] = batch_size elif name == "generator": batched_inputs[name] = [self.get_generator(i) for i in range(batch_size)] else: batched_inputs[name] = value for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] if self.pipeline_class.__name__ != "DanceDiffusionPipeline": batched_inputs["output_type"] = "np" output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size inputs["generator"] = self.get_generator(0) output = pipe(**inputs) logger.setLevel(level=diffusers.logging.WARNING) if test_max_difference: if relax_max_difference: # Taking the median of the largest <n> differences # is resilient to outliers diff = np.abs(output_batch[0][0] - output[0][0]) diff = diff.flatten() diff.sort() max_diff = np.median(diff[-5:]) else: max_diff = np.abs(output_batch[0][0] - output[0][0]).max() assert max_diff < expected_max_diff if test_mean_pixel_difference: assert_mean_pixel_difference(output_batch[0][0], 
output[0][0]) @slow @require_torch_gpu class StableDiffusionAdapterPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_adapter_color(self): adapter_model = "TencentARC/t2iadapter_color_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "snail" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/color.png" ) input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_color_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_depth(self): adapter_model = "TencentARC/t2iadapter_depth_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "snail" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/color.png" ) input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_color_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_depth_sd_v14(self): adapter_model = "TencentARC/t2iadapter_depth_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "desk" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_depth_sd_v15(self): 
adapter_model = "TencentARC/t2iadapter_depth_sd15v2" sd_model = "runwayml/stable-diffusion-v1-5" prompt = "desk" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/desk_depth.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_depth_sd15v2.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_keypose_sd_v14(self): adapter_model = "TencentARC/t2iadapter_keypose_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "person" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/person_keypose.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_keypose_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_openpose_sd_v14(self): adapter_model = "TencentARC/t2iadapter_openpose_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "person" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/iron_man_pose.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_openpose_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_seg_sd_v14(self): adapter_model = "TencentARC/t2iadapter_seg_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "motorcycle" image_url = ( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motor.png" ) input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_seg_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_zoedepth_sd_v15(self): adapter_model = "TencentARC/t2iadapter_zoedepth_sd15v1" sd_model = "runwayml/stable-diffusion-v1-5" prompt = "motorcycle" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motorcycle.png" input_channels = 3 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_zoedepth_sd15v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_canny_sd_v14(self): adapter_model = "TencentARC/t2iadapter_canny_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "toy" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" input_channels = 1 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_canny_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_canny_sd_v15(self): adapter_model = "TencentARC/t2iadapter_canny_sd15v2" sd_model = "runwayml/stable-diffusion-v1-5" prompt = "toy" image_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/toy_canny.png" input_channels = 1 out_url = 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_canny_sd15v2.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_sketch_sd14(self): adapter_model = "TencentARC/t2iadapter_sketch_sd14v1" sd_model = "CompVis/stable-diffusion-v1-4" prompt = "cat" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png" ) input_channels = 1 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_sketch_sd14v1.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_sketch_sd15(self): adapter_model = "TencentARC/t2iadapter_sketch_sd15v2" sd_model = "runwayml/stable-diffusion-v1-5" prompt = "cat" image_url = ( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/edge.png" ) input_channels = 1 out_url = "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/t2iadapter_sketch_sd15v2.npy" image = load_image(image_url) expected_out = load_numpy(out_url) if input_channels == 1: image = image.convert("L") adapter = T2IAdapter.from_pretrained(adapter_model, torch_dtype=torch.float16) pipe = StableDiffusionAdapterPipeline.from_pretrained(sd_model, adapter=adapter, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() generator = torch.Generator(device="cpu").manual_seed(0) out = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=2, output_type="np").images max_diff = numpy_cosine_similarity_distance(out.flatten(), expected_out.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_adapter_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() adapter = T2IAdapter.from_pretrained("TencentARC/t2iadapter_seg_sd14v1") pipe = StableDiffusionAdapterPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", adapter=adapter, safety_checker=None ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() image = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/t2i_adapter/motor.png" ) pipe(prompt="foo", image=image, num_inference_steps=2) mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 5 * 10**9
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_instruction_pix2pix.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPix2PixPipeline, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionInstructPix2PixPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInstructPix2PixPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"image_latents"}) - {"negative_prompt_embeds"} def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = 
torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def test_stable_diffusion_pix2pix_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 image = np.array(inputs["image"]).astype(np.float32) / 255.0 image = torch.from_numpy(image).unsqueeze(0).to(device) image = image / 2 + 0.5 image = image.permute(0, 3, 1, 2) inputs["image"] = image.repeat(2, 1, 1, 1) image = sd_pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionInstructPix2PixPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] slice = [round(x, 4) for x in image_slice.flatten().tolist()] print(",".join([str(x) for x in slice])) assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) # Overwrite the default test_latents_inputs because pix2pix encode the image differently def 
test_latents_input(self): components = self.get_dummy_components() pipe = StableDiffusionInstructPix2PixPipeline(**components) pipe.image_processor = VaeImageProcessor(do_resize=False, do_normalize=False) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) out = pipe(**self.get_dummy_inputs_by_type(torch_device, input_image_type="pt"))[0] vae = components["vae"] inputs = self.get_dummy_inputs_by_type(torch_device, input_image_type="pt") for image_param in self.image_latents_params: if image_param in inputs.keys(): inputs[image_param] = vae.encode(inputs[image_param]).latent_dist.mode() out_latents_inputs = pipe(**inputs)[0] max_diff = np.abs(out - out_latents_inputs).max() self.assertLess(max_diff, 1e-4, "passing latents as image input generate different result from passing image") # Override the default test_callback_cfg because pix2pix create inputs for cfg differently def test_callback_cfg(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) def callback_no_cfg(pipe, i, t, callback_kwargs): if i == 1: for k, w in callback_kwargs.items(): if k in self.callback_cfg_params: callback_kwargs[k] = callback_kwargs[k].chunk(3)[0] pipe._guidance_scale = 1.0 return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["guidance_scale"] = 1.0 inputs["num_inference_steps"] = 2 out_no_cfg = pipe(**inputs)[0] inputs["guidance_scale"] = 7.5 inputs["callback_on_step_end"] = callback_no_cfg inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs out_callback_no_cfg = pipe(**inputs)[0] assert out_no_cfg.shape == out_callback_no_cfg.shape @slow @require_torch_gpu class StableDiffusionInstructPix2PixPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.manual_seed(seed) image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) inputs = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def test_stable_diffusion_pix2pix_default(self): pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_k_lms(self): pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_ddim(self): pipe = 
StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def test_stable_diffusion_pix2pix_pipeline_multiple_of_8(self): inputs = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 inputs["image"] = inputs["image"].resize((504, 504)) model_id = "timbrooks/instruct-pix2pix" pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained( model_id, safety_checker=None, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() output = pipe(**inputs) image = output.images[0] image_slice = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) expected_slice = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
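# --- Editorial note (not part of the upstream test file) ---
# A minimal, hedged sketch of the call pattern covered by the tests above:
# InstructPix2Pix edits an input image according to a text instruction, with
# `image_guidance_scale` controlling how closely the output follows the original
# image. The checkpoint, prompt and example image come from the slow tests; the
# "cuda" device, fp16 dtype and step count are assumptions.
import torch

from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
)
pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
generator = torch.Generator(device="cpu").manual_seed(0)
edited = pipe(
    "turn him into a cyborg",
    image=image,
    num_inference_steps=20,
    guidance_scale=7.5,
    image_guidance_scale=1.5,
    generator=generator,
).images[0]
edited.save("cyborg.png")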
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import traceback import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel, ) from diffusers.models.attention_processor import AttnProcessor from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_inpaint_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0689, 0.0699, 0.0790, 0.0536, 0.0470, 0.0488, 0.041, 0.0508, 0.04179]) assert np.abs(expected_slice - image_slice).max() < 3e-3 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class StableDiffusionInpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), 
time_cond_proj_dim=time_cond_proj_dim, layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0, img_res=64, output_pil=True): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched if output_pil: # Get random floats in [0, 1] as image image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = torch.ones_like(image) # Convert image and mask_image to [0, 255] image = 255 * image mask_image = 255 * mask_image # Convert to PIL image init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB").resize((img_res, img_res)) else: # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) # Convert image to [-1, 1] init_image = 2.0 * image - 1.0 mask_image = torch.ones((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4703, 0.5697, 0.3879, 0.5470, 0.6042, 0.4413, 0.5078, 0.4728, 0.4469]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape 
== (1, 64, 64, 3) expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_image_tensor(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) out_pil = output.images inputs = self.get_dummy_inputs(device) inputs["image"] = torch.tensor(np.array(inputs["image"]) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0) inputs["mask_image"] = torch.tensor(np.array(inputs["mask_image"]) / 255).permute(2, 0, 1)[:1].unsqueeze(0) output = sd_pipe(**inputs) out_tensor = output.images assert out_pil.shape == (1, 64, 64, 3) assert np.abs(out_pil.flatten() - out_tensor.flatten()).max() < 5e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_stable_diffusion_inpaint_strength_zero_test(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) # check that the pipeline raises value error when num_inference_steps is < 1 inputs["strength"] = 0.01 with self.assertRaises(ValueError): sd_pipe(**inputs).images def test_stable_diffusion_inpaint_mask_latents(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(device) sd_pipe.set_progress_bar_config(disable=None) # normal mask + normal image ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None inputs = self.get_dummy_inputs(device) inputs["strength"] = 0.9 out_0 = sd_pipe(**inputs).images # image latents + mask latents inputs = self.get_dummy_inputs(device) image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) masked_image = image * (mask < 0.5) generator = torch.Generator(device=device).manual_seed(0) image_latents = ( sd_pipe.vae.encode(image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor ) torch.randn((1, 4, 32, 32), generator=generator) mask_latents = ( sd_pipe.vae.encode(masked_image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor ) inputs["image"] = image_latents inputs["masked_image_latents"] = mask_latents inputs["mask_image"] = mask inputs["strength"] = 
0.9 generator = torch.Generator(device=device).manual_seed(0) torch.randn((1, 4, 32, 32), generator=generator) inputs["generator"] = generator out_1 = sd_pipe(**inputs).images assert np.abs(out_0 - out_1).max() < 1e-2 def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) class StableDiffusionSimpleInpaintPipelineFastTests(StableDiffusionInpaintPipelineFastTests): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs_2images(self, device, seed=0, img_res=64): # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image1 = floats_tensor((1, 3, 
img_res, img_res), rng=random.Random(seed)).to(device) image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) # Convert images to [-1, 1] init_image1 = 2.0 * image1 - 1.0 init_image2 = 2.0 * image2 - 1.0 # empty mask mask_image = torch.zeros((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator1 = torch.manual_seed(seed) generator2 = torch.manual_seed(seed) else: generator1 = torch.Generator(device=device).manual_seed(seed) generator2 = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ["A painting of a squirrel eating a burger"] * 2, "image": [init_image1, init_image2], "mask_image": [mask_image] * 2, "generator": [generator1, generator2], "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6584, 0.5424, 0.5649, 0.5449, 0.5897, 0.6111, 0.5404, 0.5463, 0.5214]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_2_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test to confirm if we pass two same image, we will get same output inputs = self.get_dummy_inputs(device) gen1 = torch.Generator(device=device).manual_seed(0) gen2 = torch.Generator(device=device).manual_seed(0) for name in ["prompt", "image", "mask_image"]: inputs[name] = [inputs[name]] * 2 inputs["generator"] = [gen1, gen2] images = 
sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 # test to confirm that if we pass two different images, we will get different output inputs = self.get_dummy_inputs_2images(device) images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 @slow @require_torch_gpu class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint_ddim(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0427, 0.0460, 0.0483, 0.0460, 0.0584, 0.0521, 0.1549, 0.1695, 0.1794]) assert np.abs(expected_slice - image_slice).max() < 6e-4 def test_stable_diffusion_inpaint_fp16(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1509, 0.1245, 0.1672, 0.1655, 0.1519, 0.1226, 0.1462, 0.1567, 0.2451]) assert np.abs(expected_slice - image_slice).max() < 1e-1 def test_stable_diffusion_inpaint_pndm(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0425, 0.0273, 0.0344, 0.1694, 0.1727, 0.1812, 0.3256, 0.3311, 0.3272]) assert np.abs(expected_slice - image_slice).max() < 5e-3 def test_stable_diffusion_inpaint_k_lms(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = 
LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9314, 0.7575, 0.9432, 0.8885, 0.9028, 0.7298, 0.9811, 0.9667, 0.7633]) assert np.abs(expected_slice - image_slice).max() < 6e-3 def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 @require_python39_or_higher @require_torch_2 def test_inpaint_compile(self): seed = 0 inputs = self.get_inputs(torch_device, seed=seed) # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_inpaint_compile, inputs=inputs) def test_stable_diffusion_inpaint_pil_input_resolution_test(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input image to a random size (one that would cause a tensor mismatch error) inputs["image"] = inputs["image"].resize((127, 127)) inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) inputs["height"] = 128 inputs["width"] = 128 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, inputs["height"], inputs["width"], 3) def test_stable_diffusion_inpaint_strength_test(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input strength inputs["strength"] = 0.75 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, 512, 512, 3) image_slice = image[0, 253:256, 253:256, -1].flatten() expected_slice = np.array([0.2728, 0.2803, 0.2665, 0.2511, 0.2774, 0.2586, 0.2391, 0.2392, 0.2582]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_simple_inpaint_ddim(self): pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 
253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3757, 0.3875, 0.4445, 0.4353, 0.3780, 0.4513, 0.3965, 0.3984, 0.4362]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_download_local(self): filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt") pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image_out = pipe(**inputs).images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-inpainting/blob/main/sd-v1-5-inpainting.ckpt" pipe = StableDiffusionInpaintPipeline.from_single_file(ckpt_path) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 5 image_ckpt = pipe(**inputs).images[0] pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 5 image = pipe(**inputs).images[0] assert np.max(np.abs(image - image_ckpt)) < 5e-4 @slow @require_torch_gpu class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint_ddim(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.vae = vae pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0522, 0.0604, 0.0596, 0.0449, 0.0493, 0.0427, 0.1186, 0.1289, 0.1442]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_inpaint_fp16(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae 
pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1343, 0.1406, 0.1440, 0.1504, 0.1729, 0.0989, 0.1807, 0.2822, 0.1179]) assert np.abs(expected_slice - image_slice).max() < 5e-2 def test_stable_diffusion_inpaint_pndm(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0966, 0.1083, 0.1148, 0.1422, 0.1318, 0.1197, 0.3702, 0.3537, 0.3288]) assert np.abs(expected_slice - image_slice).max() < 5e-3 def test_stable_diffusion_inpaint_k_lms(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.8931, 0.8683, 0.8965, 0.8501, 0.8592, 0.9118, 0.8734, 0.7463, 0.8990]) assert np.abs(expected_slice - image_slice).max() < 6e-3 def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16 ) pipe.vae = vae pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.45 GB is allocated assert mem_bytes < 2.45 * 10**9 @require_python39_or_higher @require_torch_2 def test_inpaint_compile(self): pass def test_stable_diffusion_inpaint_pil_input_resolution_test(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input image to a random size (one that would cause a tensor mismatch error) inputs["image"] = 
inputs["image"].resize((127, 127)) inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) inputs["height"] = 128 inputs["width"] = 128 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, inputs["height"], inputs["width"], 3) def test_stable_diffusion_inpaint_strength_test(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input strength inputs["strength"] = 0.75 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, 512, 512, 3) image_slice = image[0, 253:256, 253:256, -1].flatten() expected_slice = np.array([0.2458, 0.2576, 0.3124, 0.2679, 0.2669, 0.2796, 0.2872, 0.2975, 0.2661]) assert np.abs(expected_slice - image_slice).max() < 3e-3 def test_stable_diffusion_simple_inpaint_ddim(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None) pipe.vae = vae pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3296, 0.4041, 0.4097, 0.4145, 0.4342, 0.4152, 0.4927, 0.4931, 0.4430]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_download_local(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt") pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.vae = vae pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image_out = pipe(**inputs).images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): pass @nightly @require_torch_gpu class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def 
test_inpaint_ddim(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_pndm(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = PNDMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_lms(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_dpm(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 30 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 class StableDiffusionInpaintingPrepareMaskAndMaskedImageTests(unittest.TestCase): def test_pil_inputs(self): height, width = 32, 32 im = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) im = Image.fromarray(im) mask = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5 mask = Image.fromarray((mask * 255).astype(np.uint8)) t_mask, t_masked, t_image = prepare_mask_and_masked_image(im, mask, height, width, return_image=True) self.assertTrue(isinstance(t_mask, torch.Tensor)) self.assertTrue(isinstance(t_masked, torch.Tensor)) self.assertTrue(isinstance(t_image, torch.Tensor)) self.assertEqual(t_mask.ndim, 4) self.assertEqual(t_masked.ndim, 4) self.assertEqual(t_image.ndim, 4) self.assertEqual(t_mask.shape, (1, 1, height, width)) self.assertEqual(t_masked.shape, (1, 3, height, width)) self.assertEqual(t_image.shape, (1, 3, height, width)) self.assertTrue(t_mask.dtype == torch.float32) self.assertTrue(t_masked.dtype == torch.float32) self.assertTrue(t_image.dtype == torch.float32) self.assertTrue(t_mask.min() >= 0.0) self.assertTrue(t_mask.max() <= 1.0) self.assertTrue(t_masked.min() >= -1.0) self.assertTrue(t_masked.min() <= 1.0) self.assertTrue(t_image.min() >= -1.0) self.assertTrue(t_image.min() >= -1.0) 
self.assertTrue(t_mask.sum() > 0.0) def test_np_inputs(self): height, width = 32, 32 im_np = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) im_pil = Image.fromarray(im_np) mask_np = ( np.random.randint( 0, 255, ( height, width, ), dtype=np.uint8, ) > 127.5 ) mask_pil = Image.fromarray((mask_np * 255).astype(np.uint8)) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) t_mask_pil, t_masked_pil, t_image_pil = prepare_mask_and_masked_image( im_pil, mask_pil, height, width, return_image=True ) self.assertTrue((t_mask_np == t_mask_pil).all()) self.assertTrue((t_masked_np == t_masked_pil).all()) self.assertTrue((t_image_np == t_image_pil).all()) def test_torch_3D_2D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy().transpose(1, 2, 0) mask_np = mask_tensor.numpy() t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_3D_3D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy().transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_2D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy() t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_3D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) 
self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_4D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0][0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_batch_4D_3D(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 2, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 2, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor] mask_nps = [mask.numpy() for mask in mask_tensor] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)] t_mask_np = torch.cat([n[0] for n in nps]) t_masked_np = torch.cat([n[1] for n in nps]) t_image_np = torch.cat([n[2] for n in nps]) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_batch_4D_4D(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 2, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 2, 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor] mask_nps = [mask.numpy()[0] for mask in mask_tensor] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)] t_mask_np = torch.cat([n[0] for n in nps]) t_masked_np = torch.cat([n[1] for n in nps]) t_image_np = torch.cat([n[2] for n in nps]) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_shape_mismatch(self): height, width = 32, 32 # test height and width with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 3, height, width, ), torch.randn(64, 64), height, width, return_image=True, ) # test batch dim with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 2, 3, height, width, ), torch.randn(4, 64, 64), height, width, return_image=True, ) # test batch dim with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 2, 3, height, width, ), torch.randn(4, 1, 64, 64), height, width, return_image=True, ) def test_type_mismatch(self): height, width = 32, 32 # test tensors-only with self.assertRaises(TypeError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.rand( 3, height, width, ).numpy(), height, 
width, return_image=True, ) # test tensors-only with self.assertRaises(TypeError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ).numpy(), torch.rand( 3, height, width, ), height, width, return_image=True, ) def test_channels_first(self): height, width = 32, 32 # test channels first for 3D tensors with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.rand(height, width, 3), torch.rand( 3, height, width, ), height, width, return_image=True, ) def test_tensor_range(self): height, width = 32, 32 # test im <= 1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.ones( 3, height, width, ) * 2, torch.rand( height, width, ), height, width, return_image=True, ) # test im >= -1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.ones( 3, height, width, ) * (-2), torch.rand( height, width, ), height, width, return_image=True, ) # test mask <= 1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.ones( height, width, ) * 2, height, width, return_image=True, ) # test mask >= 0 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.ones( height, width, ) * -1, height, width, return_image=True, )
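# Hedged usage sketch of the helper exercised above; the file names and the
# 512x512 size are illustrative placeholders, not values taken from this suite:
#
#   from PIL import Image
#   init_image = Image.open("photo.png").convert("RGB").resize((512, 512))
#   mask_image = Image.open("mask.png").convert("L").resize((512, 512))
#   mask, masked_image, image = prepare_mask_and_masked_image(
#       init_image, mask_image, 512, 512, return_image=True
#   )
#   # mask: (1, 1, 512, 512) float32 in [0, 1]
#   # masked_image and image: (1, 3, 512, 512) float32 in [-1, 1]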
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_ldm3d.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, PNDMScheduler, StableDiffusionLDM3DPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS enable_full_determinism() class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase): pipeline_class = StableDiffusionLDM3DPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth image_slice_rgb = rgb[0, -3:, -3:, -1] image_slice_depth = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) expected_slice_rgb = np.array( [0.37338176, 0.70247, 
0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] ) expected_slice_depth = np.array([103.46727, 85.812004, 87.849236]) assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2 assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): components = self.get_dummy_components() ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = ldm3d_pipe(**inputs) rgb_slice_1, depth_slice_1 = output.rgb, output.depth rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1] depth_slice_1 = depth_slice_1[0, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = ldm3d_pipe.tokenizer( prompt, padding="max_length", max_length=ldm3d_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0] inputs["prompt_embeds"] = prompt_embeds # forward output = ldm3d_pipe(**inputs) rgb_slice_2, depth_slice_2 = output.rgb, output.depth rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1] depth_slice_2 = depth_slice_2[0, -3:, -1] assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4 assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) ldm3d_pipe = StableDiffusionLDM3DPipeline(**components) ldm3d_pipe = ldm3d_pipe.to(device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt) rgb, depth = output.rgb, output.depth rgb_slice = rgb[0, -3:, -3:, -1] depth_slice = depth[0, -3:, -1] assert rgb.shape == (1, 64, 64, 3) assert depth.shape == (1, 64, 64) expected_slice_rgb = np.array( [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] ) expected_slice_depth = np.array([107.84738, 84.62802, 89.962135]) assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2 assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2 @nightly @require_torch_gpu class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_ldm3d_stable_diffusion(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d") ldm3d_pipe = ldm3d_pipe.to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth rgb_slice = rgb[0, -3:, -3:, 
-1].flatten() depth_slice = rgb[0, -3:, -1].flatten() assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512) expected_slice_rgb = np.array( [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] ) expected_slice_depth = np.array( [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] ) assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3 assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3 @nightly @require_torch_gpu class StableDiffusionPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_ldm3d(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth expected_rgb_mean = 0.495586 expected_rgb_std = 0.33795515 expected_depth_mean = 112.48518 expected_depth_std = 98.489746 assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3 assert np.abs(expected_rgb_std - rgb.std()) < 1e-3 assert np.abs(expected_depth_mean - depth.mean()) < 1e-3 assert np.abs(expected_depth_std - depth.std()) < 1e-3 def test_ldm3d_v2(self): ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device) ldm3d_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) output = ldm3d_pipe(**inputs) rgb, depth = output.rgb, output.depth expected_rgb_mean = 0.4194127 expected_rgb_std = 0.35375586 expected_depth_mean = 0.5638502 expected_depth_std = 0.34686103 assert rgb.shape == (1, 512, 512, 3) assert depth.shape == (1, 512, 512, 1) assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3 assert np.abs(expected_rgb_std - rgb.std()) < 1e-3 assert np.abs(expected_depth_mean - depth.mean()) < 1e-3 assert np.abs(expected_depth_std - depth.std()) < 1e-3
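# Hedged usage sketch based on the slow tests above; the device string and the
# 50-step setting are illustrative, while "Intel/ldm3d" is the checkpoint used above:
#
#   pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#   out = pipe("a photograph of an astronaut riding a horse", num_inference_steps=50, guidance_scale=7.5)
#   rgb, depth = out.rgb, out.depth  # rgb: (1, H, W, 3); depth: (1, H, W) for ldm3d, (1, H, W, 1) for ldm3d-4c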
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import time import traceback import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, logging, ) from diffusers.models.attention_processor import AttnProcessor from diffusers.utils.testing_utils import ( CaptureLogger, enable_full_determinism, load_image, load_numpy, nightly, numpy_cosine_similarity_distance, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_stable_diffusion_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.unet.to(memory_format=torch.channels_last) sd_pipe.unet = torch.compile(sd_pipe.unet, mode="reduce-overhead", fullgraph=True) sd_pipe.set_progress_bar_config(disable=None) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.38019, 0.28647, 0.27321, 0.40377, 0.38290, 0.35446, 0.39218, 0.38165, 0.42239]) assert np.abs(image_slice - expected_slice).max() < 5e-3 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class StableDiffusionPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, time_cond_proj_dim=time_cond_proj_dim, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), 
cross_attention_dim=32, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3203, 0.4555, 0.4711, 0.3505, 0.3973, 0.4650, 0.5137, 0.3392, 0.4045]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3454, 0.5349, 0.5185, 0.2808, 0.4509, 0.4612, 0.4655, 0.3601, 0.4315]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3454, 0.5349, 0.5185, 0.2808, 0.4509, 0.4612, 0.4655, 0.3601, 0.4315]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): components = 
self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = sd_pipe.tokenizer( prompt, padding="max_length", max_length=sd_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = sd_pipe.text_encoder(text_inputs)[0] inputs["prompt_embeds"] = prompt_embeds # forward output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = sd_pipe.tokenizer( p, padding="max_length", max_length=sd_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) embeds.append(sd_pipe.text_encoder(text_inputs)[0]) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_prompt_embeds_with_plain_negative_prompt_list(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = negative_prompt prompt = 3 * [inputs.pop("prompt")] text_inputs = sd_pipe.tokenizer( prompt, padding="max_length", max_length=sd_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = sd_pipe.text_encoder(text_inputs)[0] inputs["prompt_embeds"] = prompt_embeds # forward output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_ddim_factor_8(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, height=136, 
width=136) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 136, 136, 3) expected_slice = np.array([0.4346, 0.5621, 0.5016, 0.3926, 0.4533, 0.4134, 0.5625, 0.5632, 0.5265]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = PNDMScheduler(skip_prk_steps=True) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3411, 0.5032, 0.4704, 0.3135, 0.4323, 0.4740, 0.5150, 0.3498, 0.4022]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_no_safety_checker(self): pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None ) assert isinstance(pipe, StableDiffusionPipeline) assert isinstance(pipe.scheduler, LMSDiscreteScheduler) assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = StableDiffusionPipeline.from_pretrained(tmpdirname) # sanity check that the pipeline still works assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3149, 0.5246, 0.4796, 0.3218, 0.4469, 0.4729, 0.5151, 0.3597, 0.3954]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3151, 0.5243, 0.4794, 0.3217, 0.4468, 0.4728, 0.5152, 0.3598, 0.3954]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = 
sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3149, 0.5246, 0.4796, 0.3218, 0.4469, 0.4729, 0.5151, 0.3597, 0.3954]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_vae_slicing(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) image_count = 4 inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count output_1 = sd_pipe(**inputs) # make sure sliced vae decode yields the same result sd_pipe.enable_vae_slicing() inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * image_count output_2 = sd_pipe(**inputs) # there is a small discrepancy at image borders vs. full batch decode assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 3e-3 def test_stable_diffusion_vae_tiling(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() # make sure here that pndm scheduler skips prk components["safety_checker"] = None sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" # Test that tiled decode at 512x512 yields the same result as the non-tiled decode generator = torch.Generator(device=device).manual_seed(0) output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") # make sure tiled vae decode yields the same result sd_pipe.enable_vae_tiling() generator = torch.Generator(device=device).manual_seed(0) output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 5e-1 # test that tiled decode works with various shapes shapes = [(1, 4, 73, 97), (1, 4, 97, 73), (1, 4, 49, 65), (1, 4, 65, 49)] for shape in shapes: zeros = torch.zeros(shape).to(device) sd_pipe.vae.decode(zeros) def test_stable_diffusion_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3458, 0.5120, 0.4800, 0.3116, 0.4348, 0.4802, 0.5237, 0.3467, 0.3991]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_long_prompt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) do_classifier_free_guidance = True negative_prompt = None num_images_per_prompt = 1 logger = 
logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion") logger.setLevel(logging.WARNING) prompt = 100 * "@" with CaptureLogger(logger) as cap_logger: negative_text_embeddings, text_embeddings = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings is not None: text_embeddings = torch.cat([negative_text_embeddings, text_embeddings]) # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25 assert cap_logger.out.count("@") == 25 negative_prompt = "Hello" with CaptureLogger(logger) as cap_logger_2: negative_text_embeddings_2, text_embeddings_2 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_2 is not None: text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2]) assert cap_logger.out == cap_logger_2.out prompt = 25 * "@" with CaptureLogger(logger) as cap_logger_3: negative_text_embeddings_3, text_embeddings_3 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_3 is not None: text_embeddings_3 = torch.cat([negative_text_embeddings_3, text_embeddings_3]) assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape assert text_embeddings.shape[1] == 77 assert cap_logger_3.out == "" def test_stable_diffusion_height_width_opt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (64, 64) output = sd_pipe(prompt, num_inference_steps=1, height=96, width=96, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (96, 96) config = dict(sd_pipe.unet.config) config["sample_size"] = 96 sd_pipe.unet = UNet2DConditionModel.from_config(config).to(torch_device) output = sd_pipe(prompt, num_inference_steps=1, output_type="np") image_shape = output.images[0].shape[:2] assert image_shape == (192, 192) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_freeu_enabled(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) output_freeu = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images assert not np.allclose( output[0, -3:, -3:, -1], output_freeu[0, -3:, -3:, -1] ), "Enabling of FreeU should lead to different results." 
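# disabling FreeU should reset the per-block scaling attributes (s1, s2, b1, b2) to None and reproduce the baseline output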
def test_freeu_disabled(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" output = sd_pipe(prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0)).images sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) sd_pipe.disable_freeu() freeu_keys = {"s1", "s2", "b1", "b2"} for upsample_block in sd_pipe.unet.up_blocks: for key in freeu_keys: assert getattr(upsample_block, key) is None, f"Disabling of FreeU should have set {key} to None." output_no_freeu = sd_pipe( prompt, num_inference_steps=1, output_type="np", generator=torch.manual_seed(0) ).images assert np.allclose( output[0, -3:, -3:, -1], output_no_freeu[0, -3:, -3:, -1] ), "Disabling of FreeU should lead to results similar to the default pipeline results." def test_fused_qkv_projections(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images original_image_slice = image[0, -3:, -3:, -1] sd_pipe.fuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_fused = image[0, -3:, -3:, -1] sd_pipe.unfuse_qkv_projections() inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_disabled = image[0, -3:, -3:, -1] assert np.allclose( original_image_slice, image_slice_fused, atol=1e-2, rtol=1e-2 ), "Fusion of QKV projections shouldn't affect the outputs." assert np.allclose( image_slice_fused, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Outputs, with QKV projection fusion enabled, shouldn't change when fused QKV projections are disabled." assert np.allclose( original_image_slice, image_slice_disabled, atol=1e-2, rtol=1e-2 ), "Original outputs should match when fused QKV projections are disabled." 
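# interrupting via callback_on_step_end should stop denoising so the returned latents match the stored intermediate latents at the interrupted step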
def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) @slow @require_torch_gpu class StableDiffusionPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_1_1_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.4363, 0.4355, 0.3667, 0.4066, 0.3970, 0.3866, 0.4394, 0.4356, 0.4059]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_v1_4_with_freeu(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 sd_pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) image = sd_pipe(**inputs).images image = image[0, -3:, -3:, -1].flatten() expected_image = [0.0721, 0.0588, 0.0268, 0.0384, 0.0636, 0.0, 0.0429, 0.0344, 0.0309] max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_1_4_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.5740, 0.4784, 0.3162, 0.6358, 0.5831, 0.5505, 0.5082, 0.5631, 
0.5575]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.38019, 0.28647, 0.27321, 0.40377, 0.38290, 0.35446, 0.39218, 0.38165, 0.42239]) assert np.abs(image_slice - expected_slice).max() < 1e-4 def test_stable_diffusion_lms(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.10542, 0.09620, 0.07332, 0.09015, 0.09382, 0.07597, 0.08496, 0.07806, 0.06455]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_dpm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.03503, 0.03494, 0.01087, 0.03128, 0.02552, 0.00803, 0.00742, 0.00372, 0.00000]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_stable_diffusion_attention_slicing(self): torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe.unet.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # enable attention slicing pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image_sliced = pipe(**inputs).images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 3.75 GB is allocated assert mem_bytes < 3.75 * 10**9 # disable slicing pipe.disable_attention_slicing() pipe.unet.set_default_attn_processor() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images # make sure that more than 3.75 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 3.75 * 10**9 max_diff = numpy_cosine_similarity_distance(image_sliced.flatten(), image.flatten()) assert max_diff < 1e-3 def test_stable_diffusion_vae_slicing(self): torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() # enable vae slicing pipe.enable_vae_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) inputs["prompt"] = [inputs["prompt"]] * 4 inputs["latents"] = torch.cat([inputs["latents"]] * 4) image_sliced = pipe(**inputs).images mem_bytes = 
torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 4 GB is allocated assert mem_bytes < 4e9 # disable vae slicing pipe.disable_vae_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) inputs["prompt"] = [inputs["prompt"]] * 4 inputs["latents"] = torch.cat([inputs["latents"]] * 4) image = pipe(**inputs).images # make sure that more than 4 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 4e9 # There is a small discrepancy at the image borders vs. a fully batched version. max_diff = numpy_cosine_similarity_distance(image_sliced.flatten(), image.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_vae_tiling(self): torch.cuda.reset_peak_memory_stats() model_id = "CompVis/stable-diffusion-v1-4" pipe = StableDiffusionPipeline.from_pretrained( model_id, revision="fp16", torch_dtype=torch.float16, safety_checker=None ) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.unet = pipe.unet.to(memory_format=torch.channels_last) pipe.vae = pipe.vae.to(memory_format=torch.channels_last) prompt = "a photograph of an astronaut riding a horse" # enable vae tiling pipe.enable_vae_tiling() pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) output_chunked = pipe( [prompt], width=1024, height=1024, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ) image_chunked = output_chunked.images mem_bytes = torch.cuda.max_memory_allocated() # disable vae tiling pipe.disable_vae_tiling() generator = torch.Generator(device="cpu").manual_seed(0) output = pipe( [prompt], width=1024, height=1024, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy", ) image = output.images assert mem_bytes < 1e10 max_diff = numpy_cosine_similarity_distance(image_chunked.flatten(), image.flatten()) assert max_diff < 1e-2 def test_stable_diffusion_fp16_vs_autocast(self): # this test makes sure that the original model with autocast # and the new model with fp16 yield the same result pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, dtype=torch.float16) image_fp16 = pipe(**inputs).images with torch.autocast(torch_device): inputs = self.get_inputs(torch_device) image_autocast = pipe(**inputs).images # Make sure results are close enough diff = np.abs(image_fp16.flatten() - image_autocast.flatten()) # They ARE different since ops are not run always at the same precision # however, they should be extremely close. 
assert diff.mean() < 2e-2 def test_stable_diffusion_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.5693, -0.3018, -0.9746, 0.0518, -0.8770, 0.7559, -1.7402, 0.1022, 1.1582] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.1958, -0.2993, -1.0166, -0.5005, -0.4810, 0.6162, -0.9492, 0.6621, 1.4492] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == inputs["num_inference_steps"] def test_stable_diffusion_low_cpu_mem_usage(self): pipeline_id = "CompVis/stable-diffusion-v1-4" start_time = time.time() pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) pipeline_low_cpu_mem_usage.to(torch_device) low_cpu_mem_usage_time = time.time() - start_time start_time = time.time() _ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False) normal_load_time = time.time() - start_time assert 2 * low_cpu_mem_usage_time < normal_load_time def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.8 GB is allocated assert mem_bytes < 2.8 * 10**9 def test_stable_diffusion_pipeline_with_model_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() inputs = self.get_inputs(torch_device, dtype=torch.float16) # Normal inference pipe = StableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) outputs = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # With model offloading # Reload but don't move to cuda pipe = StableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, dtype=torch.float16) outputs_offloaded = pipe(**inputs) mem_bytes_offloaded = 
torch.cuda.max_memory_allocated() images = outputs.images offloaded_images = outputs_offloaded.images max_diff = numpy_cosine_similarity_distance(images.flatten(), offloaded_images.flatten()) assert max_diff < 1e-3 assert mem_bytes_offloaded < mem_bytes assert mem_bytes_offloaded < 3.5 * 10**9 for module in pipe.text_encoder, pipe.unet, pipe.vae: assert module.device == torch.device("cpu") # With attention slicing torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_attention_slicing() _ = pipe(**inputs) mem_bytes_slicing = torch.cuda.max_memory_allocated() assert mem_bytes_slicing < mem_bytes_offloaded assert mem_bytes_slicing < 3 * 10**9 def test_stable_diffusion_textual_inversion(self): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") a111_file_neg = hf_hub_download( "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" ) pipe.load_textual_inversion(a111_file) pipe.load_textual_inversion(a111_file_neg) pipe.to("cuda") generator = torch.Generator(device="cpu").manual_seed(1) prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>" neg_prompt = "Style-Winter-neg" image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 def test_stable_diffusion_textual_inversion_with_model_cpu_offload(self): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.enable_model_cpu_offload() pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") a111_file_neg = hf_hub_download( "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" ) pipe.load_textual_inversion(a111_file) pipe.load_textual_inversion(a111_file_neg) generator = torch.Generator(device="cpu").manual_seed(1) prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>" neg_prompt = "Style-Winter-neg" image = pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 def test_stable_diffusion_textual_inversion_with_sequential_cpu_offload(self): pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4") pipe.enable_sequential_cpu_offload() pipe.load_textual_inversion("sd-concepts-library/low-poly-hd-logos-icons") a111_file = hf_hub_download("hf-internal-testing/text_inv_embedding_a1111_format", "winter_style.pt") a111_file_neg = hf_hub_download( "hf-internal-testing/text_inv_embedding_a1111_format", "winter_style_negative.pt" ) pipe.load_textual_inversion(a111_file) pipe.load_textual_inversion(a111_file_neg) generator = torch.Generator(device="cpu").manual_seed(1) prompt = "An logo of a turtle in strong Style-Winter with <low-poly-hd-logos-icons>" neg_prompt = "Style-Winter-neg" image = 
pipe(prompt=prompt, negative_prompt=neg_prompt, generator=generator, output_type="np").images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_inv/winter_logo_style.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 8e-1 @require_python39_or_higher @require_torch_2 def test_stable_diffusion_compile(self): seed = 0 inputs = self.get_inputs(torch_device, seed=seed) # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=inputs) def test_stable_diffusion_lcm(self): unet = UNet2DConditionModel.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", subfolder="unet") sd_pipe = StableDiffusionPipeline.from_pretrained("Lykon/dreamshaper-7", unet=unet).to(torch_device) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 6 inputs["output_type"] = "pil" image = sd_pipe(**inputs).images[0] expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_full/stable_diffusion_lcm.png" ) image = sd_pipe.image_processor.pil_to_numpy(image) expected_image = sd_pipe.image_processor.pil_to_numpy(expected_image) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-2 @slow @require_torch_gpu class StableDiffusionPipelineCkptTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_download_from_hub(self): ckpt_paths = [ "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt", "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix_base.ckpt", ] for ckpt_path in ckpt_paths: pipe = StableDiffusionPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] assert image_out.shape == (512, 512, 3) def test_download_local(self): filename = hf_hub_download("runwayml/stable-diffusion-v1-5", filename="v1-5-pruned-emaonly.ckpt") pipe = StableDiffusionPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt" pipe = StableDiffusionPipeline.from_single_file(ckpt_path) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") generator = torch.Generator(device="cpu").manual_seed(0) image_ckpt = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") generator = torch.Generator(device="cpu").manual_seed(0) image = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] max_diff = 
numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten()) assert max_diff < 1e-3 @nightly @require_torch_gpu class StableDiffusionPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_1_4_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_1_5_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_5_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 3e-3 def test_stable_diffusion_lms(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_euler(self): sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to(torch_device) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_text2img/stable_diffusion_1_4_euler.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
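For reference, the `numpy_cosine_similarity_distance` checks used in the tests above reduce to one minus the cosine similarity of the flattened arrays. The sketch below is an illustrative re-implementation under that assumption, not the actual `diffusers.utils.testing_utils` helper.

import numpy as np


def cosine_similarity_distance(a: np.ndarray, b: np.ndarray) -> float:
    """Return 1 - cosine similarity between two flattened arrays (0 means identical direction)."""
    a = a.flatten().astype(np.float64)
    b = b.flatten().astype(np.float64)
    similarity = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
    return float(1.0 - similarity)


# Two nearly identical "images" yield a distance close to zero, which is what
# assertions such as `max_diff < 1e-2` in the tests above rely on.
image_a = np.random.RandomState(0).rand(64, 64, 3)
image_b = image_a + 1e-4
assert cosine_similarity_distance(image_a, image_b) < 1e-3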
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_gligen_text_image.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import ( CLIPProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, StableDiffusionGLIGENTextImagePipeline, UNet2DConditionModel, ) from diffusers.pipelines.stable_diffusion import CLIPImageProjection from diffusers.utils import load_image from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class GligenTextImagePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionGLIGENTextImagePipeline params = TEXT_TO_IMAGE_PARAMS | {"gligen_phrases", "gligen_images", "gligen_boxes"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_type="gated-text-image", ) # unet.position_net = PositionNet(32,32) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image_encoder_config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14") image_project = CLIPImageProjection(hidden_size=32) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": image_encoder, "image_project": image_project, "processor": processor, } 
return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) gligen_images = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png" ) inputs = { "prompt": "A modern livingroom", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "gligen_phrases": ["a birthday cake"], "gligen_images": [gligen_images], "gligen_boxes": [[0.2676, 0.6088, 0.4773, 0.7183]], "output_type": "np", } return inputs def test_stable_diffusion_gligen_text_image_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENTextImagePipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5069, 0.5561, 0.4577, 0.4792, 0.5203, 0.4089, 0.5039, 0.4919, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_gligen_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENTextImagePipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.425, 0.494, 0.429, 0.469, 0.525, 0.417, 0.533, 0.5, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
0
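A hedged usage sketch of the grounded text-plus-image pipeline these fast tests exercise. The checkpoint id is an assumed public GLIGEN checkpoint and may need to be swapped for your own; the prompt, phrase, box, and image URL mirror `get_dummy_inputs` above.

import torch
from diffusers import StableDiffusionGLIGENTextImagePipeline
from diffusers.utils import load_image

# Assumed checkpoint id; replace with any GLIGEN text+image checkpoint you have access to.
pipe = StableDiffusionGLIGENTextImagePipeline.from_pretrained(
    "anhnct/Gligen_Text_Image", torch_dtype=torch.float16
).to("cuda")

grounding_image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/gligen/livingroom_modern.png"
)

image = pipe(
    prompt="A modern livingroom",
    gligen_phrases=["a birthday cake"],
    gligen_images=[grounding_image],
    gligen_boxes=[[0.2676, 0.6088, 0.4773, 0.7183]],  # normalized (x0, y0, x1, y1) box for the phrase/image pair
    num_inference_steps=50,
    guidance_scale=6.0,
).images[0]
image.save("gligen_text_image.png")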
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_upscale.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionUpscalePipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import ( floats_tensor, is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): # TODO: is there an appropriate internal test set? hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx" def get_dummy_inputs(self, seed=0): image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_pipeline_default_ddpm(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() # started as 128, should now be 512 assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.6957, 0.7002, 0.7186, 0.6881, 0.6693, 0.6910, 0.7445, 0.7274, 0.7056]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.7349, 0.7347, 0.7034, 0.7696, 0.7876, 0.7597, 0.7916, 0.8085, 0.8036]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = 
EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_ddpm(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((128, 128)) # using the PNDM scheduler by default pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def test_inference_k_lms(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((128, 128)) lms_scheduler = LMSDiscreteScheduler.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler" ) pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained( "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array( [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 
0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
0
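The upscaler tests above repeatedly swap schedulers with `from_config` on an already-loaded ONNX pipeline. Below is a minimal end-to-end sketch of that pattern, assuming the CPU execution provider and reusing the 128px input image from the integration test.

import numpy as np
from diffusers import EulerDiscreteScheduler, OnnxStableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
    "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
)
# Replace the default scheduler in place, reusing its existing config.
pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)

low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((128, 128))

images = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=low_res,
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=np.random.RandomState(0),  # ONNX pipelines take a NumPy RandomState, not a torch.Generator
    output_type="np",
).images
assert images.shape == (1, 512, 512, 3)  # 128 -> 512, the 4x factor asserted in the tests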
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_cycle_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = CycleDiffusionPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "negative_prompt", "height", "width", "negative_prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"}) image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000, clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "An astronaut riding an elephant", "source_prompt": "An astronaut riding a horse", "image": image, "generator": generator, "num_inference_steps": 2, "eta": 0.1, "strength": 0.8, "guidance_scale": 3, 
"source_guidance_scale": 1, "output_type": "numpy", } return inputs def test_stable_diffusion_cycle(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = CycleDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) images = output.images image_slice = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_stable_diffusion_cycle_fp16(self): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.half() pipe = CycleDiffusionPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs) images = output.images image_slice = images[0, -3:, -3:, -1] assert images.shape == (1, 32, 32, 3) expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @unittest.skip("non-deterministic pipeline") def test_inference_batch_single_identical(self): return super().test_inference_batch_single_identical() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() @nightly @require_torch_gpu class CycleDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_cycle_diffusion_pipeline_fp16(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy" ) init_image = init_image.resize((512, 512)) model_id = "CompVis/stable-diffusion-v1-4" scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = CycleDiffusionPipeline.from_pretrained( model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16" ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() source_prompt = "A black colored car" prompt = "A blue colored car" generator = torch.manual_seed(0) output = pipe( prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100, eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator, output_type="np", ) image = output.images # the values aren't exactly equal, but the images look the same visually assert np.abs(image - expected_image).max() < 5e-1 def test_cycle_diffusion_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/cycle-diffusion/black_colored_car.png" ) expected_image = 
load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))

        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        source_prompt = "A black colored car"
        prompt = "A blue colored car"

        generator = torch.manual_seed(0)

        output = pipe(
            prompt=prompt,
            source_prompt=source_prompt,
            image=init_image,
            num_inference_steps=100,
            eta=0.1,
            strength=0.85,
            guidance_scale=3,
            source_guidance_scale=1,
            generator=generator,
            output_type="np",
        )
        image = output.images

        assert np.abs(image - expected_image).max() < 2e-2
0
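A condensed sketch of the CycleDiffusion call pattern the integration tests check (turning a black car blue). Moving the pipeline to `"cuda"` is an assumption; the remaining arguments mirror the test above.

import torch
from diffusers import CycleDiffusionPipeline, DDIMScheduler
from diffusers.utils import load_image

model_id = "CompVis/stable-diffusion-v1-4"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/cycle-diffusion/black_colored_car.png"
).resize((512, 512))

images = pipe(
    prompt="A blue colored car",
    source_prompt="A black colored car",  # description of the input image that guides the inversion
    image=init_image,
    num_inference_steps=100,
    eta=0.1,
    strength=0.85,
    guidance_scale=3,
    source_guidance_scale=1,
    generator=torch.manual_seed(0),
    output_type="np",
).images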
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): # FIXME: add fast tests pass @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_pndm(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A red cat sitting on a park bench" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 255:258, -1] assert images.shape == (1, 512, 512, 3) expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_k_lms(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" ) lms_scheduler = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx" ) pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A red cat sitting on a park bench" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, 
num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
0
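A minimal sketch of the ONNX inpainting call validated above. The image and mask URLs and call arguments mirror the integration test, while the CPU execution provider is an assumption for machines without a CUDA-enabled onnxruntime.

import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)

pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    revision="onnx",
    safety_checker=None,
    feature_extractor=None,
    provider="CPUExecutionProvider",  # assumption; the test uses a CUDA provider
)

images = pipe(
    prompt="A red cat sitting on a park bench",
    image=init_image,
    mask_image=mask_image,  # white pixels are repainted, black pixels are kept
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type="np",
).images
assert images.shape == (1, 512, 512, 3)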
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImg2ImgPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import ( floats_tensor, is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionImg2ImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def get_dummy_inputs(self, seed=0): image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed)) generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_pipeline_default_ddim(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_lms(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) # warmup pass to apply optimizations _ = pipe(**self.get_dummy_inputs()) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = 
EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_pndm(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((768, 512)) # using the PNDM scheduler by default pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def test_inference_k_lms(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) init_image = init_image.resize((768, 512)) lms_scheduler = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained( 
"runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=None) prompt = "A fantasy landscape, trending on artstation" generator = np.random.RandomState(0) output = pipe( prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", ) images = output.images image_slice = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
0
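A minimal sketch of the ONNX img2img flow exercised above, highlighting the NumPy `RandomState` generator and the `strength` argument. The CPU execution provider is an assumption; the rest mirrors the integration test.

import numpy as np
from diffusers import OnnxStableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    revision="onnx",
    safety_checker=None,
    feature_extractor=None,
    provider="CPUExecutionProvider",  # assumption; the test uses a CUDA provider
)

images = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,  # 0.0 keeps the init image untouched, 1.0 ignores it entirely
    guidance_scale=7.5,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type="np",
).images
assert images.shape == (1, 512, 768, 3)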
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_pix2pix_zero.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DDPMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, StableDiffusionPix2PixZeroPipeline, UNet2DConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, load_pt, nightly, require_torch_gpu, skip_mps, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() @skip_mps class StableDiffusionPix2PixZeroPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionPix2PixZeroPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"image"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def setUpClass(cls): cls.source_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/src_emb_0.pt" ) cls.target_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/tgt_emb_0.pt" ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() inverse_scheduler = DDIMInverseScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "inverse_scheduler": inverse_scheduler, "caption_generator": None, "caption_processor": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "A painting of a 
squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "cross_attention_guidance_amount": 0.15, "source_embeds": self.source_embeds, "target_embeds": self.target_embeds, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): dummy_image = floats_tensor((2, 3, 32, 32), rng=random.Random(seed)).to(torch_device) dummy_image = dummy_image / 2 + 0.5 generator = torch.manual_seed(seed) inputs = { "prompt": [ "A painting of a squirrel eating a burger", "A painting of a burger eating a squirrel", ], "image": dummy_image.cpu(), "num_inference_steps": 2, "guidance_scale": 6.0, "generator": generator, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs_by_type(self, device, seed=0, input_image_type="pt", output_type="np"): inputs = self.get_dummy_inversion_inputs(device, seed) if input_image_type == "pt": image = inputs["image"] elif input_image_type == "np": image = VaeImageProcessor.pt_to_numpy(inputs["image"]) elif input_image_type == "pil": image = VaeImageProcessor.pt_to_numpy(inputs["image"]) image = VaeImageProcessor.numpy_to_pil(image) else: raise ValueError(f"unsupported input_image_type {input_image_type}") inputs["image"] = image inputs["output_type"] = output_type return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_stable_diffusion_pix2pix_zero_inversion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) inputs["image"] = inputs["image"][:1] inputs["prompt"] = inputs["prompt"][:1] image = sd_pipe.invert(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4732, 0.4630, 0.5722, 0.5103, 0.5140, 0.5622, 0.5104, 0.5390, 0.5020]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) 
image = sd_pipe.invert(**inputs).images image_slice = image[1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.6046, 0.5400, 0.4902, 0.4448, 0.4694, 0.5498, 0.4857, 0.5073, 0.5089]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4863, 0.5053, 0.5033, 0.4007, 0.3571, 0.4768, 0.5176, 0.5277, 0.4940]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5177, 0.5097, 0.5047, 0.4076, 0.3667, 0.4767, 0.5238, 0.5307, 0.4958]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5421, 0.5525, 0.6085, 0.5279, 0.4658, 0.5317, 0.4418, 0.4815, 0.5132]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_ddpm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDPMScheduler() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4861, 0.5053, 0.5038, 0.3994, 0.3562, 0.4768, 0.5172, 0.5280, 0.4938]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_outputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) output_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pt")).images output_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="np")).images output_pil = 
sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, output_type="pil")).images max_diff = np.abs(output_pt.cpu().numpy().transpose(0, 2, 3, 1) - output_np).max() self.assertLess(max_diff, 1e-4, "`output_type=='pt'` generate different results from `output_type=='np'`") max_diff = np.abs(np.array(output_pil[0]) - (output_np[0] * 255).round()).max() self.assertLess(max_diff, 2.0, "`output_type=='pil'` generate different results from `output_type=='np'`") def test_stable_diffusion_pix2pix_zero_inversion_pt_np_pil_inputs_equivalent(self): device = torch_device components = self.get_dummy_components() sd_pipe = StableDiffusionPix2PixZeroPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) out_input_pt = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="pt")).images out_input_np = sd_pipe.invert(**self.get_dummy_inversion_inputs_by_type(device, input_image_type="np")).images out_input_pil = sd_pipe.invert( **self.get_dummy_inversion_inputs_by_type(device, input_image_type="pil") ).images max_diff = np.abs(out_input_pt - out_input_np).max() self.assertLess(max_diff, 1e-4, "`input_type=='pt'` generate different result from `input_type=='np'`") assert_mean_pixel_difference(out_input_pil, out_input_np, expected_max_diff=1) # Non-determinism caused by the scheduler optimizing the latent inputs during inference @unittest.skip("non-deterministic pipeline") def test_inference_batch_single_identical(self): return super().test_inference_batch_single_identical() @nightly @require_torch_gpu class StableDiffusionPix2PixZeroPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): cls.source_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat.pt" ) cls.target_embeds = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog.pt" ) def get_inputs(self, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "turn him into a cyborg", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "cross_attention_guidance_amount": 0.15, "source_embeds": self.source_embeds, "target_embeds": self.target_embeds, "output_type": "numpy", } return inputs def test_stable_diffusion_pix2pix_zero_default(self): pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.5742, 0.5757, 0.5747, 0.5781, 0.5688, 0.5713, 0.5742, 0.5664, 0.5747]) assert np.abs(expected_slice - image_slice).max() < 5e-2 def test_stable_diffusion_pix2pix_zero_k_lms(self): pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = 
np.array([0.6367, 0.5459, 0.5146, 0.5479, 0.4905, 0.4753, 0.4961, 0.4629, 0.4624]) assert np.abs(expected_slice - image_slice).max() < 5e-2 def test_stable_diffusion_pix2pix_zero_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.1345, 0.268, 0.1539, 0.0726, 0.0959, 0.2261, -0.2673, 0.0277, -0.2062]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.1393, 0.2637, 0.1617, 0.0724, 0.0987, 0.2271, -0.2666, 0.0299, -0.2104]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 8.2 GB is allocated assert mem_bytes < 8.2 * 10**9 @nightly @require_torch_gpu class InversionPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/cat_6.png" ) raw_image = raw_image.convert("RGB").resize((512, 512)) cls.raw_image = raw_image def test_stable_diffusion_pix2pix_inversion(self): pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) caption = "a photography of a cat with flowers" pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe.invert(caption, image=self.raw_image, generator=generator, num_inference_steps=10) inv_latents = output[0] image_slice = inv_latents[0, -3:, -3:, -1].flatten() assert inv_latents.shape == (1, 4, 64, 64) expected_slice = np.array([0.8447, -0.0730, 0.7588, -1.2070, -0.4678, 0.1511, -0.8555, 1.1816, -0.7666]) assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2 def test_stable_diffusion_2_pix2pix_inversion(self): pipe = 
StableDiffusionPix2PixZeroPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) caption = "a photography of a cat with flowers" pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe.invert(caption, image=self.raw_image, generator=generator, num_inference_steps=10) inv_latents = output[0] image_slice = inv_latents[0, -3:, -3:, -1].flatten() assert inv_latents.shape == (1, 4, 64, 64) expected_slice = np.array([0.8970, -0.1611, 0.4766, -1.1162, -0.5923, 0.1050, -0.9678, 1.0537, -0.6050]) assert np.abs(expected_slice - image_slice.cpu().numpy()).max() < 5e-2 def test_stable_diffusion_2_pix2pix_full(self): # numpy array of https://huggingface.co/datasets/hf-internal-testing/diffusers-images/blob/main/pix2pix/dog_2.png expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/pix2pix/dog_2.npy" ) pipe = StableDiffusionPix2PixZeroPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) caption = "a photography of a cat with flowers" pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe.invert(caption, image=self.raw_image, generator=generator) inv_latents = output[0] source_prompts = 4 * ["a cat sitting on the street", "a cat playing in the field", "a face of a cat"] target_prompts = 4 * ["a dog sitting on the street", "a dog playing in the field", "a face of a dog"] source_embeds = pipe.get_embeds(source_prompts) target_embeds = pipe.get_embeds(target_prompts) image = pipe( caption, source_embeds=source_embeds, target_embeds=target_embeds, num_inference_steps=125, cross_attention_guidance_amount=0.015, generator=generator, latents=inv_latents, negative_prompt=caption, output_type="np", ).images mean_diff = np.abs(expected_image - image).mean() assert mean_diff < 0.25
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_gligen.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, StableDiffusionGLIGENPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class GligenPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionGLIGENPipeline params = TEXT_TO_IMAGE_PARAMS | {"gligen_phrases", "gligen_boxes"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_type="gated", ) # unet.position_net = PositionNet(32,32) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A modern livingroom", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "gligen_phrases": ["a birthday cake"], "gligen_boxes": [[0.2676, 0.6088, 0.4773, 0.7183]], "output_type": "np", } return inputs def test_stable_diffusion_gligen_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENPipeline(**components) sd_pipe = 
sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5069, 0.5561, 0.4577, 0.4792, 0.5203, 0.4089, 0.5039, 0.4919, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_gligen_k_euler_ancestral(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionGLIGENPipeline(**components)
        sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.425, 0.494, 0.429, 0.469, 0.525, 0.417, 0.533, 0.5, 0.47])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
0
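# --- Illustrative note (not part of the dumped test file above) ---
# The GLIGEN fast tests pass grounding inputs as parallel lists: one phrase per
# normalized [xmin, ymin, xmax, ymax] box. Below is a minimal usage sketch built
# only from arguments the tests themselves exercise; the checkpoint id and the
# CUDA device are assumptions for illustration, not something the tests load.
import torch
from diffusers import StableDiffusionGLIGENPipeline

pipe = StableDiffusionGLIGENPipeline.from_pretrained(
    "masterful/gligen-1-4-generation-text-box",  # example checkpoint id (assumption)
    torch_dtype=torch.float16,
).to("cuda")

image = pipe(
    prompt="A modern livingroom",
    gligen_phrases=["a birthday cake"],               # one phrase ...
    gligen_boxes=[[0.2676, 0.6088, 0.4773, 0.7183]],  # ... grounded by one box
    num_inference_steps=2,
    guidance_scale=6.0,
    output_type="np",
).images[0]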
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_paradigms.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMParallelScheduler, DDPMParallelScheduler, StableDiffusionParadigmsPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, nightly, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionParadigmsPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionParadigmsPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMParallelScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "a photograph of an astronaut riding a horse", "generator": generator, "num_inference_steps": 10, "guidance_scale": 6.0, "output_type": "numpy", "parallel": 3, "debug": True, } return inputs def test_stable_diffusion_paradigms_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = 
StableDiffusionParadigmsPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4773, 0.5417, 0.4723, 0.4925, 0.5631, 0.4752, 0.5240, 0.4935, 0.5023]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_paradigms_default_case_ddpm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() torch.manual_seed(0) components["scheduler"] = DDPMParallelScheduler() torch.manual_seed(0) sd_pipe = StableDiffusionParadigmsPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3573, 0.4420, 0.4960, 0.4799, 0.3796, 0.3879, 0.4819, 0.4365, 0.4468]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # override to speed the overall test timing up. def test_inference_batch_consistent(self): super().test_inference_batch_consistent(batch_sizes=[1, 2]) # override to speed the overall test timing up. def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3e-3) def test_stable_diffusion_paradigms_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionParadigmsPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4771, 0.5420, 0.4683, 0.4918, 0.5636, 0.4725, 0.5230, 0.4923, 0.5015]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch_gpu class StableDiffusionParadigmsPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.Generator(device=torch_device).manual_seed(seed) inputs = { "prompt": "a photograph of an astronaut riding a horse", "generator": generator, "num_inference_steps": 10, "guidance_scale": 7.5, "output_type": "numpy", "parallel": 3, "debug": True, } return inputs def test_stable_diffusion_paradigms_default(self): model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMParallelScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionParadigmsPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9622, 0.9602, 0.9748, 0.9591, 0.9630, 0.9691, 0.9661, 0.9631, 0.9741]) assert np.abs(expected_slice - image_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint_legacy.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInpaintPipelineLegacy, UNet2DConditionModel, UNet2DModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, preprocess_image, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class StableDiffusionInpaintLegacyPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model @property def dummy_cond_unet(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) return model @property def dummy_cond_unet_inpaint(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) return model @property def dummy_vq_model(self): torch.manual_seed(0) model = VQModel( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config) @property def dummy_extractor(self): def extract(*args, **kwargs): class Out: def __init__(self): self.pixel_values = torch.ones([0]) def to(self, device): self.pixel_values.to(device) 
return self return Out() return extract def test_stable_diffusion_inpaint_legacy(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ) image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4941, 0.5396, 0.4689, 0.6338, 0.5392, 0.4094, 0.5477, 0.5904, 0.5165]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_legacy_batched(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") init_images_tens = preprocess_image(init_image, batch_size=2) init_masks_tens = init_images_tens + 4 # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) images = sd_pipe( [prompt] * 2, generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_images_tens, mask_image=init_masks_tens, ).images assert images.shape == (2, 32, 32, 3) image_slice_0 = images[0, -3:, -3:, -1].flatten() image_slice_1 = images[1, -3:, -3:, -1].flatten() expected_slice_0 = np.array([0.4697, 0.3770, 0.4096, 0.4653, 0.4497, 0.4183, 0.3950, 0.4668, 0.4672]) expected_slice_1 = np.array([0.4105, 0.4987, 0.5771, 0.4921, 0.4237, 0.5684, 0.5496, 0.4645, 0.5272]) assert np.abs(expected_slice_0 - image_slice_0).max() < 1e-2 assert np.abs(expected_slice_1 - image_slice_1).max() < 1e-2 def test_stable_diffusion_inpaint_legacy_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator 
unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" negative_prompt = "french fries" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( prompt, negative_prompt=negative_prompt, generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4941, 0.5396, 0.4689, 0.6338, 0.5392, 0.4094, 0.5477, 0.5904, 0.5165]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_legacy_num_images_per_prompt(self): device = "cpu" unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB") mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionInpaintPipelineLegacy( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" # test num_images_per_prompt=1 (default) images = sd_pipe( prompt, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ).images assert images.shape == (1, 32, 32, 3) # test num_images_per_prompt=1 (default) for batch of prompts batch_size = 2 images = sd_pipe( [prompt] * batch_size, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, ).images assert images.shape == (batch_size, 32, 32, 3) # test num_images_per_prompt for single prompt num_images_per_prompt = 2 images = sd_pipe( prompt, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, num_images_per_prompt=num_images_per_prompt, ).images assert images.shape == (num_images_per_prompt, 32, 32, 3) # test num_images_per_prompt for batch of prompts batch_size = 2 images = sd_pipe( [prompt] * batch_size, num_inference_steps=2, output_type="np", image=init_image, mask_image=mask_image, num_images_per_prompt=num_images_per_prompt, ).images assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3) @slow @require_torch_gpu class StableDiffusionInpaintLegacyPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, generator_device="cpu", seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( 
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "A red cat sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint_legacy_pndm(self): pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.5665, 0.6117, 0.6430, 0.4057, 0.4594, 0.5658, 0.1596, 0.3106, 0.4305]) assert np.abs(expected_slice - image_slice).max() < 3e-3 def test_stable_diffusion_inpaint_legacy_batched(self): pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = preprocess_image(inputs["image"], batch_size=2) mask = inputs["mask_image"].convert("L") mask = np.array(mask).astype(np.float32) / 255.0 mask = torch.from_numpy(1 - mask) masks = torch.vstack([mask[None][None]] * 2) inputs["mask_image"] = masks image = pipe(**inputs).images assert image.shape == (2, 512, 512, 3) image_slice_0 = image[0, 253:256, 253:256, -1].flatten() image_slice_1 = image[1, 253:256, 253:256, -1].flatten() expected_slice_0 = np.array( [0.52093095, 0.4176447, 0.32752383, 0.6175223, 0.50563973, 0.36470804, 0.65460044, 0.5775188, 0.44332123] ) expected_slice_1 = np.array( [0.3592432, 0.4233033, 0.3914635, 0.31014425, 0.3702293, 0.39412856, 0.17526966, 0.2642669, 0.37480092] ) assert np.abs(expected_slice_0 - image_slice_0).max() < 3e-3 assert np.abs(expected_slice_1 - image_slice_1).max() < 3e-3 def test_stable_diffusion_inpaint_legacy_k_lms(self): pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.4534, 0.4467, 0.4329, 0.4329, 0.4339, 0.4220, 0.4244, 0.4332, 0.4426]) assert np.abs(expected_slice - image_slice).max() < 3e-3 def test_stable_diffusion_inpaint_legacy_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.5977, 1.5449, 1.0586, -0.3250, 0.7383, -0.0862, 0.4631, -0.2571, -1.1289]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) 
latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.5190, 1.1621, 0.6885, 0.2424, 0.3337, -0.1617, 0.6914, -0.1957, -0.5474]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 callback_fn.has_been_called = False pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 2 @nightly @require_torch_gpu class StableDiffusionInpaintLegacyPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "A red cat sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 50, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_inpaint_pndm(self): sd_pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint_legacy/stable_diffusion_1_5_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_ddim(self): sd_pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint_legacy/stable_diffusion_1_5_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_lms(self): sd_pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint_legacy/stable_diffusion_1_5_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_dpm(self): sd_pipe = StableDiffusionInpaintPipelineLegacy.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 30 image = 
sd_pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
            "/stable_diffusion_inpaint_legacy/stable_diffusion_1_5_dpm_multi.npy"
        )

        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_k_diffusion.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch

from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device


enable_full_determinism()


@nightly
@require_torch_gpu
class StableDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_2(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_euler")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=9.0, num_inference_steps=20, output_type="np")

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-1

    def test_stable_diffusion_karras_sigmas(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_2m")

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=7.5,
            num_inference_steps=15,
            output_type="np",
            use_karras_sigmas=True,
        )

        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_noise_sampler_seed(self):
        sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        sd_pipe.set_scheduler("sample_dpmpp_sde")

        prompt = "A painting of a squirrel eating a burger"
        seed = 0
        images1 = sd_pipe(
            [prompt],
            generator=torch.manual_seed(seed),
            noise_sampler_seed=seed,
            guidance_scale=9.0,
            num_inference_steps=20,
            output_type="np",
        ).images
        images2 = sd_pipe(
            [prompt],
            generator=torch.manual_seed(seed),
            noise_sampler_seed=seed,
            guidance_scale=9.0,
            num_inference_steps=20,
            output_type="np",
        ).images

        assert images1.shape == (1, 512, 512, 3)
        assert images2.shape == (1, 512, 512, 3)
        assert np.abs(images1.flatten() - images2.flatten()).max() < 1e-2
0
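# --- Illustrative note (not part of the dumped test file above) ---
# StableDiffusionKDiffusionPipeline picks its sampler by name via set_scheduler();
# the names used in the tests above ("sample_euler", "sample_dpmpp_2m",
# "sample_dpmpp_sde") are k-diffusion sampler identifiers. A rough sketch, assuming
# a CUDA device and reusing only call arguments that appear in the tests:
import torch
from diffusers import StableDiffusionKDiffusionPipeline

sd_pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
sd_pipe = sd_pipe.to("cuda")
sd_pipe.set_scheduler("sample_dpmpp_2m")

image = sd_pipe(
    ["A painting of a squirrel eating a burger"],
    generator=torch.manual_seed(0),
    guidance_scale=7.5,
    num_inference_steps=15,
    use_karras_sigmas=True,  # exercised by test_stable_diffusion_karras_sigmas
    output_type="np",
).images[0]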
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_onnx_stable_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import numpy as np from diffusers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionPipeline, PNDMScheduler, ) from diffusers.utils.testing_utils import is_onnx_available, nightly, require_onnxruntime, require_torch_gpu from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class OnnxStableDiffusionPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase): hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def get_dummy_inputs(self, seed=0): generator = np.random.RandomState(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_pipeline_default_ddim(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65072, 0.58492, 0.48219, 0.55521, 0.53180, 0.55939, 0.50697, 0.39800, 0.46455]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_pndm(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.65863, 0.59425, 0.49326, 0.56313, 0.53875, 0.56627, 0.51065, 0.39777, 0.46330]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_lms(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53755, 0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_euler(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53755, 
0.60786, 0.47402, 0.49488, 0.51869, 0.49819, 0.47985, 0.38957, 0.44279]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_euler_ancestral(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53817, 0.60812, 0.47384, 0.49530, 0.51894, 0.49814, 0.47984, 0.38958, 0.44271]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_pipeline_dpm_multistep(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) expected_slice = np.array([0.53895, 0.60808, 0.47933, 0.49608, 0.51886, 0.49950, 0.48053, 0.38957, 0.44200]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_prompt_embeds(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs() prompt = 3 * [inputs.pop("prompt")] text_inputs = pipe.tokenizer( prompt, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_inputs = text_inputs["input_ids"] prompt_embeds = pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0] inputs["prompt_embeds"] = prompt_embeds # forward output = pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_negative_prompt_embeds(self): pipe = OnnxStableDiffusionPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider") pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs() negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] inputs = self.get_dummy_inputs() prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = pipe.tokenizer( p, padding="max_length", max_length=pipe.tokenizer.model_max_length, truncation=True, return_tensors="np", ) text_inputs = text_inputs["input_ids"] embeds.append(pipe.text_encoder(input_ids=text_inputs.astype(np.int32))[0]) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 @nightly @require_onnxruntime @require_torch_gpu class OnnxStableDiffusionPipelineIntegrationTests(unittest.TestCase): @property def gpu_provider(self): return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def gpu_options(self): 
options = ort.SessionOptions() options.enable_mem_pattern = False return options def test_inference_default_pndm(self): # using the PNDM scheduler by default sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" np.random.seed(0) output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=10, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0452, 0.0390, 0.0087, 0.0350, 0.0617, 0.0364, 0.0544, 0.0523, 0.0720]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_ddim(self): ddim_scheduler = DDIMScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=ddim_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "open neural network exchange" generator = np.random.RandomState(0) output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2867, 0.1974, 0.1481, 0.7294, 0.7251, 0.6667, 0.4194, 0.5642, 0.6486]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_k_lms(self): lms_scheduler = LMSDiscreteScheduler.from_pretrained( "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx" ) sd_pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options, ) sd_pipe.set_progress_bar_config(disable=None) prompt = "open neural network exchange" generator = np.random.RandomState(0) output = sd_pipe([prompt], guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np") image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.2306, 0.1959, 0.1593, 0.6549, 0.6394, 0.5408, 0.5065, 0.6010, 0.6161]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_intermediate_state(self): number_of_steps = 0 def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None: test_callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 0: assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.6772, -0.3835, -1.2456, 0.1905, -1.0974, 0.6967, -1.9353, 0.0178, 1.0167] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 elif step == 5: assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.3351, 0.2241, -0.1837, -0.2325, -0.6577, 0.3393, -0.0241, 0.5899, 1.3875] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3 test_callback_fn.has_been_called = False pipe = OnnxStableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", revision="onnx", safety_checker=None, 
feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        generator = np.random.RandomState(0)
        pipe(
            prompt=prompt,
            num_inference_steps=5,
            guidance_scale=7.5,
            generator=generator,
            callback=test_callback_fn,
            callback_steps=1,
        )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 6

    def test_stable_diffusion_no_safety_checker(self):
        pipe = OnnxStableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        assert isinstance(pipe, OnnxStableDiffusionPipeline)
        assert pipe.safety_checker is None

        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None

        # check that there's no error when saving a pipeline with one of the models being None
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = OnnxStableDiffusionPipeline.from_pretrained(tmpdirname)

        # sanity check that the pipeline still works
        assert pipe.safety_checker is None
        image = pipe("example prompt", num_inference_steps=2).images[0]
        assert image is not None
0
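# --- Illustrative note (not part of the dumped test file above) ---
# The ONNX tests drive OnnxStableDiffusionPipeline through an onnxruntime execution
# provider and use a NumPy RandomState as the generator instead of a torch.Generator.
# A minimal CPU-only sketch using only arguments the fast tests themselves pass; the
# tiny test checkpoint id is taken from the file above.
import numpy as np
from diffusers import OnnxStableDiffusionPipeline

pipe = OnnxStableDiffusionPipeline.from_pretrained(
    "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
    provider="CPUExecutionProvider",
)

image = pipe(
    prompt="A painting of a squirrel eating a burger",
    generator=np.random.RandomState(0),
    num_inference_steps=2,
    guidance_scale=7.5,
    output_type="np",
).images[0]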
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_image_variation.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModelWithProjection from diffusers import ( AutoencoderKL, DPMSolverMultistepScheduler, PNDMScheduler, StableDiffusionImageVariationPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionImageVariationPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionImageVariationPipeline params = IMAGE_VARIATION_PARAMS batch_params = IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) image_encoder_config = CLIPVisionConfig( hidden_size=32, projection_dim=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, image_size=32, patch_size=4, ) image_encoder = CLIPVisionModelWithProjection(image_encoder_config) feature_extractor = CLIPImageProcessor(crop_size=32, size=32) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "image_encoder": image_encoder, "feature_extractor": feature_extractor, "safety_checker": None, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_img_variation_default_case(self): device = "cpu" # ensure determinism for the device-dependent 
torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5239, 0.5723, 0.4796, 0.5049, 0.5550, 0.4685, 0.5329, 0.4891, 0.4921]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img_variation_multiple_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImageVariationPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["image"] = 2 * [inputs["image"]] output = sd_pipe(**inputs) image = output.images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 64, 64, 3) expected_slice = np.array([0.6892, 0.5637, 0.5836, 0.5771, 0.6254, 0.6409, 0.5580, 0.5569, 0.5289]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class StableDiffusionImageVariationPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_imgvar/input_image_vermeer.png" ) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "image": init_image, "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_img_variation_pipeline_default(self): sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", safety_checker=None ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) generator_device = "cpu" inputs = self.get_inputs(generator_device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.8449, 0.9079, 0.7571, 0.7873, 0.8348, 0.7010, 0.6694, 0.6873, 0.6138]) max_diff = numpy_cosine_similarity_distance(image_slice, expected_slice) assert max_diff < 1e-4 def test_stable_diffusion_img_variation_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.7974, -0.4343, -1.087, 0.04785, -1.327, 0.855, -2.148, -0.1725, 1.439]) max_diff = numpy_cosine_similarity_distance(latents_slice.flatten(), expected_slice) assert max_diff < 1e-3 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.3232, 0.004883, 0.913, -1.084, 0.6143, -1.6875, 
-2.463, -0.439, -0.419]) max_diff = numpy_cosine_similarity_distance(latents_slice.flatten(), expected_slice) assert max_diff < 1e-3 callback_fn.has_been_called = False pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", safety_checker=None, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() generator_device = "cpu" inputs = self.get_inputs(generator_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == inputs["num_inference_steps"] def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionImageVariationPipeline.from_pretrained( "lambdalabs/sd-image-variations-diffusers", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.6 GB is allocated assert mem_bytes < 2.6 * 10**9 @nightly @require_torch_gpu class StableDiffusionImageVariationPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_imgvar/input_image_vermeer.png" ) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "image": init_image, "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_img_variation_pndm(self): sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained("fusing/sd-image-variations-diffusers") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_imgvar/lambdalabs_variations_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img_variation_dpm(self): sd_pipe = StableDiffusionImageVariationPipeline.from_pretrained("fusing/sd-image-variations-diffusers") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_imgvar/lambdalabs_variations_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
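For reference, a minimal usage sketch distilled from the image-variation slow tests above. It loads the same lambdalabs/sd-image-variations-diffusers checkpoint and Vermeer input image that the tests use; the fp16 dtype, CUDA device, step count and output filename are illustrative assumptions, not something the tests prescribe.

# Hedged sketch: generate a variation of an input image with the checkpoint exercised above.
import torch
from diffusers import StableDiffusionImageVariationPipeline
from diffusers.utils import load_image

pipe = StableDiffusionImageVariationPipeline.from_pretrained(
    "lambdalabs/sd-image-variations-diffusers", torch_dtype=torch.float16
)
pipe = pipe.to("cuda")

# same test-array image the slow tests load
init_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_imgvar/input_image_vermeer.png"
)

generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe(
    image=init_image,
    guidance_scale=7.5,
    num_inference_steps=50,
    generator=generator,
).images[0]
image.save("variation.png")  # output path is an assumption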
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_panorama.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionPanoramaPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "a photo of the dolomites", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. 
"height": None, "width": None, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_panorama_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_circular_padding_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs, circular_padding=True).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # override to speed the overall test timing up. def test_inference_batch_consistent(self): super().test_inference_batch_consistent(batch_sizes=[1, 2]) # override to speed the overall test timing up. def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=5.0e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_stable_diffusion_panorama_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_views_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, view_batch_size=2) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_views_batch_circular_padding(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs, 
circular_padding=True, view_batch_size=2) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6127, 0.6299, 0.4595, 0.4051, 0.4543, 0.3925, 0.5510, 0.5693, 0.5031]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_panorama_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True ) sd_pipe = StableDiffusionPanoramaPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch_gpu class StableDiffusionPanoramaNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "a photo of the dolomites", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_panorama_default(self): model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) expected_slice = np.array( [ 0.36968392, 0.27025372, 0.32446766, 0.28379387, 0.36363274, 0.30733347, 0.27100027, 0.27054125, 0.25536096, ] ) assert np.abs(expected_slice - image_slice).max() < 1e-2 def test_stable_diffusion_panorama_k_lms(self): pipe = StableDiffusionPanoramaPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2048, 3) expected_slice = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice).max() < 
1e-2 def test_stable_diffusion_panorama_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [ 0.18681869, 0.33907816, 0.5361276, 0.14432865, -0.02856611, -0.73941123, 0.23397987, 0.47322682, -0.37823164, ] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [ 0.18539645, 0.33987248, 0.5378559, 0.14437142, -0.02455261, -0.7338317, 0.23990755, 0.47356272, -0.3786505, ] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 3 def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() model_ckpt = "stabilityai/stable-diffusion-2-base" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
0
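A hedged usage sketch based on the panorama tests above: it reuses the same stabilityai/stable-diffusion-2-base checkpoint, DDIM scheduler and "dolomites" prompt, and exercises the view_batch_size and circular_padding options the tests cover. Step count, dtype and filename are illustrative.

# Sketch: panoramic text-to-image with view batching and circular padding, as tested above.
import torch
from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_ckpt = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(
    model_ckpt, scheduler=scheduler, torch_dtype=torch.float16
).to("cuda")

image = pipe(
    "a photo of the dolomites",
    guidance_scale=7.5,
    num_inference_steps=50,  # the tests use 3 steps only to keep runtime low
    view_batch_size=2,       # denoise panorama views in batches, as exercised above
    circular_padding=True,   # wrap-around padding for seamless 360-degree panoramas
).images[0]
image.save("panorama.png")   # output path is an assumption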
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_model_editing.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, PNDMScheduler, StableDiffusionModelEditingPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionModelEditingPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionModelEditingPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler() torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "A field of roses", "generator": generator, # Setting height and width to None to prevent OOMs on CPU. 
"height": None, "width": None, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_model_editing_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionModelEditingPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4755, 0.5132, 0.4976, 0.3904, 0.3554, 0.4765, 0.5139, 0.5158, 0.4889]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_model_editing_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionModelEditingPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4992, 0.5101, 0.5004, 0.3949, 0.3604, 0.4735, 0.5216, 0.5204, 0.4913]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_model_editing_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionModelEditingPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4747, 0.5372, 0.4779, 0.4982, 0.5543, 0.4816, 0.5238, 0.4904, 0.5027]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_model_editing_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler() sd_pipe = StableDiffusionModelEditingPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) # the pipeline does not expect pndm so test if it raises error. 
with self.assertRaises(ValueError): _ = sd_pipe(**inputs).images def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=5e-3) @nightly @require_torch_gpu class StableDiffusionModelEditingSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0): generator = torch.manual_seed(seed) inputs = { "prompt": "A field of roses", "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_model_editing_default(self): model_ckpt = "CompVis/stable-diffusion-v1-4" pipe = StableDiffusionModelEditingPipeline.from_pretrained(model_ckpt, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array( [0.6749496, 0.6386453, 0.51443267, 0.66094905, 0.61921215, 0.5491332, 0.5744417, 0.58075106, 0.5174658] ) assert np.abs(expected_slice - image_slice).max() < 1e-2 # make sure image changes after editing pipe.edit_model("A pack of roses", "A pack of blue roses") image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) assert np.abs(expected_slice - image_slice).max() > 1e-1 def test_stable_diffusion_model_editing_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() model_ckpt = "CompVis/stable-diffusion-v1-4" scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler") pipe = StableDiffusionModelEditingPipeline.from_pretrained( model_ckpt, scheduler=scheduler, safety_checker=None ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs() _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 4.4 GB is allocated assert mem_bytes < 4.4 * 10**9
0
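A sketch of the model-editing workflow the tests above exercise: generate once, apply edit_model with the same source/destination prompts as the nightly test, then generate again to see the edited concept. Device placement and output filenames are assumptions.

# Sketch: edit the "roses" concept in-place and compare before/after generations.
import torch
from diffusers import StableDiffusionModelEditingPipeline

pipe = StableDiffusionModelEditingPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4"
).to("cuda")

prompt = "A field of roses"
before = pipe(prompt, generator=torch.manual_seed(0)).images[0]

# rewrite the concept so the edited weights associate roses with blue roses,
# mirroring the edit applied in test_stable_diffusion_model_editing_default
pipe.edit_model("A pack of roses", "A pack of blue roses")

after = pipe(prompt, generator=torch.manual_seed(0)).images[0]
before.save("roses_before.png")
after.save("roses_after.png")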
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import traceback import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, AutoencoderTiny, DDIMScheduler, DPMSolverMultistepScheduler, HeunDiscreteScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, skip_mps, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() # Will be run via run_test_in_subprocess def _test_img2img_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 768, 3) expected_slice = np.array([0.0606, 0.0570, 0.0805, 0.0579, 0.0628, 0.0623, 0.0843, 0.1115, 0.0806]) assert np.abs(expected_slice - image_slice).max() < 1e-3 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class StableDiffusionImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", 
"UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_tiny_autoencoder(self): return AutoencoderTiny(in_channels=3, out_channels=3, latent_channels=4) def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_img2img_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4555, 0.3216, 0.4049, 0.4620, 0.4618, 0.4126, 0.4122, 0.4629, 0.4579]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_default_case_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_default_case_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.5709, 0.4614, 0.4587, 0.5978, 0.5298, 0.6910, 0.6240, 0.5212, 0.5454]) assert 
np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = sd_pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4593, 0.3408, 0.4232, 0.4749, 0.4476, 0.4115, 0.4357, 0.4733, 0.4663]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = inputs["image"].repeat(2, 1, 1, 1) image = sd_pipe(**inputs).images image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) expected_slice = np.array([0.4241, 0.5576, 0.5711, 0.4792, 0.4311, 0.5952, 0.5827, 0.5138, 0.5109]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear" ) sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.4398, 0.4949, 0.4337, 0.6580, 0.5555, 0.4338, 0.5769, 0.5955, 0.5175]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_img2img_tiny_autoencoder(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe.vae = self.get_dummy_tiny_autoencoder() sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.00669, 0.00669, 0.0, 0.00693, 0.00858, 0.0, 0.00567, 0.00515, 0.00125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent() @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass(expected_max_diff=5e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_pipeline_interrupt(self): components = 
self.get_dummy_components() sd_pipe = StableDiffusionImg2ImgPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) @slow @require_torch_gpu class StableDiffusionImg2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_img2img_default(self): pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 768, 3) expected_slice = np.array([0.4300, 0.4662, 0.4930, 0.3990, 0.4307, 0.4525, 0.3719, 0.4064, 0.3923]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_img2img_k_lms(self): pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 768, 3) expected_slice = np.array([0.0389, 0.0346, 0.0415, 0.0290, 0.0218, 0.0210, 0.0408, 0.0567, 0.0271]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_img2img_ddim(self): pipe = StableDiffusionImg2ImgPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", safety_checker=None) pipe.scheduler = 
DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 768, 3) expected_slice = np.array([0.0593, 0.0607, 0.0851, 0.0582, 0.0636, 0.0721, 0.0751, 0.0981, 0.0781]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_img2img_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 96) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.4958, 0.5107, 1.1045, 2.7539, 4.6680, 3.8320, 1.5049, 1.8633, 2.6523]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 96) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([-0.4956, 0.5078, 1.0918, 2.7520, 4.6484, 3.8125, 1.5146, 1.8633, 2.6367]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 2 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def test_stable_diffusion_pipeline_with_model_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() inputs = self.get_inputs(torch_device, dtype=torch.float16) # Normal inference pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # With model offloading # Reload but don't move to cuda pipe = StableDiffusionImg2ImgPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16, ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) _ = pipe(**inputs) mem_bytes_offloaded = torch.cuda.max_memory_allocated() assert mem_bytes_offloaded < mem_bytes for module in pipe.text_encoder, pipe.unet, pipe.vae: assert module.device == torch.device("cpu") def test_img2img_2nd_order(self): 
sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = HeunDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 10 inputs["strength"] = 0.75 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/img2img_heun.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 5e-2 inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 11 inputs["strength"] = 0.75 image_other = sd_pipe(**inputs).images[0] mean_diff = np.abs(image - image_other).mean() # images should be very similar assert mean_diff < 5e-2 def test_stable_diffusion_img2img_pipeline_multiple_of_8(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ) # resize to resolution that is divisible by 8 but not 16 or 32 init_image = init_image.resize((760, 504)) model_id = "CompVis/stable-diffusion-v1-4" pipe = StableDiffusionImg2ImgPipeline.from_pretrained( model_id, safety_checker=None, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "A fantasy landscape, trending on artstation" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5, generator=generator, output_type="np", ) image = output.images[0] image_slice = image[255:258, 383:386, -1] assert image.shape == (504, 760, 3) expected_slice = np.array([0.9393, 0.9500, 0.9399, 0.9438, 0.9458, 0.9400, 0.9455, 0.9414, 0.9423]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3 def test_img2img_safety_checker_works(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 20 # make sure the safety checker is activated inputs["prompt"] = "naked, sex, porn" out = sd_pipe(**inputs) assert out.nsfw_content_detected[0], f"Safety checker should work for prompt: {inputs['prompt']}" assert np.abs(out.images[0]).sum() < 1e-5 # should be all zeros @require_python39_or_higher @require_torch_2 def test_img2img_compile(self): seed = 0 inputs = self.get_inputs(torch_device, seed=seed) # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_img2img_compile, inputs=inputs) @nightly @require_torch_gpu class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) inputs = { "prompt": "a fantasy landscape, concept art, high resolution", "image": init_image, "generator": generator, "num_inference_steps": 50, "strength": 0.75, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_img2img_pndm(self): sd_pipe = 
StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/stable_diffusion_1_5_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_ddim(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/stable_diffusion_1_5_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_lms(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/stable_diffusion_1_5_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_dpm(self): sd_pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 30 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/stable_diffusion_1_5_dpm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
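A minimal img2img sketch mirroring get_inputs() in the slow/nightly tests above: same sketch-mountains input image, prompt, strength and guidance scale. The fp16 dtype and output filename are assumptions.

# Sketch: image-to-image generation with the call pattern exercised by the tests above.
import torch
from diffusers import StableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_img2img/sketch-mountains-input.png"
)

image = pipe(
    prompt="a fantasy landscape, concept art, high resolution",
    image=init_image,
    strength=0.75,           # how strongly the init image is noised before denoising
    guidance_scale=7.5,
    num_inference_steps=50,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]
image.save("fantasy_landscape.png")  # output path is an assumption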
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/pixart/test_pixart.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import unittest import numpy as np import torch from transformers import AutoTokenizer, T5EncoderModel from diffusers import ( AutoencoderKL, DDIMScheduler, PixArtAlphaPipeline, Transformer2DModel, ) from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, to_np enable_full_determinism() class PixArtAlphaPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = PixArtAlphaPipeline params = TEXT_TO_IMAGE_PARAMS - {"cross_attention_kwargs"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params def get_dummy_components(self): torch.manual_seed(0) transformer = Transformer2DModel( sample_size=8, num_layers=2, patch_size=2, attention_head_dim=8, num_attention_heads=3, caption_channels=32, in_channels=4, cross_attention_dim=24, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_single", norm_elementwise_affine=False, norm_eps=1e-6, ) torch.manual_seed(0) vae = AutoencoderKL() scheduler = DDIMScheduler() text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5") tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5") components = { "transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "use_resolution_binning": False, "output_type": "np", } return inputs def test_sequential_cpu_offload_forward_pass(self): # TODO(PVP, Sayak) need to fix later return def test_save_load_optional_components(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] ( prompt_embeds, prompt_attention_mask, negative_prompt_embeds, negative_prompt_attention_mask, ) = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": 
negative_prompt_attention_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } # set all optional components to None for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attention_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": negative_prompt_attention_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 8, 8, 3)) expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.483, 0.2583, 0.5331, 0.4852]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_non_square_images(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs, height=32, width=48).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 32, 48, 3)) expected_slice = np.array([0.6493, 0.537, 0.4081, 0.4762, 0.3695, 0.4711, 0.3026, 0.5218, 0.5263]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_with_embeddings_and_multiple_images(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = inputs["prompt"] generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] prompt_embeds, prompt_attn_mask, negative_prompt_embeds, neg_prompt_attn_mask = pipe.encode_prompt(prompt) # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attn_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": neg_prompt_attn_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "num_images_per_prompt": 2, "use_resolution_binning": False, } # set all optional components to None for optional_component in pipe._optional_components: 
setattr(pipe, optional_component, None) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) generator = inputs["generator"] num_inference_steps = inputs["num_inference_steps"] output_type = inputs["output_type"] # inputs with prompt converted to embeddings inputs = { "prompt_embeds": prompt_embeds, "prompt_attention_mask": prompt_attn_mask, "negative_prompt": None, "negative_prompt_embeds": negative_prompt_embeds, "negative_prompt_attention_mask": neg_prompt_attn_mask, "generator": generator, "num_inference_steps": num_inference_steps, "output_type": output_type, "num_images_per_prompt": 2, "use_resolution_binning": False, } output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess(max_diff, 1e-4) def test_inference_with_multiple_images_per_prompt(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_images_per_prompt"] = 2 image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (2, 8, 8, 3)) expected_slice = np.array([0.6319, 0.3526, 0.3806, 0.6327, 0.4639, 0.483, 0.2583, 0.5331, 0.4852]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_raises_warning_for_mask_feature(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs.update({"mask_feature": True}) with self.assertWarns(FutureWarning) as warning_ctx: _ = pipe(**inputs).images assert "mask_feature" in str(warning_ctx.warning) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @slow @require_torch_gpu class PixArtAlphaPipelineIntegrationTests(unittest.TestCase): ckpt_id_1024 = "PixArt-alpha/PixArt-XL-2-1024-MS" ckpt_id_512 = "PixArt-alpha/PixArt-XL-2-512x512" prompt = "A small cactus with a happy face in the Sahara desert." 
def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_pixart_1024(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt image = pipe(prompt, generator=generator, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1941, 0.2117, 0.2188, 0.1946, 0.218, 0.2124, 0.199, 0.2437, 0.2583]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_pixart_512(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt image = pipe(prompt, generator=generator, output_type="np").images image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.2637, 0.291, 0.2939, 0.207, 0.2512, 0.2783, 0.2168, 0.2324, 0.2817]) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_pixart_1024_without_resolution_binning(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_1024, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt height, width = 1024, 768 num_inference_steps = 10 image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] generator = torch.manual_seed(0) no_res_bin_image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", use_resolution_binning=False, ).images no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4) def test_pixart_512_without_resolution_binning(self): generator = torch.manual_seed(0) pipe = PixArtAlphaPipeline.from_pretrained(self.ckpt_id_512, torch_dtype=torch.float16) pipe.enable_model_cpu_offload() prompt = self.prompt height, width = 512, 768 num_inference_steps = 10 image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", ).images image_slice = image[0, -3:, -3:, -1] generator = torch.manual_seed(0) no_res_bin_image = pipe( prompt, height=height, width=width, generator=generator, num_inference_steps=num_inference_steps, output_type="np", use_resolution_binning=False, ).images no_res_bin_image_slice = no_res_bin_image[0, -3:, -3:, -1] assert not np.allclose(image_slice, no_res_bin_image_slice, atol=1e-4, rtol=1e-4)
0
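A short PixArt-Alpha sketch following the integration tests above: same 1024 checkpoint, prompt and model CPU offload, with the use_resolution_binning flag the tests compare against. The chosen resolution and filename are illustrative.

# Sketch: PixArt-Alpha text-to-image with resolution binning, as exercised above.
import torch
from diffusers import PixArtAlphaPipeline

pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # keeps peak GPU memory low, as in the tests

prompt = "A small cactus with a happy face in the Sahara desert."
image = pipe(
    prompt,
    height=1024,
    width=768,
    use_resolution_binning=True,  # snap the request to the nearest trained aspect-ratio bucket
    generator=torch.manual_seed(0),
).images[0]
image.save("cactus.png")          # output path is an assumption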
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/pndm/test_pndm.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers import PNDMPipeline, PNDMScheduler, UNet2DModel from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch, torch_device enable_full_determinism() class PNDMPipelineFastTests(unittest.TestCase): @property def dummy_uncond_unet(self): torch.manual_seed(0) model = UNet2DModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"), ) return model def test_inference(self): unet = self.dummy_uncond_unet scheduler = PNDMScheduler() pndm = PNDMPipeline(unet=unet, scheduler=scheduler) pndm.to(torch_device) pndm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images generator = torch.manual_seed(0) image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @nightly @require_torch class PNDMPipelineIntegrationTests(unittest.TestCase): def test_inference_cifar10(self): model_id = "google/ddpm-cifar10-32" unet = UNet2DModel.from_pretrained(model_id) scheduler = PNDMScheduler() pndm = PNDMPipeline(unet=unet, scheduler=scheduler) pndm.to(torch_device) pndm.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) image = pndm(generator=generator, output_type="numpy").images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion_superresolution.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import random
import unittest

import numpy as np
import torch

from diffusers import DDIMScheduler, LDMSuperResolutionPipeline, UNet2DModel, VQModel
from diffusers.utils import PIL_INTERPOLATION
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    floats_tensor,
    load_image,
    nightly,
    require_torch,
    torch_device,
)


enable_full_determinism()


class LDMSuperResolutionPipelineFastTests(unittest.TestCase):
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=6,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    def test_inference_superresolution(self):
        device = "cpu"
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vqvae = self.dummy_vq_model

        ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
        ldm.to(device)
        ldm.set_progress_bar_config(disable=None)

        init_image = self.dummy_image.to(device)

        generator = torch.Generator(device=device).manual_seed(0)
        image = ldm(image=init_image, generator=generator, num_inference_steps=2, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8678, 0.8245, 0.6381, 0.6830, 0.4385, 0.5599, 0.4641, 0.6201, 0.5150])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_inference_superresolution_fp16(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vqvae = self.dummy_vq_model

        # put models in fp16
        unet = unet.half()
        vqvae = vqvae.half()

        ldm = LDMSuperResolutionPipeline(unet=unet, vqvae=vqvae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        init_image = self.dummy_image.to(torch_device)

        image = ldm(init_image, num_inference_steps=2, output_type="numpy").images

        assert image.shape == (1, 64, 64, 3)


@nightly
@require_torch
class LDMSuperResolutionPipelineIntegrationTests(unittest.TestCase):
    def test_inference_superresolution(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool.png"
        )
        init_image = init_image.resize((64, 64), resample=PIL_INTERPOLATION["lanczos"])

        ldm = LDMSuperResolutionPipeline.from_pretrained("duongna/ldm-super-resolution", device_map="auto")
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(image=init_image, generator=generator, num_inference_steps=20, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.7644, 0.7679, 0.7642, 0.7633, 0.7666, 0.7560, 0.7425, 0.7257, 0.6907])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = LDMTextToImagePipeline params = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } batch_params = TEXT_TO_IMAGE_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_inference_text2img(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LDMTextToImagePipeline(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 
0.6136, 0.5014]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @nightly @require_torch_gpu class LDMTextToImagePipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, dtype=torch.float32, seed=0): generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_ldm_default_ddim(self): pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878]) max_diff = np.abs(expected_slice - image_slice).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class LDMTextToImagePipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, dtype=torch.float32, seed=0): generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_ldm_default_ddim(self): pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/animatediff/test_animatediff.py
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AnimateDiffPipeline, AutoencoderKL, DDIMScheduler, MotionAdapter, UNet2DConditionModel, UNetMotionModel, ) from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class AnimateDiffPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AnimateDiffPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback_on_step_end", "callback_on_step_end_tensor_inputs", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", clip_sample=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") motion_adapter = MotionAdapter( block_out_channels=(32, 64), motion_layers_per_block=2, motion_norm_num_groups=2, motion_num_attention_heads=4, ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "motion_adapter": motion_adapter, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 7.5, "output_type": "pt", } return inputs def test_motion_unet_loading(self): components = self.get_dummy_components() pipe = AnimateDiffPipeline(**components) assert isinstance(pipe.unet, UNetMotionModel) @unittest.skip("Attention slicing is not enabled in this pipeline") def test_attention_slicing_forward_pass(self): pass def test_inference_batch_single_identical( self, batch_size=2, expected_max_diff=1e-4, additional_params_copy_to_batched_inputs=["num_inference_steps"], ): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for components in pipe.components.values(): if hasattr(components, "set_default_attn_processor"): components.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = 
self.get_dummy_inputs(torch_device) # Reset generator in case it is has been used in self.get_dummy_inputs inputs["generator"] = self.get_generator(0) logger = logging.get_logger(pipe.__module__) logger.setLevel(level=diffusers.logging.FATAL) # batchify inputs batched_inputs = {} batched_inputs.update(inputs) for name in self.batch_params: if name not in inputs: continue value = inputs[name] if name == "prompt": len_prompt = len(value) batched_inputs[name] = [value[: len_prompt // i] for i in range(1, batch_size + 1)] batched_inputs[name][-1] = 100 * "very long" else: batched_inputs[name] = batch_size * [value] if "generator" in inputs: batched_inputs["generator"] = [self.get_generator(i) for i in range(batch_size)] if "batch_size" in inputs: batched_inputs["batch_size"] = batch_size for arg in additional_params_copy_to_batched_inputs: batched_inputs[arg] = inputs[arg] output = pipe(**inputs) output_batch = pipe(**batched_inputs) assert output_batch[0].shape[0] == batch_size max_diff = np.abs(to_np(output_batch[0][0]) - to_np(output[0][0])).max() assert max_diff < expected_max_diff @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") # pipeline creates a new motion UNet under the hood. So we need to check the device from pipe.components model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [ component.device.type for component in pipe.components.values() if hasattr(component, "device") ] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cuda"))[0] self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # pipeline creates a new motion UNet under the hood. 
So we need to check the dtype from pipe.components model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes)) pipe.to(torch_dtype=torch.float16) model_dtypes = [component.dtype for component in pipe.components.values() if hasattr(component, "dtype")] self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes)) def test_prompt_embeds(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) inputs.pop("prompt") inputs["prompt_embeds"] = torch.randn((1, 4, 32), device=torch_device) pipe(**inputs) def test_free_init(self): components = self.get_dummy_components() pipe: AnimateDiffPipeline = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to(torch_device) inputs_normal = self.get_dummy_inputs(torch_device) frames_normal = pipe(**inputs_normal).frames[0] free_init_generator = torch.Generator(device=torch_device).manual_seed(0) pipe.enable_free_init( num_iters=2, use_fast_sampling=True, method="butterworth", order=4, spatial_stop_frequency=0.25, temporal_stop_frequency=0.25, generator=free_init_generator, ) inputs_enable_free_init = self.get_dummy_inputs(torch_device) frames_enable_free_init = pipe(**inputs_enable_free_init).frames[0] pipe.disable_free_init() inputs_disable_free_init = self.get_dummy_inputs(torch_device) frames_disable_free_init = pipe(**inputs_disable_free_init).frames[0] sum_enabled = np.abs(to_np(frames_normal) - to_np(frames_enable_free_init)).sum() max_diff_disabled = np.abs(to_np(frames_normal) - to_np(frames_disable_free_init)).max() self.assertGreater( sum_enabled, 1e2, "Enabling of FreeInit should lead to results different from the default pipeline results" ) self.assertLess( max_diff_disabled, 1e-4, "Disabling of FreeInit should lead to results similar to the default pipeline results", ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) for component in pipe.components.values(): if hasattr(component, "set_default_attn_processor"): component.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs).frames[0] output_without_offload = ( output_without_offload.cpu() if torch.is_tensor(output_without_offload) else output_without_offload ) pipe.enable_xformers_memory_efficient_attention() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs).frames[0] output_with_offload = ( output_with_offload.cpu() if torch.is_tensor(output_with_offload) else output_without_offload ) max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, 1e-4, "XFormers attention should not affect the inference results") @slow @require_torch_gpu class AnimateDiffPipelineSlowTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_animatediff(self): adapter = MotionAdapter.from_pretrained("guoyww/animatediff-motion-adapter-v1-5-2") pipe = 
AnimateDiffPipeline.from_pretrained("frankjoshua/toonyou_beta6", motion_adapter=adapter) pipe = pipe.to(torch_device) pipe.scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="linear", steps_offset=1, clip_sample=False, ) pipe.enable_vae_slicing() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "night, b&w photo of old house, post apocalypse, forest, storm weather, wind, rocks, 8k uhd, dslr, soft lighting, high quality, film grain" negative_prompt = "bad quality, worse quality" generator = torch.Generator("cpu").manual_seed(0) output = pipe( prompt, negative_prompt=negative_prompt, num_frames=16, generator=generator, guidance_scale=7.5, num_inference_steps=3, output_type="np", ) image = output.frames[0] assert image.shape == (16, 512, 512, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array( [ 0.11357737, 0.11285847, 0.11180121, 0.11084166, 0.11414117, 0.09785956, 0.10742754, 0.10510018, 0.08045256, ] ) assert numpy_cosine_similarity_distance(image_slice.flatten(), expected_slice.flatten()) < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/dance_diffusion/test_dance_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class DanceDiffusionPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = DanceDiffusionPipeline params = UNCONDITIONAL_AUDIO_GENERATION_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "callback", "latents", "callback_steps", "output_type", "num_images_per_prompt", } batch_params = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS test_attention_slicing = False def get_dummy_components(self): torch.manual_seed(0) unet = UNet1DModel( block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16_000, in_channels=2, out_channels=2, flip_sin_to_cos=True, use_timestep_embedding=False, time_embedding_type="fourier", mid_block_type="UNetMidBlock1D", down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"), up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"), ) scheduler = IPNDMScheduler() components = { "unet": unet, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "batch_size": 1, "generator": generator, "num_inference_steps": 4, } return inputs def test_dance_diffusion(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = DanceDiffusionPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) expected_slice = np.array([-0.7265, 1.0000, -0.8388, 0.1175, 0.9498, -1.0000]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_save_load_local(self): return super().test_save_load_local() @skip_mps def test_dict_tuple_outputs_equivalent(self): return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) @skip_mps def test_save_load_optional_components(self): return super().test_save_load_optional_components() @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @nightly @require_torch_gpu class PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test 
super().tearDown() gc.collect() torch.cuda.empty_cache() def test_dance_diffusion(self): device = torch_device pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k") pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) expected_slice = np.array([-0.0192, -0.0231, -0.0318, -0.0059, 0.0002, -0.0020]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2 def test_dance_diffusion_fp16(self): device = torch_device pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k", torch_dtype=torch.float16) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) generator = torch.manual_seed(0) output = pipe(generator=generator, num_inference_steps=100, audio_length_in_s=4.096) audio = output.audios audio_slice = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) expected_slice = np.array([-0.0367, -0.0488, -0.0771, -0.0525, -0.0444, -0.0341]) assert np.abs(audio_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/dit/test_dit.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@nightly
@require_torch_gpu
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )

            assert np.abs((expected_image - image).max()) < 1e-1
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/ddpm/test_ddpm.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

import numpy as np
import torch

from diffusers import DDPMPipeline, DDPMScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device


enable_full_determinism()


class DDPMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_fast_inference(self):
        device = "cpu"
        unet = self.dummy_uncond_unet
        scheduler = DDPMScheduler()

        ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array(
            [9.956e-01, 5.785e-01, 4.675e-01, 9.930e-01, 0.0, 1.000, 1.199e-03, 2.648e-04, 5.101e-04]
        )

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_predict_sample(self):
        unet = self.dummy_uncond_unet
        scheduler = DDPMScheduler(prediction_type="sample")

        ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_eps = ddpm(generator=generator, num_inference_steps=2, output_type="numpy")[0]

        image_slice = image[0, -3:, -3:, -1]
        image_eps_slice = image_eps[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - image_eps_slice.flatten()).max() < tolerance


@slow
@require_torch_gpu
class DDPMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDPMScheduler.from_pretrained(model_id)

        ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4200, 0.3588, 0.1939, 0.3847, 0.3382, 0.2647, 0.4155, 0.3582, 0.3385])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/semantic_stable_diffusion/test_semantic_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel from diffusers.pipelines.semantic_stable_diffusion import SemanticStableDiffusionPipeline as StableDiffusionPipeline from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, nightly, require_torch_gpu, torch_device, ) enable_full_determinism() class SafeDiffusionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_cond_unet(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModel(config) @property def dummy_extractor(self): def extract(*args, **kwargs): class Out: def __init__(self): self.pixel_values = torch.ones([0]) def to(self, device): self.pixel_values.to(device) return self return Out() return extract def test_semantic_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, 
output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5753, 0.6114, 0.5001, 0.5034, 0.5470, 0.4729, 0.4971, 0.4867, 0.4867]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_semantic_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5122, 0.5712, 0.4825, 0.5053, 0.5646, 0.4769, 0.5179, 0.4894, 0.4994]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_semantic_diffusion_no_safety_checker(self): pipe = StableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-lms-pipe", safety_checker=None ) assert isinstance(pipe, StableDiffusionPipeline) assert isinstance(pipe.scheduler, LMSDiscreteScheduler) assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(tmpdirname) pipe = StableDiffusionPipeline.from_pretrained(tmpdirname) # sanity check that the pipeline still works assert pipe.safety_checker is None image = pipe("example prompt", num_inference_steps=2).images[0] assert image is not None @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_semantic_diffusion_fp16(self): """Test that stable diffusion works with fp16""" unet = self.dummy_cond_unet scheduler = PNDMScheduler(skip_prk_steps=True) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # put models in fp16 unet = unet.half() vae = vae.half() bert = bert.half() # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=self.dummy_extractor, ) sd_pipe = 
sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" image = sd_pipe([prompt], num_inference_steps=2, output_type="np").images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class SemanticDiffusionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_positive_guidance(self): torch_device = "cuda" pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "a photo of a cat" edit = { "editing_prompt": ["sunglasses"], "reverse_editing_direction": [False], "edit_warmup_steps": 10, "edit_guidance_scale": 6, "edit_threshold": 0.95, "edit_momentum_scale": 0.5, "edit_mom_beta": 0.6, } seed = 3 guidance_scale = 7 # no sega enabled generator = torch.Generator(torch_device) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.34673113, 0.38492733, 0.37597352, 0.34086335, 0.35650748, 0.35579205, 0.3384763, 0.34340236, 0.3573271, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # with sega enabled # generator = torch.manual_seed(seed) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, **edit, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.41887826, 0.37728766, 0.30138272, 0.41416335, 0.41664985, 0.36283392, 0.36191246, 0.43364465, 0.43001732, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_negative_guidance(self): torch_device = "cuda" pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "an image of a crowded boulevard, realistic, 4k" edit = { "editing_prompt": "crowd, crowded, people", "reverse_editing_direction": True, "edit_warmup_steps": 10, "edit_guidance_scale": 8.3, "edit_threshold": 0.9, "edit_momentum_scale": 0.5, "edit_mom_beta": 0.6, } seed = 9 guidance_scale = 7 # no sega enabled generator = torch.Generator(torch_device) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.43497998, 0.91814065, 0.7540739, 0.55580205, 0.8467265, 0.5389691, 0.62574506, 0.58897763, 0.50926757, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # with sega enabled # generator = torch.manual_seed(seed) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, **edit, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.3089719, 0.30500144, 0.29016042, 0.30630964, 0.325687, 0.29419225, 0.2908091, 0.28723598, 0.27696294, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def 
test_multi_cond_guidance(self): torch_device = "cuda" pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5") pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "a castle next to a river" edit = { "editing_prompt": ["boat on a river, boat", "monet, impression, sunrise"], "reverse_editing_direction": False, "edit_warmup_steps": [15, 18], "edit_guidance_scale": 6, "edit_threshold": [0.9, 0.8], "edit_momentum_scale": 0.5, "edit_mom_beta": 0.6, } seed = 48 guidance_scale = 7 # no sega enabled generator = torch.Generator(torch_device) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.75163555, 0.76037145, 0.61785, 0.9189673, 0.8627701, 0.85189694, 0.8512813, 0.87012076, 0.8312857, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # with sega enabled # generator = torch.manual_seed(seed) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, **edit, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.73553365, 0.7537271, 0.74341905, 0.66480356, 0.6472925, 0.63039416, 0.64812905, 0.6749717, 0.6517102, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_guidance_fp16(self): torch_device = "cuda" pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "a photo of a cat" edit = { "editing_prompt": ["sunglasses"], "reverse_editing_direction": [False], "edit_warmup_steps": 10, "edit_guidance_scale": 6, "edit_threshold": 0.95, "edit_momentum_scale": 0.5, "edit_mom_beta": 0.6, } seed = 3 guidance_scale = 7 # no sega enabled generator = torch.Generator(torch_device) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.34887695, 0.3876953, 0.375, 0.34423828, 0.3581543, 0.35717773, 0.3383789, 0.34570312, 0.359375, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 # with sega enabled # generator = torch.manual_seed(seed) generator.manual_seed(seed) output = pipe( [prompt], generator=generator, guidance_scale=guidance_scale, num_inference_steps=50, output_type="np", width=512, height=512, **edit, ) image = output.images image_slice = image[0, -3:, -3:, -1] expected_slice = [ 0.42285156, 0.36914062, 0.29077148, 0.42041016, 0.41918945, 0.35498047, 0.3618164, 0.4423828, 0.43115234, ] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_video_to_video.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, UNet3DConditionModel, VideoToVideoSDPipeline, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, is_flaky, nightly, numpy_cosine_similarity_distance, skip_mps, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = VideoToVideoSDPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"}) - {"image", "width", "height"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"}) - {"image"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} test_attention_slicing = False # No `output_type`. required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet3DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=32, attention_head_dim=4, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=True, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[ 8, ], in_channels=3, out_channels=3, down_block_types=[ "DownEncoderBlock2D", ], up_block_types=["UpDecoderBlock2D"], latent_channels=4, sample_size=32, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): # 3 frames video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "video": video, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def 
test_text_to_video_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = VideoToVideoSDPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["output_type"] = "np" frames = sd_pipe(**inputs).frames image_slice = frames[0][-3:, -3:, -1] assert frames[0].shape == (32, 32, 3) expected_slice = np.array([162.0, 136.0, 132.0, 140.0, 139.0, 137.0, 169.0, 134.0, 132.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @is_flaky() def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=0.001) @is_flaky() def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent() @is_flaky() def test_save_load_local(self): super().test_save_load_local() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3) # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_consistent(self): pass # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_single_identical(self): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_num_images_per_prompt(self): pass def test_progress_bar(self): return super().test_progress_bar() @nightly @skip_mps class VideoToVideoSDPipelineSlowTests(unittest.TestCase): def test_two_step_model(self): pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16) pipe.enable_model_cpu_offload() # 10 frames generator = torch.Generator(device="cpu").manual_seed(0) video = torch.randn((1, 10, 3, 320, 576), generator=generator) prompt = "Spiderman is surfing" video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames expected_array = np.array([-0.9770508, -0.8027344, -0.62646484, -0.8334961, -0.7573242]) output_array = video_frames.cpu().numpy()[0, 0, 0, 0, -5:] assert numpy_cosine_similarity_distance(expected_array, output_array) < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero_sdxl.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import inspect import io import re import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, TextToVideoZeroSDXLPipeline, UNet2DConditionModel from diffusers.utils.import_utils import is_accelerate_available, is_accelerate_version from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() def to_np(tensor): if isinstance(tensor, torch.Tensor): tensor = tensor.detach().cpu().numpy() return tensor class TextToVideoZeroSDXLPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = TextToVideoZeroSDXLPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS generator_device = "cpu" def get_dummy_components(self, seed=0): torch.manual_seed(seed) unet = UNet2DConditionModel( block_out_channels=(2, 4), layers_per_block=2, sample_size=2, norm_num_groups=2, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) scheduler = DDIMScheduler( num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02, beta_schedule="linear", trained_betas=None, clip_sample=True, set_alpha_to_one=True, steps_offset=0, prediction_type="epsilon", thresholding=False, dynamic_thresholding_ratio=0.995, clip_sample_range=1.0, sample_max_value=1.0, timestep_spacing="leading", rescale_betas_zero_snr=False, ) torch.manual_seed(seed) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(seed) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, 
"scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A panda dancing in Antarctica", "generator": generator, "num_inference_steps": 5, "t0": 1, "t1": 3, "height": 64, "width": 64, "video_length": 3, "output_type": "np", } return inputs def get_generator(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) return generator def test_text_to_video_zero_sdxl(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) inputs = self.get_dummy_inputs(self.generator_device) result = pipe(**inputs).images first_frame_slice = result[0, -3:, -3:, -1] last_frame_slice = result[-1, -3:, -3:, 0] expected_slice1 = np.array([0.48, 0.58, 0.53, 0.59, 0.50, 0.44, 0.60, 0.65, 0.52]) expected_slice2 = np.array([0.66, 0.49, 0.40, 0.70, 0.47, 0.51, 0.73, 0.65, 0.52]) assert np.abs(first_frame_slice.flatten() - expected_slice1).max() < 1e-2 assert np.abs(last_frame_slice.flatten() - expected_slice2).max() < 1e-2 @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_attention_slicing_forward_pass(self): pass def test_cfg(self): sig = inspect.signature(self.pipeline_class.__call__) if "guidance_scale" not in sig.parameters: return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(self.generator_device) inputs["guidance_scale"] = 1.0 out_no_cfg = pipe(**inputs)[0] inputs["guidance_scale"] = 7.5 out_cfg = pipe(**inputs)[0] assert out_cfg.shape == out_no_cfg.shape def test_dict_tuple_outputs_equivalent(self, expected_max_difference=1e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(self.generator_device))[0] output_tuple = pipe(**self.get_dummy_inputs(self.generator_device), return_dict=False)[0] max_diff = np.abs(to_np(output) - to_np(output_tuple)).max() self.assertLess(max_diff, expected_max_difference) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self, expected_max_diff=5e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) components = self.get_dummy_components() pipe_fp16 = self.pipeline_class(**components) pipe_fp16.to(torch_device, torch.float16) pipe_fp16.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(self.generator_device) # # Reset generator in case it is used inside dummy inputs if "generator" in inputs: inputs["generator"] = self.get_generator(self.generator_device) output = pipe(**inputs)[0] fp16_inputs = self.get_dummy_inputs(self.generator_device) # Reset generator in case it is used inside dummy inputs if "generator" in fp16_inputs: 
fp16_inputs["generator"] = self.get_generator(self.generator_device) output_fp16 = pipe_fp16(**fp16_inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_fp16)).max() self.assertLess(max_diff, expected_max_diff, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_consistent(self): pass @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_inference_batch_single_identical(self): pass @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.17.0"), reason="CPU offload is only available with CUDA and `accelerate v0.17.0` or higher", ) def test_model_cpu_offload_forward_pass(self, expected_max_diff=2e-4): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(self.generator_device) output_without_offload = pipe(**inputs)[0] pipe.enable_model_cpu_offload() inputs = self.get_dummy_inputs(self.generator_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(to_np(output_with_offload) - to_np(output_without_offload)).max() self.assertLess(max_diff, expected_max_diff, "CPU offloading should not affect the inference results") @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_pipeline_call_signature(self): pass def test_progress_bar(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) inputs = self.get_dummy_inputs(self.generator_device) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) stderr = stderr.getvalue() # we can't calculate the number of progress steps beforehand e.g. for strength-dependent img2img, # so we just match "5" in "#####| 1/5 [00:01<00:00]" max_steps = re.search("/(.*?) 
", stderr).group(1) self.assertTrue(max_steps is not None and len(max_steps) > 0) self.assertTrue( f"{max_steps}/{max_steps}" in stderr, "Progress bar should be enabled and stopped at the max step" ) pipe.set_progress_bar_config(disable=True) with io.StringIO() as stderr, contextlib.redirect_stderr(stderr): _ = pipe(**inputs) self.assertTrue(stderr.getvalue() == "", "Progress bar should be disabled") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self, expected_max_diff=1e-2): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(self.generator_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(self.generator_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(to_np(output) - to_np(output_loaded)).max() self.assertLess( max_diff, expected_max_diff, "The output of the fp16 pipeline changed after saving and loading." ) @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_save_load_local(self): pass @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_save_load_optional_components(self): pass @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." ) def test_sequential_cpu_offload_forward_pass(self): pass @unittest.skipIf(torch_device != "cuda", reason="CUDA and CPU are required to switch devices") def test_to_device(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) pipe.to("cpu") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cpu" for device in model_devices)) output_cpu = pipe(**self.get_dummy_inputs("cpu"))[0] # generator set to cpu self.assertTrue(np.isnan(output_cpu).sum() == 0) pipe.to("cuda") model_devices = [component.device.type for component in components.values() if hasattr(component, "device")] self.assertTrue(all(device == "cuda" for device in model_devices)) output_cuda = pipe(**self.get_dummy_inputs("cpu"))[0] # generator set to cpu self.assertTrue(np.isnan(to_np(output_cuda)).sum() == 0) @unittest.skip( reason="Cannot call `set_default_attn_processor` as this pipeline uses a specific attention processor." 
) def test_xformers_attention_forwardGenerator_pass(self): pass @nightly @require_torch_gpu class TextToVideoZeroSDXLPipelineSlowTests(unittest.TestCase): def test_full_model(self): model_id = "stabilityai/stable-diffusion-xl-base-1.0" pipe = TextToVideoZeroSDXLPipeline.from_pretrained( model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True ) pipe.enable_model_cpu_offload() pipe.enable_vae_slicing() pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "A panda dancing in Antarctica" result = pipe(prompt=prompt, generator=generator).images first_frame_slice = result[0, -3:, -3:, -1] last_frame_slice = result[-1, -3:, -3:, 0] expected_slice1 = np.array([0.57, 0.57, 0.57, 0.57, 0.57, 0.56, 0.55, 0.56, 0.56]) expected_slice2 = np.array([0.54, 0.53, 0.53, 0.53, 0.53, 0.52, 0.53, 0.53, 0.53]) assert np.abs(first_frame_slice.flatten() - expected_slice1).max() < 1e-2 assert np.abs(last_frame_slice.flatten() - expected_slice2).max() < 1e-2
0
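For orientation, the nightly test above follows the same call pattern as stand-alone inference with this pipeline. A minimal sketch, assuming a CUDA device with enough memory for the SDXL base checkpoint in fp16 (the checkpoint id, memory helpers, and scheduler swap are taken directly from the test):

import torch
from diffusers import DDIMScheduler, TextToVideoZeroSDXLPipeline

model_id = "stabilityai/stable-diffusion-xl-base-1.0"
pipe = TextToVideoZeroSDXLPipeline.from_pretrained(
    model_id, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
)
pipe.enable_model_cpu_offload()  # keeps peak VRAM low, as in the nightly test
pipe.enable_vae_slicing()
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

generator = torch.Generator(device="cpu").manual_seed(0)
# .images is an array of frames in [0, 1]; the test above slices individual frames from it
frames = pipe(prompt="A panda dancing in Antarctica", generator=generator).images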
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, TextToVideoSDPipeline, UNet3DConditionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() @skip_mps class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = TextToVideoSDPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS # No `output_type`. required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet3DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"), up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"), cross_attention_dim=4, attention_head_dim=4, norm_num_groups=2, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=(8,), in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D"], latent_channels=4, sample_size=32, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=4, intermediate_size=16, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "pt", } return inputs def test_text_to_video_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = TextToVideoSDPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["output_type"] = "np" frames = sd_pipe(**inputs).frames image_slice = frames[0][-3:, -3:, -1] assert 
frames[0].shape == (32, 32, 3) expected_slice = np.array([192.0, 44.0, 157.0, 140.0, 108.0, 104.0, 123.0, 144.0, 129.0]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", reason="Feature isn't heavily used. Test in CUDA environment only.") def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2) # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_consistent(self): pass # (todo): sayakpaul @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.") def test_inference_batch_single_identical(self): pass @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.") def test_num_images_per_prompt(self): pass def test_progress_bar(self): return super().test_progress_bar() @slow @skip_mps @require_torch_gpu class TextToVideoSDPipelineSlowTests(unittest.TestCase): def test_two_step_model(self): expected_video = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" ) pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b") pipe = pipe.to(torch_device) prompt = "Spiderman is surfing" generator = torch.Generator(device="cpu").manual_seed(0) video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames video = video_frames.cpu().numpy() assert np.abs(expected_video - video).mean() < 5e-2 def test_two_step_model_with_freeu(self): expected_video = [] pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b") pipe = pipe.to(torch_device) prompt = "Spiderman is surfing" generator = torch.Generator(device="cpu").manual_seed(0) pipe.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4) video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames video = video_frames.cpu().numpy() video = video[0, 0, -3:, -3:, -1].flatten() expected_video = [-0.3102, -0.2477, -0.1772, -0.648, -0.6176, -0.5484, -0.0217, -0.056, -0.0177] assert np.abs(expected_video - video).mean() < 5e-2
0
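The two-step slow test above mirrors ordinary inference with the ModelScope text-to-video checkpoint. A hedged sketch of the same flow (moving the pipeline to CUDA and the step count of 25 are assumptions; the test itself runs only 2 steps against a stored reference):

import torch
from diffusers import TextToVideoSDPipeline

pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
pipe = pipe.to("cuda")  # assumption: a CUDA device is available

generator = torch.Generator(device="cpu").manual_seed(0)
video_frames = pipe(
    "Spiderman is surfing", generator=generator, num_inference_steps=25, output_type="pt"
).frames
video = video_frames.cpu().numpy()  # float frames, indexed the same way as in the tests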
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/text_to_video_synthesis/test_text_to_video_zero.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import DDIMScheduler, TextToVideoZeroPipeline from diffusers.utils.testing_utils import load_pt, nightly, require_torch_gpu from ..test_pipelines_common import assert_mean_pixel_difference @nightly @require_torch_gpu class TextToVideoZeroPipelineSlowTests(unittest.TestCase): def test_full_model(self): model_id = "runwayml/stable-diffusion-v1-5" pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) generator = torch.Generator(device="cuda").manual_seed(0) prompt = "A bear is playing a guitar on Times Square" result = pipe(prompt=prompt, generator=generator).images expected_result = load_pt( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text-to-video/A bear is playing a guitar on Times Square.pt" ) assert_mean_pixel_difference(result, expected_result)
0
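This nightly test is already close to minimal usage; the sketch below restates it outside unittest, assuming a CUDA device and the fp16 weights of the Stable Diffusion 1.5 checkpoint named in the test:

import torch
from diffusers import DDIMScheduler, TextToVideoZeroPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)

generator = torch.Generator(device="cuda").manual_seed(0)
# .images holds the generated frames; the test compares them to a stored .pt reference
result = pipe(prompt="A bear is playing a guitar on Times Square", generator=generator).images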
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils.testing_utils import enable_full_determinism, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) model = CLIPVisionModelWithProjection(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer image_processor = self.dummy_image_processor scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, ) components = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": 
image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "horse", "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22PriorPipeline params = ["prompt"] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "num_images_per_prompt", "generator", "num_inference_steps", "latents", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] callback_cfg_params = ["prompt_embeds", "text_encoder_hidden_states", "text_mask"] test_xformers_attention = False def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_prior(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeds image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -10:] image_from_tuple_slice = image_from_tuple[0, -10:] assert image.shape == (1, 32) expected_slice = np.array( [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-3) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, ) # override default test because no output_type "latent", use "pt" instead def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["num_inference_steps"] = 2 inputs["output_type"] = "pt" output = pipe(**inputs)[0] assert output.abs().sum() == 0
0
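Outside the dummy-component fast tests, this prior is normally paired with one of the Kandinsky 2.2 decoder pipelines covered further below. A sketch assuming the public prior checkpoint, a CUDA device, and an arbitrary step count (the decoder integration tests below use exactly this `.to_tuple()` pattern):

import torch
from diffusers import KandinskyV22PriorPipeline

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.to("cuda")

generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, negative_image_emb = pipe_prior(
    "A robot, 4k photo", generator=generator, num_inference_steps=25, negative_prompt=""
).to_tuple()
# image_emb / negative_image_emb feed the image_embeds / negative_image_embeds arguments
# of the decoder pipelines (text-to-image, img2img, controlnet) tested below.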
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_prior_emb2emb.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from PIL import Image from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyV22PriorEmb2EmbPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class KandinskyV22PriorEmb2EmbPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22PriorEmb2EmbPipeline params = ["prompt", "image"] batch_params = ["prompt", "image"] required_optional_params = [ "num_images_per_prompt", "strength", "generator", "num_inference_steps", "negative_prompt", "guidance_scale", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_tokenizer(self): tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") return tokenizer @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) return CLIPTextModelWithProjection(config) @property def dummy_prior(self): torch.manual_seed(0) model_kwargs = { "num_attention_heads": 2, "attention_head_dim": 12, "embedding_dim": self.text_embedder_hidden_size, "num_layers": 1, } model = PriorTransformer(**model_kwargs) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape)) return model @property def dummy_image_encoder(self): torch.manual_seed(0) config = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, ) model = CLIPVisionModelWithProjection(config) return model @property def dummy_image_processor(self): image_processor = CLIPImageProcessor( crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, ) return image_processor def get_dummy_components(self): prior = self.dummy_prior image_encoder = self.dummy_image_encoder 
text_encoder = self.dummy_text_encoder tokenizer = self.dummy_tokenizer image_processor = self.dummy_image_processor scheduler = UnCLIPScheduler( variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, ) components = { "prior": prior, "image_encoder": image_encoder, "text_encoder": text_encoder, "tokenizer": tokenizer, "scheduler": scheduler, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) inputs = { "prompt": "horse", "image": init_image, "strength": 0.5, "generator": generator, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_kandinsky_prior_emb2emb(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.image_embeds image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -10:] image_from_tuple_slice = image_from_tuple[0, -10:] assert image.shape == (1, 32) expected_slice = np.array( [ 0.1071284, 1.3330271, 0.61260223, -0.6691065, -0.3846852, -1.0303661, 0.22716111, 0.03348901, 0.30040675, -0.24805029, ] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @skip_mps def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=1e-2) @skip_mps def test_attention_slicing_forward_pass(self): test_max_difference = torch_device == "cpu" test_mean_pixel_difference = False self._test_attention_slicing_forward_pass( test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
0
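The emb2emb prior blends a CLIP image embedding of `image` with the text embedding of `prompt`, weighted by `strength`. A sketch of that call, reusing the checkpoint and cat image from the controlnet img2img integration test below (device placement and step count are assumptions):

import torch
from diffusers import KandinskyV22PriorEmb2EmbPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
pipe_prior.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinsky/cat.png"
)
generator = torch.Generator(device="cpu").manual_seed(0)
# strength balances the image embedding against the prompt embedding (value taken from the
# integration test below)
image_emb, zero_image_emb = pipe_prior(
    "A robot, 4k photo", image=init_image, strength=0.85, generator=generator, negative_prompt=""
).to_tuple()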
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22ControlnetImg2ImgPipeline, KandinskyV22PriorEmb2EmbPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class KandinskyV22ControlnetImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22ControlnetImg2ImgPipeline params = ["image_embeds", "negative_image_embeds", "image", "hint"] batch_params = ["image_embeds", "negative_image_embeds", "image", "hint"] required_optional_params = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq ddim_config = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.00085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": 
False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } scheduler = DDIMScheduler(**ddim_config) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) # create hint hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs def test_kandinsky_controlnet_img2img(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.54985034, 0.55509365, 0.52561504, 0.5570494, 0.5593818, 0.5263979, 0.50285643, 0.5069846, 0.51196736] ) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1.75e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) @nightly @require_torch_gpu class KandinskyV22ControlnetImg2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_controlnet_img2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) init_image = init_image.resize((512, 512)) hint = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) hint = torch.from_numpy(np.array(hint)).float() / 255.0 hint = hint.permute(2, 0, 1).unsqueeze(0) prompt = "A robot, 4k photo" pipe_prior = KandinskyV22PriorEmb2EmbPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22ControlnetImg2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ) pipeline = 
pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, image=init_image, strength=0.85, generator=generator, negative_prompt="", ).to_tuple() output = pipeline( image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, height=512, width=512, strength=0.5, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
0
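One reusable detail from the integration test above is how the depth `hint` is prepared: the pipeline expects a float tensor in [0, 1] with shape (batch, channels, height, width). A small sketch of just that conversion, reusing the test asset URL for illustration:

import numpy as np
import torch
from diffusers.utils import load_image

hint = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinskyv22/hint_image_cat.png"
)
hint = torch.from_numpy(np.array(hint)).float() / 255.0  # uint8 HWC image -> float in [0, 1]
hint = hint.permute(2, 0, 1).unsqueeze(0)  # -> 1 x 3 x H x W, as passed to the pipeline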
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_controlnet.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class KandinskyV22ControlnetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22ControlnetPipeline params = ["image_embeds", "negative_image_embeds", "hint"] batch_params = ["image_embeds", "negative_image_embeds", "hint"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 100 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 8, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image_hint", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) 
components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create hint hint = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "hint": hint, "generator": generator, "height": 64, "width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs def test_kandinsky_controlnet(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) @nightly @require_torch_gpu class KandinskyV22ControlnetPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_controlnet(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) hint = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) hint = torch.from_numpy(np.array(hint)).float() / 255.0 hint = hint.permute(2, 0, 1).unsqueeze(0) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22ControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) prompt = "A robot, 4k photo" generator = torch.Generator(device="cuda").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cuda").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, hint=hint, generator=generator, num_inference_steps=100, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
0
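Putting the prior and the depth-conditioned decoder together gives the end-to-end flow the integration test above checks. A hedged sketch (step counts are arbitrary; checkpoints, hint preparation, and argument names come from the test):

import numpy as np
import torch
from diffusers import KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22ControlnetPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")

hint = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinskyv22/hint_image_cat.png"
)
hint = torch.from_numpy(np.array(hint)).float() / 255.0
hint = hint.permute(2, 0, 1).unsqueeze(0)

generator = torch.Generator(device="cuda").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
    "A robot, 4k photo", generator=generator, num_inference_steps=25, negative_prompt=""
).to_tuple()
image = pipe(
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    hint=hint,
    generator=generator,
    num_inference_steps=50,
    output_type="np",
).images[0]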
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq ddim_config = { "num_train_timesteps": 1000, "beta_schedule": "linear", "beta_start": 0.00085, "beta_end": 0.012, "clip_sample": False, "set_alpha_to_one": False, "steps_offset": 0, "prediction_type": "epsilon", "thresholding": False, } scheduler = DDIMScheduler(**ddim_config) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = 
Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 10, "guidance_scale": 7.0, "strength": 0.2, "output_type": "np", } return inputs class KandinskyV22Img2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Img2ImgPipeline params = ["image_embeds", "negative_image_embeds", "image"] batch_params = [ "image_embeds", "negative_image_embeds", "image", ] required_optional_params = [ "generator", "height", "width", "strength", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeds"] def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_img2img(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5712, 0.5443, 0.4725, 0.6195, 0.5184, 0.4651, 0.4473, 0.4590, 0.5016]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) @slow @require_torch_gpu class KandinskyV22Img2ImgPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_img2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_img2img_frog.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) prompt = "A red cartoon frog, 4k" pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22Img2ImgPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np", ) image = output.images[0] assert image.shape 
== (768, 768, 3) assert_mean_pixel_difference(image, expected_image)
0
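The img2img integration test above is itself the canonical usage pattern: prior embeddings plus an init image at low strength. Restated outside unittest (assumes a CUDA device; the checkpoints, arguments, and 768x768 output size come from the test):

import torch
from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
from diffusers.utils import load_image

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/kandinsky/cat.png"
)
generator = torch.Generator(device="cpu").manual_seed(0)
image_emb, zero_image_emb = pipe_prior(
    "A red cartoon frog, 4k", generator=generator, num_inference_steps=5, negative_prompt=""
).to_tuple()
image = pipe(
    image=init_image,
    image_embeds=image_emb,
    negative_image_embeds=zero_image_emb,
    generator=generator,
    num_inference_steps=100,
    height=768,
    width=768,
    strength=0.2,
    output_type="np",
).images[0]  # numpy array of shape (768, 768, 3)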
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_combined.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from diffusers import ( KandinskyV22CombinedPipeline, KandinskyV22Img2ImgCombinedPipeline, KandinskyV22InpaintCombinedPipeline, ) from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin from .test_kandinsky import Dummies from .test_kandinsky_img2img import Dummies as Img2ImgDummies from .test_kandinsky_inpaint import Dummies as InpaintDummies from .test_kandinsky_prior import Dummies as PriorDummies enable_full_determinism() class KandinskyV22PipelineCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22CombinedPipeline params = [ "prompt", ] batch_params = ["prompt", "negative_prompt"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = True callback_cfg_params = ["image_embds"] def get_dummy_components(self): dummy = Dummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update( { "height": 64, "width": 64, } ) return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3013, 0.0471, 0.5176, 0.1817, 0.2566, 0.7076, 0.6712, 0.4421, 0.7503]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images 
image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-3) def test_callback_inputs(self): pass def test_callback_cfg(self): pass class KandinskyV22PipelineImg2ImgCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Img2ImgCombinedPipeline params = ["prompt", "image"] batch_params = ["prompt", "negative_prompt", "image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embds"] def get_dummy_components(self): dummy = Img2ImgDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = Img2ImgDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4353, 0.4710, 0.5128, 0.4806, 0.5054, 0.5348, 0.5224, 0.4603, 0.5025]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert 
np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=2e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_callback_inputs(self): pass def test_callback_cfg(self): pass class KandinskyV22PipelineInpaintCombinedFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22InpaintCombinedPipeline params = ["prompt", "image", "mask_image"] batch_params = ["prompt", "negative_prompt", "image", "mask_image"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "negative_prompt", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False def get_dummy_components(self): dummy = InpaintDummies() prior_dummy = PriorDummies() components = dummy.get_dummy_components() components.update({f"prior_{k}": v for k, v in prior_dummy.get_dummy_components().items()}) return components def get_dummy_inputs(self, device, seed=0): prior_dummy = PriorDummies() dummy = InpaintDummies() inputs = prior_dummy.get_dummy_inputs(device=device, seed=seed) inputs.update(dummy.get_dummy_inputs(device=device, seed=seed)) inputs.pop("image_embeds") inputs.pop("negative_image_embeds") return inputs def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5039, 0.4926, 0.4898, 0.4978, 0.4838, 0.4942, 0.4738, 0.4702, 0.4816]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" @require_torch_gpu def test_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_inference_batch_single_identical(self): 
super().test_inference_batch_single_identical(expected_max_diff=1e-2) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=5e-4) def test_model_cpu_offload_forward_pass(self): super().test_model_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_callback_inputs(self): pass def test_callback_cfg(self): pass
0
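The combined Kandinsky 2.2 fast tests above assemble their pipeline components by merging the decoder dummies with the prior dummies under a "prior_" key prefix, and the img2img/inpaint variants drop the prior-produced embeddings from the call kwargs. The following is a schematic sketch of that merging pattern only, using plain placeholder strings instead of real diffusers modules, so the dictionary handling can be run and checked in isolation.

# Schematic illustration (placeholder strings stand in for real pipeline modules):
# the combined tests build one kwargs dict that covers both sub-pipelines by
# namespacing every prior component under a "prior_" prefix.
decoder_components = {"unet": "decoder-unet", "movq": "movq", "scheduler": "ddim"}
prior_components = {"prior": "prior-transformer", "image_encoder": "clip-vision"}

combined = dict(decoder_components)
combined.update({f"prior_{k}": v for k, v in prior_components.items()})

# The img2img/inpaint combined tests additionally remove the embeddings that the
# prior dummies put into the inputs, because the combined pipeline computes them itself.
inputs = {"prompt": "horse", "image_embeds": "...", "negative_image_embeds": "...", "height": 64, "width": 64}
for key in ("image_embeds", "negative_image_embeds"):
    inputs.pop(key, None)

assert "prior_prior" in combined and "prior_image_encoder" in combined
assert "image_embeds" not in inputs and "negative_image_embeds" not in inputs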
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyV22InpaintPipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 9, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) # create init_image image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256)) 
# create mask mask = np.zeros((64, 64), dtype=np.float32) mask[:32, :32] = 1 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": init_image, "mask_image": mask, "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, "width": 64, "num_inference_steps": 2, "guidance_scale": 4.0, "output_type": "np", } return inputs class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22InpaintPipeline params = ["image_embeds", "negative_image_embeds", "image", "mask_image"] batch_params = [ "image_embeds", "negative_image_embeds", "image", "mask_image", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] test_xformers_attention = False callback_cfg_params = ["image_embeds", "masked_image", "mask_image"] def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def test_kandinsky_inpaint(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] ) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) def test_model_cpu_offload_forward_pass(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=5e-4) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) # override default test because we need to zero out mask too in order to make sure final latent is all zero def test_callback_inputs(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: 
{missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["latents"] = torch.zeros_like(callback_kwargs["latents"]) callback_kwargs["mask_image"] = torch.zeros_like(callback_kwargs["mask_image"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_inpaint(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy" ) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" ) mask = np.zeros((768, 768), dtype=np.float32) mask[:250, 250:-250] = 1 prompt = "a hat" pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22InpaintPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() output = pipeline( image=init_image, mask_image=mask, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, output_type="np", ) image = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(image, expected_image)
0
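The inpaint dummies and the slow integration test above both build their masks as plain float32 numpy arrays where 1.0 marks the region to repaint. This is a minimal, runnable sketch of that mask construction, using only numpy, to make the masked regions and their sizes explicit.

import numpy as np

# Fast-test mask: a 64x64 float32 array with the top-left quadrant set to 1,
# matching mask[:32, :32] = 1 in get_dummy_inputs.
mask = np.zeros((64, 64), dtype=np.float32)
mask[:32, :32] = 1
assert mask.shape == (64, 64)
assert mask.dtype == np.float32
assert mask.sum() == 32 * 32  # exactly one quadrant is masked

# Slow-test mask: the same idea at full resolution, a horizontal band near the top
# of a 768x768 canvas (mask[:250, 250:-250] = 1) where the hat is inpainted.
big_mask = np.zeros((768, 768), dtype=np.float32)
big_mask[:250, 250:-250] = 1
assert big_mask.sum() == 250 * (768 - 2 * 250)  # 250 rows by 268 columns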
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/kandinsky2_2/test_kandinsky.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class Dummies: @property def text_embedder_hidden_size(self): return 32 @property def time_input_dim(self): return 32 @property def block_out_channels_0(self): return self.time_input_dim @property def time_embed_dim(self): return self.time_input_dim * 4 @property def cross_attention_dim(self): return 32 @property def dummy_unet(self): torch.manual_seed(0) model_kwargs = { "in_channels": 4, # Out channels is double in channels because predicts mean and variance "out_channels": 8, "addition_embed_type": "image", "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"), "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"), "mid_block_type": "UNetMidBlock2DSimpleCrossAttn", "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2), "layers_per_block": 1, "encoder_hid_dim": self.text_embedder_hidden_size, "encoder_hid_dim_type": "image_proj", "cross_attention_dim": self.cross_attention_dim, "attention_head_dim": 4, "resnet_time_scale_shift": "scale_shift", "class_embed_type": None, } model = UNet2DConditionModel(**model_kwargs) return model @property def dummy_movq_kwargs(self): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def dummy_movq(self): torch.manual_seed(0) model = VQModel(**self.dummy_movq_kwargs) return model def get_dummy_components(self): unet = self.dummy_unet movq = self.dummy_movq scheduler = DDIMScheduler( num_train_timesteps=1000, beta_schedule="linear", beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type="epsilon", thresholding=False, ) components = { "unet": unet, "scheduler": scheduler, "movq": movq, } return components def get_dummy_inputs(self, device, seed=0): image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device) negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to( device ) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image_embeds": image_embeds, "negative_image_embeds": negative_image_embeds, "generator": generator, "height": 64, 
"width": 64, "guidance_scale": 4.0, "num_inference_steps": 2, "output_type": "np", } return inputs class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = KandinskyV22Pipeline params = [ "image_embeds", "negative_image_embeds", ] batch_params = ["image_embeds", "negative_image_embeds"] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] callback_cfg_params = ["image_embds"] test_xformers_attention = False def get_dummy_inputs(self, device, seed=0): dummies = Dummies() return dummies.get_dummy_inputs(device=device, seed=seed) def get_dummy_components(self): dummies = Dummies() return dummies.get_dummy_components() def test_kandinsky(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(device)) image = output.images image_from_tuple = pipe( **self.get_dummy_inputs(device), return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.3420, 0.9505, 0.3919, 1.0000, 0.5188, 0.3109, 0.6139, 0.5624, 0.6811]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def test_float16_inference(self): super().test_float16_inference(expected_max_diff=1e-1) @slow @require_torch_gpu class KandinskyV22PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_kandinsky_text2img(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" ) pipe_prior = KandinskyV22PriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16 ) pipe_prior.to(torch_device) pipeline = KandinskyV22Pipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16 ) pipeline = pipeline.to(torch_device) pipeline.set_progress_bar_config(disable=None) prompt = "red cat, 4k photo" generator = torch.Generator(device="cuda").manual_seed(0) image_emb, zero_image_emb = pipe_prior( prompt, generator=generator, num_inference_steps=5, negative_prompt="", ).to_tuple() generator = torch.Generator(device="cuda").manual_seed(0) output = pipeline( image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(image, expected_image)
0
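The fast tests above all use the same numerical check: instead of storing a full reference image, they keep a 3x3 patch of the last channel from the bottom-right corner and compare it to a hard-coded slice within a small tolerance. A minimal sketch of that pattern, with a synthetic constant image standing in for the pipeline output, is shown below.

import numpy as np

# Stand-ins for pipe(**inputs).images and for the hard-coded reference slice.
image = np.full((1, 64, 64, 3), 0.5, dtype=np.float32)
expected_slice = np.full(9, 0.5, dtype=np.float32)

image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
assert image.shape == (1, 64, 64, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2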
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/audioldm/test_audioldm.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNet2DConditionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, nightly, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AudioLDMPipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32, ) text_encoder = ClapTextModelWithProjection(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": 
generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_audioldm_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = audioldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] ) assert np.abs(audio_slice - expected_slice).max() < 1e-2 def test_audioldm_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = audioldm_pipe.tokenizer( prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = audioldm_pipe.text_encoder( text_inputs, ) prompt_embeds = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state prompt_embeds = F.normalize(prompt_embeds, dim=-1) inputs["prompt_embeds"] = prompt_embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm_negative_prompt_embeds(self): components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = audioldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = audioldm_pipe.tokenizer( p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) text_embeds = audioldm_pipe.text_encoder( text_inputs, ) text_embeds = text_embeds.text_embeds # additional L_2 normalization over each hidden-state text_embeds = F.normalize(text_embeds, dim=-1) embeds.append(text_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = audioldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_audioldm_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = audioldm_pipe(**inputs, 
negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] ) assert np.abs(audio_slice - expected_slice).max() < 1e-2 def test_audioldm_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = audioldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 2 audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = audioldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_audioldm_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = audioldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = audioldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_audioldm_vocoder_model_in_dim(self): components = self.get_dummy_components() audioldm_pipe = AudioLDMPipeline(**components) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = audioldm_pipe.vocoder.config config.model_in_dim *= 2 audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = audioldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) @nightly class 
AudioLDMPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm(self): audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm") audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81920 audio_slice = audio[77230:77240] expected_slice = np.array( [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-2 @nightly class AudioLDMPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_audioldm_lms(self): audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm") audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config) audioldm_pipe = audioldm_pipe.to(torch_device) audioldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = audioldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81920 audio_slice = audio[27780:27790] expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 3e-2
0
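test_audioldm_audio_length_in_s above checks the generated waveform length by dividing it by the vocoder sampling rate. Under the dummy vocoder's 16 kHz sampling rate, that relationship reduces to simple arithmetic, sketched here so the expected sample counts (256 and 512 samples for 0.016 s and 0.032 s) are easy to verify independently of the pipeline.

# Back-of-the-envelope check of the audio-length assertion: the waveform should
# contain audio_length_in_s * sampling_rate samples.
def expected_num_samples(audio_length_in_s: float, sampling_rate: int) -> int:
    return int(audio_length_in_s * sampling_rate)

sampling_rate = 16_000  # matches the dummy SpeechT5HifiGanConfig above
assert expected_num_samples(0.016, sampling_rate) == 256  # the fast-test waveform length
assert expected_num_samples(0.032, sampling_rate) == 512
assert 256 / sampling_rate == 0.016                       # the form the test asserts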
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/musicldm/test_musicldm.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import ( ClapAudioConfig, ClapConfig, ClapFeatureExtractor, ClapModel, ClapTextConfig, RobertaTokenizer, SpeechT5HifiGan, SpeechT5HifiGanConfig, ) from diffusers import ( AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, MusicLDMPipeline, PNDMScheduler, UNet2DConditionModel, ) from diffusers.utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class MusicLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = MusicLDMPipeline params = TEXT_TO_AUDIO_PARAMS batch_params = TEXT_TO_AUDIO_BATCH_PARAMS required_optional_params = frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_branch_config = ClapTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=16, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=2, num_hidden_layers=2, pad_token_id=1, vocab_size=1000, ) audio_branch_config = ClapAudioConfig( spec_size=64, window_size=4, num_mel_bins=64, intermediate_size=37, layer_norm_eps=1e-05, depths=[2, 2], num_attention_heads=[2, 2], num_hidden_layers=2, hidden_size=192, patch_size=2, patch_stride=2, patch_embed_input_channels=4, ) text_encoder_config = ClapConfig.from_text_audio_configs( text_config=text_branch_config, audio_config=audio_branch_config, projection_dim=32 ) text_encoder = ClapModel(text_encoder_config) tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77) feature_extractor = ClapFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-ClapModel", hop_length=7900 ) torch.manual_seed(0) vocoder_config = SpeechT5HifiGanConfig( model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], 
resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False, ) vocoder = SpeechT5HifiGan(vocoder_config) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "feature_extractor": feature_extractor, "vocoder": vocoder, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A hammer hitting a wooden surface", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, } return inputs def test_musicldm_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = musicldm_pipe(**inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0027, -0.0036, -0.0037, -0.0020, -0.0035, -0.0019, -0.0037, -0.0020, -0.0038, -0.0019] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_musicldm_prompt_embeds(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = musicldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] text_inputs = musicldm_pipe.tokenizer( prompt, padding="max_length", max_length=musicldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) prompt_embeds = musicldm_pipe.text_encoder.get_text_features(text_inputs) inputs["prompt_embeds"] = prompt_embeds # forward output = musicldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_musicldm_negative_prompt_embeds(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] # forward output = musicldm_pipe(**inputs) audio_1 = output.audios[0] inputs = self.get_dummy_inputs(torch_device) prompt = 3 * [inputs.pop("prompt")] embeds = [] for p in [prompt, negative_prompt]: text_inputs = musicldm_pipe.tokenizer( p, padding="max_length", max_length=musicldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_inputs = text_inputs["input_ids"].to(torch_device) text_embeds = musicldm_pipe.text_encoder.get_text_features( text_inputs, ) embeds.append(text_embeds) inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds # forward output = musicldm_pipe(**inputs) audio_2 = output.audios[0] assert np.abs(audio_1 - audio_2).max() < 1e-2 def test_musicldm_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator 
components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "egg cracking" output = musicldm_pipe(**inputs, negative_prompt=negative_prompt) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) == 256 audio_slice = audio[:10] expected_slice = np.array( [-0.0027, -0.0036, -0.0037, -0.0019, -0.0035, -0.0018, -0.0037, -0.0021, -0.0038, -0.0018] ) assert np.abs(audio_slice - expected_slice).max() < 1e-4 def test_musicldm_num_waveforms_per_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(device) musicldm_pipe.set_progress_bar_config(disable=None) prompt = "A hammer hitting a wooden surface" # test num_waveforms_per_prompt=1 (default) audios = musicldm_pipe(prompt, num_inference_steps=2).audios assert audios.shape == (1, 256) # test num_waveforms_per_prompt=1 (default) for batch of prompts batch_size = 2 audios = musicldm_pipe([prompt] * batch_size, num_inference_steps=2).audios assert audios.shape == (batch_size, 256) # test num_waveforms_per_prompt for single prompt num_waveforms_per_prompt = 2 audios = musicldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios assert audios.shape == (num_waveforms_per_prompt, 256) # test num_waveforms_per_prompt for batch of prompts batch_size = 2 audios = musicldm_pipe( [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 256) def test_musicldm_audio_length_in_s(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) vocoder_sampling_rate = musicldm_pipe.vocoder.config.sampling_rate inputs = self.get_dummy_inputs(device) output = musicldm_pipe(audio_length_in_s=0.016, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.016 output = musicldm_pipe(audio_length_in_s=0.032, **inputs) audio = output.audios[0] assert audio.ndim == 1 assert len(audio) / vocoder_sampling_rate == 0.032 def test_musicldm_vocoder_model_in_dim(self): components = self.get_dummy_components() musicldm_pipe = MusicLDMPipeline(**components) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) prompt = ["hey"] output = musicldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape assert audio_shape == (1, 256) config = musicldm_pipe.vocoder.config config.model_in_dim *= 2 musicldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device) output = musicldm_pipe(prompt, num_inference_steps=1) audio_shape = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 256) def test_attention_slicing_forward_pass(self): self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False) def test_inference_batch_single_identical(self): 
self._test_inference_batch_single_identical() @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False) def test_to_dtype(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.set_progress_bar_config(disable=None) # The method component.dtype returns the dtype of the first parameter registered in the model, not the # dtype of the entire model. In the case of CLAP, the first parameter is a float64 constant (logit scale) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} # Without the logit scale parameters, everything is float32 model_dtypes.pop("text_encoder") self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # the CLAP sub-models are float32 model_dtypes["clap_text_branch"] = components["text_encoder"].text_model.dtype self.assertTrue(all(dtype == torch.float32 for dtype in model_dtypes.values())) # Once we send to fp16, all params are in half-precision, including the logit scale pipe.to(torch_dtype=torch.float16) model_dtypes = {key: component.dtype for key, component in components.items() if hasattr(component, "dtype")} self.assertTrue(all(dtype == torch.float16 for dtype in model_dtypes.values())) @nightly @require_torch_gpu class MusicLDMPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A hammer hitting a wooden surface", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 2.5, } return inputs def test_musicldm(self): musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm") musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 audio = musicldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[8680:8690] expected_slice = np.array( [-0.1042, -0.1068, -0.1235, -0.1387, -0.1428, -0.136, -0.1213, -0.1097, -0.0967, -0.0945] ) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3 def test_musicldm_lms(self): musicldm_pipe = MusicLDMPipeline.from_pretrained("cvssp/musicldm") musicldm_pipe.scheduler = LMSDiscreteScheduler.from_config(musicldm_pipe.scheduler.config) musicldm_pipe = musicldm_pipe.to(torch_device) musicldm_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) audio = musicldm_pipe(**inputs).audios[0] assert audio.ndim == 1 assert len(audio) == 81952 # check the portion of the generated audio with the largest dynamic range (reduces flakiness) audio_slice = audio[58020:58030] expected_slice = np.array([0.3592, 0.3477, 0.4084, 0.4665, 0.5048, 0.5891, 0.6461, 0.5579, 0.4595, 0.4403]) max_diff = np.abs(expected_slice - audio_slice).max() assert max_diff < 1e-3
0
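test_musicldm_num_waveforms_per_prompt (and its AudioLDM counterpart) exercise the batching rule that the pipeline returns one waveform per (prompt, waveform index) pair, so the leading dimension of the returned audio batch is batch_size * num_waveforms_per_prompt. A small sketch of that arithmetic, with the 256-sample fast-test waveform length as the default, is given below.

# Expected shape of pipe(...).audios for the fast tests above.
def expected_audio_batch_shape(batch_size: int, num_waveforms_per_prompt: int, num_samples: int = 256):
    return (batch_size * num_waveforms_per_prompt, num_samples)

assert expected_audio_batch_shape(1, 1) == (1, 256)  # single prompt, default
assert expected_audio_batch_shape(2, 1) == (2, 256)  # batch of prompts
assert expected_audio_batch_shape(1, 2) == (2, 256)  # multiple waveforms per prompt
assert expected_audio_batch_shape(2, 2) == (4, 256)  # both combined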
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import tempfile import traceback import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, EulerDiscreteScheduler, LCMScheduler, StableDiffusionControlNetPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, load_numpy, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() # Will be run via run_test_in_subprocess def _test_stable_diffusion_compile(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.to("cuda") pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) pipe.controlnet.to(memory_format=torch.channels_last) pipe.controlnet = torch.compile(pipe.controlnet, mode="reduce-overhead", fullgraph=True) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe(prompt, image, num_inference_steps=10, generator=generator, output_type="np") image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out_full.npy" ) expected_image = np.resize(expected_image, (512, 512, 3)) assert np.abs(expected_image - image).max() < 1.0 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class ControlNetPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = 
UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_controlnet_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_controlnet_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent 
torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.52700454, 0.3930534, 0.25509018, 0.7132304, 0.53696585, 0.46568912, 0.7095368, 0.7059624, 0.4744786] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * 
controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass def test_inference_multiple_prompt_input(self): device = "cpu" components = self.get_dummy_components() sd_pipe = StableDiffusionControlNetPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"], inputs["prompt"]] inputs["image"] = [inputs["image"], inputs["image"]] output = sd_pipe(**inputs) image = output.images assert image.shape == (2, 64, 64, 3) image_1, image_2 = image # make sure that the outputs are different assert np.sum(np.abs(image_1 - image_2)) > 1e-3 # multiple prompts, single image conditioning inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"], inputs["prompt"]] output_1 = sd_pipe(**inputs) assert np.abs(image - output_1.images).max() < 1e-3 class StableDiffusionMultiControlNetOneModelPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetPipeline params = 
TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe( **inputs, control_guidance_start=[0.1], control_guidance_end=[0.2], )[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] # make 
sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny_out.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_depth(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-depth") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth_out.npy" ) assert np.abs(expected_image - image).max() < 8e-1 def test_hed(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-hed") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "oil painting of handsome old man, masterpiece" image = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (704, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/man_hed_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_mlsd(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-mlsd") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "room" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (704, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/room_mlsd_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2 def test_normal(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-normal") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "cute toy" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/cute_toy_normal_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2 def test_openpose(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Chef in the kitchen" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/chef_pose_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_scribble(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-scribble") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(5) prompt = "bag" image = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (640, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bag_scribble_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_seg(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(5) prompt = "house" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg_out.npy" ) assert np.abs(expected_image - image).max() < 8e-2 def test_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-seg") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() prompt = "house" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/house_seg.png" ) _ = pipe( prompt, image, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 4 * 10**9 def test_canny_guess_mode(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=3.0, guess_mode=True, ) image = output.images[0] assert image.shape == (768, 512, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.2724, 0.2846, 0.2724, 0.3843, 0.3682, 0.2736, 0.4675, 0.3862, 0.2887]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_canny_guess_mode_euler(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "" image = 
load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=3.0, guess_mode=True, ) image = output.images[0] assert image.shape == (768, 512, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.1655, 0.1721, 0.1623, 0.1685, 0.1711, 0.1646, 0.1651, 0.1631, 0.1494]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 @require_python39_or_higher @require_torch_2 def test_stable_diffusion_compile(self): run_test_in_subprocess(test_case=self, target_func=_test_stable_diffusion_compile, inputs=None) def test_v11_shuffle_global_pool_conditions(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11e_sd15_shuffle") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "New York" image = load_image( "https://huggingface.co/lllyasviel/control_v11e_sd15_shuffle/resolve/main/images/control.png" ) output = pipe( prompt, image, generator=generator, output_type="np", num_inference_steps=3, guidance_scale=7.0, ) image = output.images[0] assert image.shape == (512, 640, 3) image_slice = image[-3:, -3:, -1] expected_slice = np.array([0.1338, 0.1597, 0.1202, 0.1687, 0.1377, 0.1017, 0.2070, 0.1574, 0.1348]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) output = pipe(prompt, image, generator=generator, output_type="np", num_inference_steps=3) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3 @slow @require_torch_gpu class StableDiffusionMultiControlNetPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_pose_and_canny(self): controlnet_canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") controlnet_pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose") pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=[controlnet_pose, controlnet_canny] ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird and Chef" image_canny = load_image( 
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) image_pose = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) output = pipe(prompt, [image_pose, image_canny], generator=generator, output_type="np", num_inference_steps=3) image = output.images[0] assert image.shape == (768, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose_canny_out.npy" ) assert np.abs(expected_image - image).max() < 5e-2
0
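The slow tests in the record above load real checkpoints and drive StableDiffusionControlNetPipeline end to end with a conditioning image. Below is a minimal usage sketch of that same pattern, using only calls and checkpoint names that appear in the tests; the fp16 dtype, the step count of 20, and the output filename are illustrative choices rather than values from the test suite.

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
from diffusers.utils import load_image

# Load the ControlNet adapter and attach it to a Stable Diffusion base model,
# mirroring what ControlNetPipelineSlowTests.test_canny exercises above.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, safety_checker=None, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()  # keeps peak VRAM low, as the offloading test checks

# The conditioning image is a canny edge map; the CPU generator fixes the seed for reproducibility.
canny_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
generator = torch.Generator(device="cpu").manual_seed(0)
image = pipe("bird", canny_image, generator=generator, num_inference_steps=20).images[0]
image.save("bird_controlnet.png")

The fast tests in the same file exercise this call path with tiny, randomly initialized components so they can run on CPU in seconds, while the slow tests compare full-size outputs against stored reference images.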
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_sdxl_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, StableDiffusionXLControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetPipelineSDXLImg2ImgFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, skip_first_text_encoder=False): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64 if not skip_first_text_encoder else 32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below 
hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder if not skip_first_text_encoder else None, "tokenizer": tokenizer if not skip_first_text_encoder else None, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): controlnet_embedder_scale_factor = 2 image = floats_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), rng=random.Random(seed), ).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": image, } return inputs def test_stable_diffusion_xl_controlnet_img2img(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_controlnet_img2img_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] assert output.images.shape == (1, 64, 64, 3) expected_slice = np.array( [0.5557202, 0.46418434, 0.46983826, 0.623529, 0.5557242, 0.49262643, 0.6070508, 0.5702978, 0.43777135] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = 
self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # copied from test_stable_diffusion_xl.py def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
0
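The img2img fast tests above pass both an "image" and a "control_image" input, which is the distinguishing feature of this pipeline: the first is the starting picture that gets partially re-noised, the second is the ControlNet conditioning. The sketch below shows that call shape against real weights; the checkpoint names are borrowed from the SDXL ControlNet slow tests elsewhere in this test suite, the image URLs are placeholders, and the strength value is illustrative, so treat this as a hedged sketch rather than a tested recipe.

import torch
from diffusers import ControlNetModel, StableDiffusionXLControlNetImg2ImgPipeline
from diffusers.utils import load_image

# Base SDXL weights plus a canny-conditioned ControlNet, in half precision.
controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0", torch_dtype=torch.float16)
pipe = StableDiffusionXLControlNetImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet, torch_dtype=torch.float16
)
pipe.enable_model_cpu_offload()

# `image` is the init picture (how much it is altered is set by `strength`),
# `control_image` is the structural conditioning fed to the ControlNet.
init_image = load_image("https://example.com/init.png")      # placeholder URL
control_image = load_image("https://example.com/canny.png")  # placeholder URL
generator = torch.Generator(device="cpu").manual_seed(0)

image = pipe(
    "A painting of a squirrel eating a burger",
    image=init_image,
    control_image=control_image,
    strength=0.7,
    num_inference_steps=30,
    generator=generator,
).images[0]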
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_flax_controlnet.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class FlaxControlNetPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_canny(self): controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16 ) pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 ) params["controlnet"] = controlnet_params prompts = "bird" num_samples = jax.device_count() prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) canny_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) processed_image = pipe.prepare_image_inputs([canny_image] * num_samples) rng = jax.random.PRNGKey(0) rng = jax.random.split(rng, jax.device_count()) p_params = replicate(params) prompt_ids = shard(prompt_ids) processed_image = shard(processed_image) images = pipe( prompt_ids=prompt_ids, image=processed_image, params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078] ) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2 def test_pose(self): controlnet, controlnet_params = FlaxControlNetModel.from_pretrained( "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16 ) pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16 ) params["controlnet"] = controlnet_params prompts = "Chef in the kitchen" num_samples = jax.device_count() prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples) pose_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" ) processed_image = pipe.prepare_image_inputs([pose_image] * num_samples) rng = jax.random.PRNGKey(0) rng = jax.random.split(rng, jax.device_count()) p_params = replicate(params) prompt_ids = shard(prompt_ids) processed_image = shard(processed_image) images = pipe( prompt_ids=prompt_ids, image=processed_image, 
params=p_params, prng_seed=rng, num_inference_steps=50, jit=True, ).images assert images.shape == (jax.device_count(), 1, 768, 512, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array( [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]] ) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2
0
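The Flax integration tests above all follow the same data-parallel recipe for diffusers Flax pipelines: replicate the parameters across devices, shard the tokenized prompts and preprocessed images, and call the pipeline with jit=True. A condensed sketch of that recipe, using the same checkpoints and calls as the canny test; on a host with N accelerator devices it produces N images.

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import load_image

controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
)
params["controlnet"] = controlnet_params

# One prompt/image pair per device; prepare_* tokenizes and preprocesses the inputs.
num_samples = jax.device_count()
prompt_ids = pipe.prepare_text_inputs(["bird"] * num_samples)
canny_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
)
processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)

# Replicate the weights across devices, shard the per-device inputs,
# then run the jitted, pmapped forward pass (one PRNG key per device).
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())
images = pipe(
    prompt_ids=shard(prompt_ids),
    image=shard(processed_image),
    params=replicate(params),
    prng_seed=rng,
    num_inference_steps=50,
    jit=True,
).images  # shape: (num_devices, 1, 768, 512, 3)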
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_sdxl.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, LCMScheduler, StableDiffusionXLControlNetPipeline, UNet2DConditionModel, ) from diffusers.models.unet_2d_blocks import UNetMidBlock2D from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, slow, torch_device from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLControlNetPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) 
text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different 
prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # copied from test_stable_diffusion_xl.py def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_controlnet_sdxl_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] expected_slice = np.array( [0.7330834, 0.590667, 0.5667336, 0.6029023, 0.5679491, 0.5968194, 0.4032986, 0.47612396, 0.5089609] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 def test_controlnet_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.7799, 0.614, 0.6162, 0.7082, 0.6662, 0.5833, 0.4148, 0.5182, 0.4866]) assert np.abs(image_slice.flatten() - expected_slice).max() 
< 1e-2 class StableDiffusionXLMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = 
torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): return self._test_save_load_optional_components() class StableDiffusionXLMultiControlNetOneModelPipelineFastTests( PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet = 
ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 images = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", "image": images, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe( **inputs, control_guidance_start=[0.1], control_guidance_end=[0.2], )[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - 
output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() def test_negative_conditions(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slice_without_neg_cond = image[0, -3:, -3:, -1] image = pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=(0, 0), negative_target_size=(1024, 1024), ).images image_slice_with_neg_cond = image[0, -3:, -3:, -1] self.assertTrue(np.abs(image_slice_without_neg_cond - image_slice_with_neg_cond).max() > 1e-2) @slow @require_torch_gpu class ControlNetSDXLPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-canny-sdxl-1.0") pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (768, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4185, 0.4127, 0.4089, 0.4046, 0.4115, 0.4096, 0.4081, 0.4112, 0.3913]) assert np.allclose(original_image, expected_image, atol=1e-04) def test_depth(self): controlnet = ControlNetModel.from_pretrained("diffusers/controlnet-depth-sdxl-1.0") pipe = StableDiffusionXLControlNetPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", controlnet=controlnet ) pipe.enable_sequential_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "Stormtrooper's lecture" image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/stormtrooper_depth.png" ) images = pipe(prompt, image=image, generator=generator, output_type="np", num_inference_steps=3).images assert images[0].shape == (512, 512, 3) original_image = images[0, -3:, -3:, -1].flatten() expected_image = np.array([0.4399, 0.5112, 0.5478, 0.4314, 0.472, 0.4823, 0.4647, 0.4957, 0.4853]) assert np.allclose(original_image, expected_image, atol=1e-04) class StableDiffusionSSD1BControlNetPipelineFastTests(StableDiffusionXLControlNetPipelineFastTests): def test_controlnet_sdxl_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) 
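# with guess_mode enabled, compare a corner slice of the generated image against fixed reference values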
image_slice = output.images[0, -3:, -3:, -1] expected_slice = np.array( [0.6831671, 0.5702532, 0.5459845, 0.6299793, 0.58563006, 0.6033695, 0.4493941, 0.46132287, 0.5035841] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 def test_controlnet_sdxl_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLControlNetPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6850, 0.5135, 0.5545, 0.7033, 0.6617, 0.5971, 0.4165, 0.5480, 0.5070]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_conditioning_channels(self): unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), mid_block_type="UNetMidBlock2D", # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=None, ) controlnet = ControlNetModel.from_unet(unet, conditioning_channels=4) assert type(controlnet.mid_block) == UNetMidBlock2D assert controlnet.conditioning_channels == 4 def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), mid_block_type="UNetMidBlock2D", # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, time_cond_proj_dim=time_cond_proj_dim, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), mid_block_type="UNetMidBlock2D", # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) torch.manual_seed(0) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # 
SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "feature_extractor": None, "image_encoder": None, } return components
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_inpaint_sdxl.py
# coding=utf-8 # Copyright 2023 Harutatsu Akiyama, Jinbin Bai, and HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, EulerDiscreteScheduler, StableDiffusionXLControlNetInpaintPipeline, UNet2DConditionModel, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetPipelineSDXLFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionXLControlNetInpaintPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = frozenset(IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"mask_image", "control_image"})) image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), conditioning_embedding_out_channels=(16, 32), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = 
CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, } return components def get_dummy_inputs(self, device, seed=0, img_res=64): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) # Get random floats in [0, 1] as image image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = torch.ones_like(image) controlnet_embedder_scale_factor = 2 control_image = ( floats_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), rng=random.Random(seed), ) .to(device) .cpu() ) control_image = control_image.cpu().permute(0, 2, 3, 1)[0] # Convert image and mask_image to [0, 255] image = 255 * image mask_image = 255 * mask_image control_image = 255 * control_image # Convert to PIL image init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) mask_image = Image.fromarray(np.uint8(mask_image)).convert("L").resize((img_res, img_res)) control_image = Image.fromarray(np.uint8(control_image)).convert("RGB").resize((img_res, img_res)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": init_image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = 
self.get_dummy_inputs(torch_device) inputs["prompt_2"] = inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_controlnet_sdxl_guess(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guess_mode"] = True output = sd_pipe(**inputs) image_slice = output.images[0, -3:, -3:, -1] expected_slice = np.array( [0.5381963, 0.4836803, 0.45821992, 0.5577731, 0.51210403, 0.4794795, 0.59282357, 0.5647199, 0.43100584] ) # make sure that it's equal assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 # TODO(Patrick, Sayak) - skip for now as this requires more refiner tests def test_save_load_optional_components(self): pass def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1)
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_blip_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTokenizer from transformers.models.blip_2.configuration_blip_2 import Blip2Config from transformers.models.clip.configuration_clip import CLIPTextConfig from diffusers import ( AutoencoderKL, BlipDiffusionControlNetPipeline, ControlNetModel, PNDMScheduler, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism from src.diffusers.pipelines.blip_diffusion.blip_image_processing import BlipImageProcessor from src.diffusers.pipelines.blip_diffusion.modeling_blip2 import Blip2QFormerModel from src.diffusers.pipelines.blip_diffusion.modeling_ctx_clip import ContextCLIPTextModel from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class BlipDiffusionControlNetPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = BlipDiffusionControlNetPipeline params = [ "prompt", "reference_image", "source_subject_category", "target_subject_category", "condtioning_image", ] batch_params = [ "prompt", "reference_image", "source_subject_category", "target_subject_category", "condtioning_image", ] required_optional_params = [ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "neg_prompt", "guidance_scale", "prompt_strength", "prompt_reps", ] def get_dummy_components(self): torch.manual_seed(0) text_encoder_config = CLIPTextConfig( vocab_size=1000, hidden_size=16, intermediate_size=16, projection_dim=16, num_hidden_layers=1, num_attention_heads=1, max_position_embeddings=77, ) text_encoder = ContextCLIPTextModel(text_encoder_config) vae = AutoencoderKL( in_channels=4, out_channels=4, down_block_types=("DownEncoderBlock2D",), up_block_types=("UpDecoderBlock2D",), block_out_channels=(32,), layers_per_block=1, act_fn="silu", latent_channels=4, norm_num_groups=16, sample_size=16, ) blip_vision_config = { "hidden_size": 16, "intermediate_size": 16, "num_hidden_layers": 1, "num_attention_heads": 1, "image_size": 224, "patch_size": 14, "hidden_act": "quick_gelu", } blip_qformer_config = { "vocab_size": 1000, "hidden_size": 16, "num_hidden_layers": 1, "num_attention_heads": 1, "intermediate_size": 16, "max_position_embeddings": 512, "cross_attention_frequency": 1, "encoder_hidden_size": 16, } qformer_config = Blip2Config( vision_config=blip_vision_config, qformer_config=blip_qformer_config, num_query_tokens=16, tokenizer="hf-internal-testing/tiny-random-bert", ) qformer = Blip2QFormerModel(qformer_config) unet = UNet2DConditionModel( block_out_channels=(4, 16), layers_per_block=1, norm_num_groups=4, sample_size=16, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=16, ) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") scheduler = PNDMScheduler( beta_start=0.00085, beta_end=0.012, 
beta_schedule="scaled_linear", set_alpha_to_one=False, skip_prk_steps=True, ) controlnet = ControlNetModel( block_out_channels=(4, 16), layers_per_block=1, in_channels=4, norm_num_groups=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=16, conditioning_embedding_out_channels=(8, 16), ) vae.eval() qformer.eval() text_encoder.eval() image_processor = BlipImageProcessor() components = { "text_encoder": text_encoder, "vae": vae, "qformer": qformer, "unet": unet, "tokenizer": tokenizer, "scheduler": scheduler, "controlnet": controlnet, "image_processor": image_processor, } return components def get_dummy_inputs(self, device, seed=0): np.random.seed(seed) reference_image = np.random.rand(32, 32, 3) * 255 reference_image = Image.fromarray(reference_image.astype("uint8")).convert("RGBA") cond_image = np.random.rand(32, 32, 3) * 255 cond_image = Image.fromarray(cond_image.astype("uint8")).convert("RGBA") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "swimming underwater", "generator": generator, "reference_image": reference_image, "condtioning_image": cond_image, "source_subject_category": "dog", "target_subject_category": "dog", "height": 32, "width": 32, "guidance_scale": 7.5, "num_inference_steps": 2, "output_type": "np", } return inputs def test_blipdiffusion_controlnet(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) image = pipe(**self.get_dummy_inputs(device))[0] image_slice = image[0, -3:, -3:, 0] assert image.shape == (1, 16, 16, 4) expected_slice = np.array([0.7953, 0.7136, 0.6597, 0.4779, 0.7389, 0.4111, 0.5826, 0.4150, 0.8422]) assert ( np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily based on: import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetInpaintPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetInpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset({"control_image"}) # skip `image` and `mask` for now, only test for control_image image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": 
text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class ControlNetSimpleInpaintPipelineFastTests(ControlNetInpaintPipelineFastTests): pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components class MultiControlNetInpaintPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): 
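# fast tests for the Stable Diffusion ControlNet inpaint pipeline driven by two ControlNets wrapped in a MultiControlNetModel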
pipeline_class = StableDiffusionControlNetInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(32, 64), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] init_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) init_image = init_image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(init_image)).convert("RGB").resize((64, 64)) mask_image = Image.fromarray(np.uint8(init_image + 4)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "mask_image": mask_image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = 
self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetInpaintPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) prompt = "pitch black hole" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) output = pipe( prompt, image=image, mask_image=mask_image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=3, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/inpaint.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_inpaint(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_inpaint") pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", 
safety_checker=None, controlnet=controlnet ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(33) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png" ) init_image = init_image.resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png" ) mask_image = mask_image.resize((512, 512)) prompt = "a handsome man with ray-ban sunglasses" def make_inpaint_condition(image, image_mask): image = np.array(image.convert("RGB")).astype(np.float32) / 255.0 image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0 assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size" image[image_mask > 0.5] = -1.0 # set as masked pixel image = np.expand_dims(image, 0).transpose(0, 3, 1, 2) image = torch.from_numpy(image) return image control_image = make_inpaint_condition(init_image, mask_image) output = pipe( prompt, image=init_image, mask_image=mask_image, control_image=control_image, guidance_scale=9.0, eta=1.0, generator=generator, num_inference_steps=20, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/boy_ray_ban.npy" ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 1e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetInpaintPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ).resize((512, 512)) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" output = pipe( prompt, image=image, control_image=control_image, mask_image=mask_image, strength=0.9, generator=generator, output_type="np", num_inference_steps=3, ) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/controlnet/test_controlnet_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/ import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, ControlNetModel, DDIMScheduler, StableDiffusionControlNetImg2ImgPipeline, UNet2DConditionModel, ) from diffusers.pipelines.controlnet.pipeline_controlnet import MultiControlNetModel from diffusers.utils import load_image from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_numpy, require_torch_gpu, slow, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() class ControlNetImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"}) image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) controlnet = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "controlnet": 
controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ) image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) class StableDiffusionMultiControlNetPipelineFastTests( PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionControlNetImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset([]) # TO_DO: add image_params once refactored VaeImageProcessor.preprocess def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=1, ) torch.manual_seed(0) def init_weights(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.normal(m.weight) m.bias.data.fill_(1.0) controlnet1 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet1.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) controlnet2 = ControlNetModel( block_out_channels=(4, 8), layers_per_block=2, in_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), cross_attention_dim=32, conditioning_embedding_out_channels=(16, 32), norm_num_groups=1, ) controlnet2.controlnet_down_blocks.apply(init_weights) torch.manual_seed(0) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, 
intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") controlnet = MultiControlNetModel([controlnet1, controlnet2]) components = { "unet": unet, "controlnet": controlnet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) controlnet_embedder_scale_factor = 2 control_image = [ randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), randn_tensor( (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor), generator=generator, device=torch.device(device), ), ] image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", "image": image, "control_image": control_image, } return inputs def test_control_guidance_switch(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) scale = 10.0 steps = 4 inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_1 = pipe(**inputs)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0] inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = steps inputs["controlnet_conditioning_scale"] = scale output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0] # make sure that all outputs are different assert np.sum(np.abs(output_1 - output_2)) > 1e-3 assert np.sum(np.abs(output_1 - output_3)) > 1e-3 assert np.sum(np.abs(output_1 - output_4)) > 1e-3 def test_attention_slicing_forward_pass(self): return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_attention_forwardGenerator_pass(self): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(expected_max_diff=2e-3) def test_save_pretrained_raise_not_implemented_exception(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) with tempfile.TemporaryDirectory() as tmpdir: try: # save_pretrained is not implemented for Multi-ControlNet 
pipe.save_pretrained(tmpdir) except NotImplementedError: pass @slow @require_torch_gpu class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_canny(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny") pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "evil space-punk bird" control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) output = pipe( prompt, image, control_image=control_image, generator=generator, output_type="np", num_inference_steps=50, strength=0.6, ) image = output.images[0] assert image.shape == (512, 512, 3) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy" ) assert np.abs(expected_image - image).max() < 9e-2 def test_load_local(self): controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_canny") pipe_1 = StableDiffusionControlNetImg2ImgPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet ) controlnet = ControlNetModel.from_single_file( "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth" ) pipe_2 = StableDiffusionControlNetImg2ImgPipeline.from_single_file( "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.safetensors", safety_checker=None, controlnet=controlnet, ) control_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" ).resize((512, 512)) image = load_image( "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png" ).resize((512, 512)) pipes = [pipe_1, pipe_2] images = [] for pipe in pipes: pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) generator = torch.Generator(device="cpu").manual_seed(0) prompt = "bird" output = pipe( prompt, image=image, control_image=control_image, strength=0.9, generator=generator, output_type="np", num_inference_steps=3, ) images.append(output.images[0]) del pipe gc.collect() torch.cuda.empty_cache() assert np.abs(images[0] - images[1]).max() < 1e-3
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import nightly, require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @nightly @require_flax class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_flax(self): sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16, ) prompt = "A painting of a squirrel eating a burger" num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = sd_pipe.prepare_inputs(prompt) params = replicate(params) prompt_ids = shard(prompt_ids) prng_seed = jax.random.PRNGKey(0) prng_seed = jax.random.split(prng_seed, jax.device_count()) images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512]) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 1e-2 @nightly @require_flax class FlaxStableDiffusion2PipelineNightlyTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def test_stable_diffusion_dpm_flax(self): model_id = "stabilityai/stable-diffusion-2" scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler") sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained( model_id, scheduler=scheduler, revision="bf16", dtype=jnp.bfloat16, ) params["scheduler"] = scheduler_params prompt = "A painting of a squirrel eating a burger" num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = sd_pipe.prepare_inputs(prompt) params = replicate(params) prompt_ids = shard(prompt_ids) prng_seed = jax.random.PRNGKey(0) prng_seed = jax.random.split(prng_seed, jax.device_count()) images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0] assert images.shape == (jax.device_count(), 1, 768, 768, 3) images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:]) image_slice = images[0, 253:256, 253:256, -1] output_slice = jnp.asarray(jax.device_get(image_slice.flatten())) expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297]) print(f"output_slice: {output_slice}") assert jnp.abs(output_slice - expected_slice).max() < 
1e-2
0
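The Flax integration test above reduces to the following multi-device inference pattern. This is an illustrative sketch, not a file from the repository; it assumes a host with several JAX devices (TPU or GPU) and access to the stabilityai/stable-diffusion-2 checkpoint on the Hub.

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard

from diffusers import FlaxStableDiffusionPipeline

# Load the bf16 branch of the Stable Diffusion 2 weights, as the test does.
pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2", revision="bf16", dtype=jnp.bfloat16
)

# One prompt per device; prepare_inputs tokenizes, shard/replicate add the device axis.
prompts = jax.device_count() * ["A painting of a squirrel eating a burger"]
prompt_ids = shard(pipe.prepare_inputs(prompts))
params = replicate(params)
rng = jax.random.split(jax.random.PRNGKey(0), jax.device_count())

# jit=True pmaps the sampling loop, so images come back as (devices, batch, H, W, C).
images = pipe(prompt_ids, params, rng, num_inference_steps=25, jit=True)[0]
images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])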
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusion2InpaintPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) 
mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_inpaint_pipeline(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench.npy" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def test_stable_diffusion_inpaint_pipeline_fp16(self): init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint" "/yellow_cat_sitting_on_a_park_bench_fp16.npy" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pipe = StableDiffusionInpaintPipeline.from_pretrained( model_id, torch_dtype=torch.float16, safety_checker=None, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def 
test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-inpaint/init_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" ) model_id = "stabilityai/stable-diffusion-2-inpainting" pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler") pipe = StableDiffusionInpaintPipeline.from_pretrained( model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() prompt = "Face of a yellow cat, high resolution, sitting on a park bench" generator = torch.manual_seed(0) _ = pipe( prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
0
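Outside the test harness, the same inpainting checkpoint is driven like this. A minimal sketch, assuming a CUDA GPU and network access to the Hub; the image URLs and prompt are the ones used in the integration tests above, and the output filename is illustrative.

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/sd2-inpaint/init_image.png"
)
mask_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
)

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe.to("cuda")

# White pixels in mask_image are repainted according to the prompt; black pixels are kept.
image = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
    generator=torch.manual_seed(0),
).images[0]
image.save("yellow_cat_on_park_bench.png")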
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_depth.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import ( CLIPTextConfig, CLIPTextModel, CLIPTokenizer, DPTConfig, DPTFeatureExtractor, DPTForDepthEstimation, ) from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionDepth2ImgPipeline, UNet2DConditionModel, ) from diffusers.utils import is_accelerate_available, is_accelerate_version from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class StableDiffusionDepth2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionDepth2ImgPipeline test_save_load_optional_components = False params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"depth_mask"}) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=5, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [96, 192, 384, 768], "num_groups": 2, } depth_estimator_config = DPTConfig( image_size=32, 
patch_size=16, num_channels=3, hidden_size=32, num_hidden_layers=4, backbone_out_indices=(0, 1, 2, 3), num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, is_decoder=False, initializer_range=0.02, is_hybrid=True, backbone_config=backbone_config, backbone_featmap_shape=[1, 384, 24, 24], ) depth_estimator = DPTForDepthEstimation(depth_estimator_config).eval() feature_extractor = DPTFeatureExtractor.from_pretrained( "hf-internal-testing/tiny-random-DPTForDepthEstimation" ) components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "depth_estimator": depth_estimator, "feature_extractor": feature_extractor, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB").resize((32, 32)) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_save_load_local(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_save_load_float16(self): components = self.get_dummy_components() for name, module in components.items(): if hasattr(module, "half"): components[name] = module.to(torch_device).half() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir, torch_dtype=torch.float16) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for name, component in pipe_loaded.components.items(): if hasattr(component, "dtype"): self.assertTrue( component.dtype == torch.float16, f"`{name}.dtype` switched from `float16` to {component.dtype} after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 2e-2, "The output of the fp16 pipeline changed after saving and loading.") @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA") def test_float16_inference(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) for name, module in components.items(): if hasattr(module, "half"): components[name] = module.half() pipe_fp16 = self.pipeline_class(**components) pipe_fp16.to(torch_device) 
pipe_fp16.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(torch_device))[0] output_fp16 = pipe_fp16(**self.get_dummy_inputs(torch_device))[0] max_diff = np.abs(output - output_fp16).max() self.assertLess(max_diff, 1.3e-2, "The outputs of the fp16 and fp32 pipelines are too different.") @unittest.skipIf( torch_device != "cuda" or not is_accelerate_available() or is_accelerate_version("<", "0.14.0"), reason="CPU offload is only available with CUDA and `accelerate v0.14.0` or higher", ) def test_cpu_offload_forward_pass(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) output_without_offload = pipe(**inputs)[0] pipe.enable_sequential_cpu_offload() inputs = self.get_dummy_inputs(torch_device) output_with_offload = pipe(**inputs)[0] max_diff = np.abs(output_with_offload - output_without_offload).max() self.assertLess(max_diff, 1e-4, "CPU offloading should not affect the inference results") def test_dict_tuple_outputs_equivalent(self): components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) output = pipe(**self.get_dummy_inputs(torch_device))[0] output_tuple = pipe(**self.get_dummy_inputs(torch_device), return_dict=False)[0] max_diff = np.abs(output - output_tuple).max() self.assertLess(max_diff, 1e-4) def test_progress_bar(self): super().test_progress_bar() def test_stable_diffusion_depth2img_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6071, 0.5035, 0.4378, 0.5776, 0.5753, 0.4316, 0.4513, 0.5263, 0.4546]) else: expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_negative_prompt(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) negative_prompt = "french fries" output = pipe(**inputs, negative_prompt=negative_prompt) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6296, 0.5125, 0.3890, 0.4456, 0.5955, 0.4621, 0.3810, 0.5310, 0.4626]) else: expected_slice = np.array([0.6012, 0.4507, 0.3769, 0.4121, 0.5566, 0.4585, 0.3803, 0.5045, 0.4631]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_multiple_init_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["prompt"] = [inputs["prompt"]] * 2 inputs["image"] = 2 * [inputs["image"]] image = pipe(**inputs).images 
image_slice = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) if torch_device == "mps": expected_slice = np.array([0.6501, 0.5150, 0.4939, 0.6688, 0.5437, 0.5758, 0.5115, 0.4406, 0.4551]) else: expected_slice = np.array([0.6557, 0.6214, 0.6254, 0.5775, 0.4785, 0.5949, 0.5904, 0.4785, 0.4730]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_stable_diffusion_depth2img_pil(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = StableDiffusionDepth2ImgPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] if torch_device == "mps": expected_slice = np.array([0.53232, 0.47015, 0.40868, 0.45651, 0.4891, 0.4668, 0.4287, 0.48822, 0.47439]) else: expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @skip_mps def test_attention_slicing_forward_pass(self): return super().test_attention_slicing_forward_pass() def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=7e-3) @slow @require_torch_gpu class StableDiffusionDepth2ImgPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" ) inputs = { "prompt": "two tigers", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_depth2img_pipeline_default(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.5435, 0.4992, 0.3783, 0.4411, 0.5842, 0.4654, 0.3786, 0.5077, 0.4655]) assert np.abs(expected_slice - image_slice).max() < 6e-1 def test_stable_diffusion_depth2img_pipeline_k_lms(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.6363, 0.6274, 0.6309, 0.6370, 0.6226, 0.6286, 0.6213, 0.6453, 0.6306]) assert np.abs(expected_slice - image_slice).max() < 8e-4 def test_stable_diffusion_depth2img_pipeline_ddim(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = 
self.get_inputs() image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 480, 640, 3) expected_slice = np.array([0.6424, 0.6524, 0.6249, 0.6041, 0.6634, 0.6420, 0.6522, 0.6555, 0.6436]) assert np.abs(expected_slice - image_slice).max() < 5e-4 def test_stable_diffusion_depth2img_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 60, 80) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.7168, -1.5137, -0.1418, -2.9219, -2.7266, -2.4414, -2.1035, -3.0078, -1.7051] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 60, 80) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.7109, -1.5068, -0.1403, -2.9160, -2.7207, -2.4414, -2.1035, -3.0059, -1.7090] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == 2 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionDepth2ImgPipeline.from_pretrained( "stabilityai/stable-diffusion-2-depth", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9 @nightly @require_torch_gpu class StableDiffusionImg2ImgPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png" ) inputs = { "prompt": "two tigers", "image": init_image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_depth2img_pndm(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_depth2img_ddim(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = 
DDIMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_lms(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_img2img_dpm(self): pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() inputs["num_inference_steps"] = 30 image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_depth2img/stable_diffusion_2_0_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
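For reference, the depth-conditioned image-to-image pipeline exercised above is used end to end as follows. A minimal sketch, assuming a CUDA GPU; the checkpoint, input image, and sampler settings are taken from the slow tests.

import torch
from diffusers import StableDiffusionDepth2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDepth2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-depth", torch_dtype=torch.float16
).to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/depth2img/two_cats.png"
)

# A depth map is estimated from init_image internally (via the DPT depth estimator) and fed
# to the UNet as an extra latent channel; strength controls how far the result drifts from
# the input image.
image = pipe(
    prompt="two tigers",
    image=init_image,
    strength=0.75,
    guidance_scale=7.5,
    generator=torch.manual_seed(0),
).images[0]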
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPipeline, UNet2DConditionModel, logging, ) from diffusers.utils.testing_utils import ( CaptureLogger, backend_empty_cache, enable_full_determinism, load_numpy, nightly, numpy_cosine_similarity_distance, require_torch_accelerator, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusion2PipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0): generator_device = "cpu" if not device.startswith("cuda") else "cuda" if not str(device).startswith("mps"): generator = torch.Generator(device=generator_device).manual_seed(seed) 
else: generator = torch.manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def test_stable_diffusion_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5753, 0.6113, 0.5005, 0.5036, 0.5464, 0.4725, 0.4982, 0.4865, 0.4861]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_pndm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = PNDMScheduler(skip_prk_steps=True) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5121, 0.5714, 0.4827, 0.5057, 0.5646, 0.4766, 0.5189, 0.4895, 0.4990]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_lms(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerAncestralDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4864, 0.5440, 0.4842, 0.4994, 0.5543, 0.4846, 0.5196, 0.4942, 0.5063]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = EulerDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4865, 0.5439, 0.4840, 0.4995, 0.5543, 0.4846, 0.5199, 0.4942, 0.5061]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def 
test_stable_diffusion_unflawed(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() components["scheduler"] = DDIMScheduler.from_config( components["scheduler"].config, timestep_spacing="trailing" ) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["guidance_rescale"] = 0.7 inputs["num_inference_steps"] = 10 image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4736, 0.5405, 0.4705, 0.4955, 0.5675, 0.4812, 0.5310, 0.4967, 0.5064]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_long_prompt(self): components = self.get_dummy_components() components["scheduler"] = LMSDiscreteScheduler.from_config(components["scheduler"].config) sd_pipe = StableDiffusionPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) do_classifier_free_guidance = True negative_prompt = None num_images_per_prompt = 1 logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion") logger.setLevel(logging.WARNING) prompt = 25 * "@" with CaptureLogger(logger) as cap_logger_3: text_embeddings_3, negeative_text_embeddings_3 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negeative_text_embeddings_3 is not None: text_embeddings_3 = torch.cat([negeative_text_embeddings_3, text_embeddings_3]) prompt = 100 * "@" with CaptureLogger(logger) as cap_logger: text_embeddings, negative_embeddings = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_embeddings is not None: text_embeddings = torch.cat([negative_embeddings, text_embeddings]) negative_prompt = "Hello" with CaptureLogger(logger) as cap_logger_2: text_embeddings_2, negative_text_embeddings_2 = sd_pipe.encode_prompt( prompt, torch_device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt ) if negative_text_embeddings_2 is not None: text_embeddings_2 = torch.cat([negative_text_embeddings_2, text_embeddings_2]) assert text_embeddings_3.shape == text_embeddings_2.shape == text_embeddings.shape assert text_embeddings.shape[1] == 77 assert cap_logger.out == cap_logger_2.out # 100 - 77 + 1 (BOS token) + 1 (EOS token) = 25 assert cap_logger.out.count("@") == 25 assert cap_logger_3.out == "" def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_accelerator @skip_mps class StableDiffusion2PipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda" if not str(device).startswith("mps"): generator = torch.Generator(device=_generator_device).manual_seed(seed) else: generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": 
latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_default_ddim(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]) assert np.abs(image_slice - expected_slice).max() < 7e-3 def test_stable_diffusion_pndm(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506]) assert np.abs(image_slice - expected_slice).max() < 7e-3 def test_stable_diffusion_k_lms(self): pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base") pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.10440, 0.13115, 0.11100, 0.10141, 0.11440, 0.07215, 0.11332, 0.09693, 0.10006]) assert np.abs(image_slice - expected_slice).max() < 3e-3 @require_torch_gpu def test_stable_diffusion_attention_slicing(self): torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe.unet.set_default_attn_processor() pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # enable attention slicing pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image_sliced = pipe(**inputs).images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 3.3 GB is allocated assert mem_bytes < 3.3 * 10**9 # disable slicing pipe.disable_attention_slicing() pipe.unet.set_default_attn_processor() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images # make sure that more than 3.3 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 3.3 * 10**9 max_diff = numpy_cosine_similarity_distance(image.flatten(), image_sliced.flatten()) assert max_diff < 5e-3 def test_stable_diffusion_text2img_intermediate_state(self): number_of_steps = 0 def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 1: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array( [-0.3862, -0.4507, -1.1729, 0.0686, -1.1045, 0.7124, -1.8301, 0.1903, 1.2773] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 2: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) latents_slice = latents[0, -3:, -3:, -1] 
expected_slice = np.array( [0.2720, -0.1863, -0.7383, -0.5029, -0.7534, 0.3970, -0.7646, 0.4468, 1.2686] ) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 callback_fn.has_been_called = False pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) pipe(**inputs, callback=callback_fn, callback_steps=1) assert callback_fn.has_been_called assert number_of_steps == inputs["num_inference_steps"] @require_torch_gpu def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.8 GB is allocated assert mem_bytes < 2.8 * 10**9 @require_torch_gpu def test_stable_diffusion_pipeline_with_model_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() inputs = self.get_inputs(torch_device, dtype=torch.float16) # Normal inference pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) outputs = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # With model offloading # Reload but don't move to cuda pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-base", torch_dtype=torch.float16, ) pipe.unet.set_default_attn_processor() torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device, dtype=torch.float16) outputs_offloaded = pipe(**inputs) mem_bytes_offloaded = torch.cuda.max_memory_allocated() images = outputs.images images_offloaded = outputs_offloaded.images max_diff = numpy_cosine_similarity_distance(images.flatten(), images_offloaded.flatten()) assert max_diff < 1e-3 assert mem_bytes_offloaded < mem_bytes assert mem_bytes_offloaded < 3 * 10**9 for module in pipe.text_encoder, pipe.unet, pipe.vae: assert module.device == torch.device("cpu") # With attention slicing torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe.enable_attention_slicing() _ = pipe(**inputs) mem_bytes_slicing = torch.cuda.max_memory_allocated() assert mem_bytes_slicing < mem_bytes_offloaded @nightly @require_torch_accelerator @skip_mps class StableDiffusion2PipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): _generator_device = "cpu" if not generator_device.startswith("cuda") else "cuda" if not str(device).startswith("mps"): generator = torch.Generator(device=_generator_device).manual_seed(seed) else: generator = torch.manual_seed(seed) latents = 
np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def test_stable_diffusion_2_0_default_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_0_base_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_2_1_default_pndm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_ddim(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = DDIMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_lms(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_euler(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = EulerDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_euler.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_stable_diffusion_dpm(self): sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base").to(torch_device) sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 25 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( 
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_2_text2img/stable_diffusion_2_1_base_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
0
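The scheduler-swapping pattern that the tests above repeat for LMS, Euler, and DPM-Solver looks like this in user code. A minimal sketch, assuming a CUDA GPU; loading the checkpoint in float16 is an assumption here, the nightly tests load it in full precision.

import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", torch_dtype=torch.float16
).to("cuda")

# from_config rebuilds the new scheduler from the checkpoint's scheduler hyperparameters,
# so only the sampling algorithm changes, not the underlying noise schedule.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

image = pipe(
    "a photograph of an astronaut riding a horse",
    num_inference_steps=25,
    guidance_scale=7.5,
    generator=torch.manual_seed(0),
).images[0]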
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_latent_upscale.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer import diffusers from diffusers import ( AutoencoderKL, EulerDiscreteScheduler, StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() def check_same_shape(tensor_list): shapes = [tensor.shape for tensor in tensor_list] return all(shape == shapes[0] for shape in shapes[1:]) class StableDiffusionLatentUpscalePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionLatentUpscalePipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - { "height", "width", "cross_attention_kwargs", "negative_prompt_embeds", "prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) @property def dummy_image(self): batch_size = 1 num_channels = 4 sizes = (16, 16) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image def get_dummy_components(self): torch.manual_seed(0) model = UNet2DConditionModel( act_fn="gelu", attention_head_dim=8, norm_num_groups=None, block_out_channels=[32, 32, 64, 64], time_cond_proj_dim=160, conv_in_kernel=1, conv_out_kernel=1, cross_attention_dim=32, down_block_types=( "KDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", "KCrossAttnDownBlock2D", ), in_channels=8, mid_block_type=None, only_cross_attention=False, out_channels=5, resnet_time_scale_shift="scale_shift", time_embedding_type="fourier", timestep_post_act="gelu", up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"), ) vae = AutoencoderKL( block_out_channels=[32, 32, 64, 64], in_channels=3, out_channels=3, down_block_types=[ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", ], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) scheduler = EulerDiscreteScheduler(prediction_type="sample") text_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, 
num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="quick_gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": model.eval(), "vae": vae.eval(), "scheduler": scheduler, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": self.dummy_image.cpu(), "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 256, 256, 3)) expected_slice = np.array( [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=7e-3) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=3e-3) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=7e-3) def test_pt_np_pil_outputs_equivalent(self): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=3e-3) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=3e-3) def test_karras_schedulers_shape(self): skip_schedulers = [ "DDIMScheduler", "DDPMScheduler", "PNDMScheduler", "HeunDiscreteScheduler", "EulerAncestralDiscreteScheduler", "KDPM2DiscreteScheduler", "KDPM2AncestralDiscreteScheduler", "DPMSolverSDEScheduler", ] components = self.get_dummy_components() pipe = self.pipeline_class(**components) # make sure that PNDM does not need warm-up pipe.scheduler.register_to_config(skip_prk_steps=True) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = 2 outputs = [] for scheduler_enum in KarrasDiffusionSchedulers: if scheduler_enum.name in skip_schedulers: # no sigma schedulers are not supported # no schedulers continue scheduler_cls = getattr(diffusers, scheduler_enum.name) pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config) output = pipe(**inputs)[0] outputs.append(output) assert check_same_shape(outputs) def test_float16_inference(self): super().test_float16_inference(expected_max_diff=5e-1) @require_torch_gpu @slow class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_latent_upscaler_fp16(self): generator = torch.manual_seed(33) pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16) pipe.to("cuda") upscaler = 
StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 ) upscaler.to("cuda") prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic" low_res_latents = pipe(prompt, generator=generator, output_type="latent").images image = upscaler( prompt=prompt, image=low_res_latents, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy" ) assert np.abs((expected_image - image).mean()) < 5e-2 def test_latent_upscaler_fp16_image(self): generator = torch.manual_seed(33) upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained( "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16 ) upscaler.to("cuda") prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas" low_res_img = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png" ) image = upscaler( prompt=prompt, image=low_res_img, num_inference_steps=20, guidance_scale=0, generator=generator, output_type="np", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy" ) assert np.abs((expected_image - image).max()) < 5e-2
0
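The latent upscaler integration test above chains a base Stable Diffusion pipeline with the x2 latent upscaler by passing latents straight from one pipeline into the other. A minimal standalone sketch of that two-stage flow, assuming a CUDA device and access to the same checkpoints the test downloads:

import torch
from diffusers import StableDiffusionLatentUpscalePipeline, StableDiffusionPipeline

# Stage 1: generate latents with the base model (same checkpoint as the test above).
pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
pipe.to("cuda")

# Stage 2: the latent upscaler consumes those latents directly.
upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
    "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
)
upscaler.to("cuda")

prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"
generator = torch.manual_seed(33)

low_res_latents = pipe(prompt, generator=generator, output_type="latent").images
image = upscaler(
    prompt=prompt,
    image=low_res_latents,
    num_inference_steps=20,
    guidance_scale=0,
    generator=generator,
).images[0]
image.save("astronaut_1024.png")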
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_diffedit.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, nightly, numpy_cosine_similarity_distance, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = StableDiffusionDiffEditPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"} image_params = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) inverse_scheduler = DDIMInverseScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_zero=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "inverse_scheduler": inverse_scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): mask = floats_tensor((1, 16, 
16), rng=random.Random(seed)).to(device) latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "a dog and a newt", "mask_image": mask, "image_latents": latents, "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_dummy_mask_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "source_prompt": "a cat and a frog", "target_prompt": "a dog and a newt", "generator": generator, "num_inference_steps": 2, "num_maps_per_mask": 2, "mask_encode_strength": 1.0, "guidance_scale": 6.0, "output_type": "numpy", } return inputs def get_dummy_inversion_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] image = Image.fromarray(np.uint8(image)).convert("RGB") if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "image": image, "prompt": "a cat and a frog", "generator": generator, "num_inference_steps": 2, "inpaint_strength": 1.0, "guidance_scale": 6.0, "decode_latents": True, "output_type": "numpy", } return inputs def test_save_load_optional_components(self): if not hasattr(self.pipeline_class, "_optional_components"): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(pipe, optional_component, None) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components}) inputs = self.get_dummy_inputs(torch_device) output = pipe(**inputs)[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(tmpdir) pipe_loaded = self.pipeline_class.from_pretrained(tmpdir) pipe_loaded.to(torch_device) pipe_loaded.set_progress_bar_config(disable=None) for optional_component in pipe._optional_components: self.assertTrue( getattr(pipe_loaded, optional_component) is None, f"`{optional_component}` did not stay set to None after loading.", ) inputs = self.get_dummy_inputs(torch_device) output_loaded = pipe_loaded(**inputs)[0] max_diff = np.abs(output - output_loaded).max() self.assertLess(max_diff, 1e-4) def test_mask(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_mask_inputs(device) mask = pipe.generate_mask(**inputs) mask_slice = mask[0, -3:, -3:] self.assertEqual(mask.shape, (1, 16, 16)) expected_slice = np.array([0] * 9) max_diff = np.abs(mask_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) self.assertEqual(mask[0, -3, -4], 0) def test_inversion(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) 
inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5160, 0.5115, 0.5060, 0.5456, 0.4704, 0.5060, 0.5019, 0.4405, 0.4726], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-3) def test_inversion_dpm(self): device = "cpu" components = self.get_dummy_components() scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"} components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args) components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args) pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inversion_inputs(device) image = pipe.invert(**inputs).images image_slice = image[0, -1, -3:, -3:] self.assertEqual(image.shape, (2, 32, 32, 3)) expected_slice = np.array( [0.5305, 0.4673, 0.5314, 0.5308, 0.4886, 0.5279, 0.5142, 0.4724, 0.4892], ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) @require_torch_gpu @nightly class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((256, 256)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_full(self): generator = torch.manual_seed(0) pipe = StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1-base", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.scheduler.clip_sample = True pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=5, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=5, output_type="np", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((256, 256)) ) / 255 ) assert numpy_cosine_similarity_distance(expected_image.flatten(), image.flatten()) < 2e-1 @nightly @require_torch_gpu class StableDiffusionDiffEditPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def setUpClass(cls): raw_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" ) raw_image = raw_image.convert("RGB").resize((768, 768)) cls.raw_image = raw_image def test_stable_diffusion_diffedit_dpm(self): generator = torch.manual_seed(0) pipe = 
StableDiffusionDiffEditPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16 ) pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) source_prompt = "a bowl of fruit" target_prompt = "a bowl of pears" mask_image = pipe.generate_mask( image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator, ) inv_latents = pipe.invert( prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25, ).latents image = pipe( prompt=target_prompt, mask_image=mask_image, image_latents=inv_latents, generator=generator, negative_prompt=source_prompt, inpaint_strength=0.7, num_inference_steps=25, output_type="numpy", ).images[0] expected_image = ( np.array( load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/diffedit/pears.png" ).resize((768, 768)) ) / 255 ) assert np.abs((expected_image - image).max()) < 5e-1
0
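The DiffEdit tests above exercise a three-step workflow: estimate an edit mask from a source/target prompt pair, invert the source image into partially noised latents, then denoise toward the target prompt inside the mask. A minimal sketch of that sequence, assuming a CUDA-capable machine and the same SD 2.1-base checkpoint used in the integration test:

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1-base", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((256, 256))

source_prompt = "a bowl of fruit"
target_prompt = "a bowl of pears"

# 1) estimate which pixels need to change for the edit
mask_image = pipe.generate_mask(image=raw_image, source_prompt=source_prompt, target_prompt=target_prompt)
# 2) invert the source image into partially noised latents
inv_latents = pipe.invert(prompt=source_prompt, image=raw_image, inpaint_strength=0.7).latents
# 3) denoise toward the target prompt, editing only the masked region
image = pipe(
    prompt=target_prompt,
    mask_image=mask_image,
    image_latents=inv_latents,
    negative_prompt=source_prompt,
    inpaint_strength=0.7,
).images[0]
image.save("pears.png")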
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_attend_and_excite.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( load_numpy, nightly, numpy_cosine_similarity_distance, require_torch_gpu, skip_mps, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin torch.backends.cuda.matmul.allow_tf32 = False @skip_mps class StableDiffusionAttendAndExcitePipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionAttendAndExcitePipeline test_attention_slicing = False params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"}) image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS # Attend and excite requires being able to run a backward pass at # inference time. There's no deterministic backward operator for pad @classmethod def setUpClass(cls): super().setUpClass() torch.use_deterministic_algorithms(False) @classmethod def tearDownClass(cls): super().tearDownClass() torch.use_deterministic_algorithms(True) def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: 
generator = torch.Generator(device=device).manual_seed(seed) inputs = inputs = { "prompt": "a cat and a frog", "token_indices": [2, 5], "generator": generator, "num_inference_steps": 1, "guidance_scale": 6.0, "output_type": "numpy", "max_iter_to_alter": 2, "thresholds": {0: 0.7}, } return inputs def test_inference(self): device = "cpu" components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 64, 64, 3)) expected_slice = np.array( [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496] ) max_diff = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(max_diff, 1e-3) def test_sequential_cpu_offload_forward_pass(self): super().test_sequential_cpu_offload_forward_pass(expected_max_diff=5e-4) def test_inference_batch_consistent(self): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2]) def test_inference_batch_single_identical(self): self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4) def test_dict_tuple_outputs_equivalent(self): super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3) def test_pt_np_pil_outputs_equivalent(self): super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4) def test_save_load_local(self): super().test_save_load_local(expected_max_difference=5e-4) def test_save_load_optional_components(self): super().test_save_load_optional_components(expected_max_difference=4e-4) @require_torch_gpu @nightly class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase): # Attend and excite requires being able to run a backward pass at # inference time. There's no deterministic backward operator for pad @classmethod def setUpClass(cls): super().setUpClass() torch.use_deterministic_algorithms(False) @classmethod def tearDownClass(cls): super().tearDownClass() torch.use_deterministic_algorithms(True) def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_attend_and_excite_fp16(self): generator = torch.manual_seed(51) pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16 ) pipe.to("cuda") prompt = "a painting of an elephant with glasses" token_indices = [5, 7] image = pipe( prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy", ).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy" ) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 5e-1
0
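The Attend-and-Excite pipeline tested above takes a token_indices argument naming the prompt tokens whose cross-attention maps should be strengthened during sampling. A minimal sketch of the call, assuming a CUDA device; the step counts are illustrative rather than taken from the test:

import torch
from diffusers import StableDiffusionAttendAndExcitePipeline

pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
).to("cuda")

prompt = "a painting of an elephant with glasses"
# indices of the tokens ("elephant", "glasses") to attend to, as in the integration test
token_indices = [5, 7]

image = pipe(
    prompt=prompt,
    token_indices=token_indices,
    guidance_scale=7.5,
    num_inference_steps=50,
    max_iter_to_alter=25,
    generator=torch.manual_seed(51),
).images[0]
image.save("elephant_glasses.png")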
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_flax_inpaint.py
# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image
from diffusers.utils.testing_utils import require_flax, slow


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "xvjiarui/stable-diffusion-2-inpainting"
        pipeline, params = FlaxStableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        prng_seed = jax.random.PRNGKey(0)
        num_inference_steps = 50

        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        init_image = num_samples * [init_image]
        mask_image = num_samples * [mask_image]
        prompt_ids, processed_masked_images, processed_masks = pipeline.prepare_inputs(prompt, init_image, mask_image)

        # shard inputs and rng
        params = replicate(params)
        prng_seed = jax.random.split(prng_seed, jax.device_count())
        prompt_ids = shard(prompt_ids)
        processed_masked_images = shard(processed_masked_images)
        processed_masks = shard(processed_masks)

        output = pipeline(
            prompt_ids, processed_masks, processed_masked_images, params, prng_seed, num_inference_steps, jit=True
        )

        images = output.images.reshape(num_samples, 512, 512, 3)

        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.3611307, 0.37649736, 0.3757408, 0.38213953, 0.39295167, 0.3841631, 0.41554978, 0.4137475, 0.4217084]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
0
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_upscale.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class StableDiffusionUpscalePipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_image(self): batch_size = 1 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device) return image @property def dummy_cond_unet_upscale(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=512, ) return CLIPTextModel(config) def test_stable_diffusion_upscale(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet_upscale low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") vae = self.dummy_vae text_encoder = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of 
a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ) image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] expected_height_width = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_upscale_batch(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet_upscale low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") vae = self.dummy_vae text_encoder = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" output = sd_pipe( 2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ) image = output.images assert image.shape[0] == 2 generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ) image = output.images assert image.shape[0] == 2 def test_stable_diffusion_upscale_prompt_embeds(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet_upscale low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") vae = self.dummy_vae text_encoder = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe( [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ) image = output.images generator = torch.Generator(device=device).manual_seed(0) prompt_embeds, negative_prompt_embeds 
= sd_pipe.encode_prompt(prompt, device, 1, False) if negative_prompt_embeds is not None: prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds]) image_from_prompt_embeds = sd_pipe( prompt_embeds=prompt_embeds, image=[low_res_image], generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_prompt_embeds_slice = image_from_prompt_embeds[0, -3:, -3:, -1] expected_height_width = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_prompt_embeds_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_stable_diffusion_upscale_fp16(self): """Test that stable diffusion upscale works with fp16""" unet = self.dummy_cond_unet_upscale low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") vae = self.dummy_vae text_encoder = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) # put models in fp16, except vae as it overflows in fp16 unet = unet.half() text_encoder = text_encoder.half() # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) image = sd_pipe( [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np", ).images expected_height_width = low_res_image.size[0] * 4 assert image.shape == (1, expected_height_width, expected_height_width, 3) def test_stable_diffusion_upscale_from_save_pretrained(self): pipes = [] device = "cpu" # ensure determinism for the device-dependent torch.Generator low_res_scheduler = DDPMScheduler() scheduler = DDIMScheduler(prediction_type="v_prediction") tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionUpscalePipeline( unet=self.dummy_cond_unet_upscale, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=self.dummy_vae, text_encoder=self.dummy_text_encoder, tokenizer=tokenizer, max_noise_level=350, ) sd_pipe = sd_pipe.to(device) pipes.append(sd_pipe) with tempfile.TemporaryDirectory() as tmpdirname: sd_pipe.save_pretrained(tmpdirname) sd_pipe = StableDiffusionUpscalePipeline.from_pretrained(tmpdirname).to(device) pipes.append(sd_pipe) prompt = "A painting of a squirrel eating a burger" image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0] low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64)) image_slices = [] for pipe in pipes: generator = torch.Generator(device=device).manual_seed(0) image = pipe( [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", ).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert 
np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 @slow @require_torch_gpu class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_upscale_pipeline(self): image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat.npy" ) model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "a cat sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 1e-3 def test_stable_diffusion_upscale_pipeline_fp16(self): image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale" "/upsampled_cat_fp16.npy" ) model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained( model_id, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = "a cat sitting on a park bench" generator = torch.manual_seed(0) output = pipe( prompt=prompt, image=image, generator=generator, output_type="np", ) image = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained( model_id, torch_dtype=torch.float16, ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() prompt = "a cat sitting on a park bench" generator = torch.manual_seed(0) _ = pipe( prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np", ) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.9 GB is allocated assert mem_bytes < 2.9 * 10**9 def test_download_ckpt_diff_format_is_same(self): image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/sd2-upscale/low_res_cat.png" ) prompt = "a cat sitting on a park bench" model_id = "stabilityai/stable-diffusion-x4-upscaler" pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id) pipe.enable_model_cpu_offload() generator = torch.Generator("cpu").manual_seed(0) output = pipe(prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3) image_from_pretrained = output.images[0] single_file_path = ( "https://huggingface.co/stabilityai/stable-diffusion-x4-upscaler/blob/main/x4-upscaler-ema.safetensors" ) 
pipe_from_single_file = StableDiffusionUpscalePipeline.from_single_file(single_file_path) pipe_from_single_file.enable_model_cpu_offload() generator = torch.Generator("cpu").manual_seed(0) output_from_single_file = pipe_from_single_file( prompt=prompt, image=image, generator=generator, output_type="np", num_inference_steps=3 ) image_from_single_file = output_from_single_file.images[0] assert image_from_pretrained.shape == (512, 512, 3) assert image_from_single_file.shape == (512, 512, 3) assert ( numpy_cosine_similarity_distance(image_from_pretrained.flatten(), image_from_single_file.flatten()) < 1e-3 )
0
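The x4 upscaler tested above takes a low-resolution PIL image together with a prompt and returns an image four times larger; the integration test checks a 512x512 result for the cat image below. A minimal sketch, assuming a CUDA device:

import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

pipe = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
)
pipe.to("cuda")
pipe.enable_attention_slicing()

low_res_img = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/sd2-upscale/low_res_cat.png"
)

image = pipe(
    prompt="a cat sitting on a park bench",
    image=low_res_img,
    generator=torch.manual_seed(0),
).images[0]
image.save("upsampled_cat.png")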
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/stable_diffusion_2/test_stable_diffusion_v_pred.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import time import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, StableDiffusionPipeline, UNet2DConditionModel, ) from diffusers.models.attention_processor import AttnProcessor from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) enable_full_determinism() class StableDiffusion2VPredictionPipelineFastTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def dummy_cond_unet(self): torch.manual_seed(0) model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, ) return model @property def dummy_vae(self): torch.manual_seed(0) model = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) return model @property def dummy_text_encoder(self): torch.manual_seed(0) config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=64, ) return CLIPTextModel(config) def test_stable_diffusion_v_pred_ddim(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, prediction_type="v_prediction", ) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=None, image_encoder=None, requires_safety_checker=False, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( 
[prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6569, 0.6525, 0.5142, 0.4968, 0.4923, 0.4601, 0.4996, 0.5041, 0.4544]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_v_pred_k_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator unet = self.dummy_cond_unet scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", prediction_type="v_prediction" ) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=None, image_encoder=None, requires_safety_checker=False, ) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.Generator(device=device).manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np") image = output.images generator = torch.Generator(device=device).manual_seed(0) image_from_tuple = sd_pipe( [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np", return_dict=False, )[0] image_slice = image[0, -3:, -3:, -1] image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5644, 0.6514, 0.5190, 0.5663, 0.5287, 0.4953, 0.5430, 0.5243, 0.4778]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2 @unittest.skipIf(torch_device != "cuda", "This test requires a GPU") def test_stable_diffusion_v_pred_fp16(self): """Test that stable diffusion v-prediction works with fp16""" unet = self.dummy_cond_unet scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, prediction_type="v_prediction", ) vae = self.dummy_vae bert = self.dummy_text_encoder tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") # put models in fp16 unet = unet.half() vae = vae.half() bert = bert.half() # make sure here that pndm scheduler skips prk sd_pipe = StableDiffusionPipeline( unet=unet, scheduler=scheduler, vae=vae, text_encoder=bert, tokenizer=tokenizer, safety_checker=None, feature_extractor=None, image_encoder=None, requires_safety_checker=False, ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) image = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="np").images assert image.shape == (1, 64, 64, 3) @slow @require_torch_gpu class StableDiffusion2VPredictionPipelineIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_v_pred_default(self): sd_pipe = 
StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2") sd_pipe = sd_pipe.to(torch_device) sd_pipe.enable_attention_slicing() sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 768, 768, 3) expected_slice = np.array([0.1868, 0.1922, 0.1527, 0.1921, 0.1908, 0.1624, 0.1779, 0.1652, 0.1734]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_v_pred_upcast_attention(self): sd_pipe = StableDiffusionPipeline.from_pretrained( "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16 ) sd_pipe = sd_pipe.to(torch_device) sd_pipe.enable_attention_slicing() sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=20, output_type="np") image = output.images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 768, 768, 3) expected_slice = np.array([0.4209, 0.4087, 0.4097, 0.4209, 0.3860, 0.4329, 0.4280, 0.4324, 0.4187]) assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2 def test_stable_diffusion_v_pred_euler(self): scheduler = EulerDiscreteScheduler.from_pretrained("stabilityai/stable-diffusion-2", subfolder="scheduler") sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler) sd_pipe = sd_pipe.to(torch_device) sd_pipe.enable_attention_slicing() sd_pipe.set_progress_bar_config(disable=None) prompt = "A painting of a squirrel eating a burger" generator = torch.manual_seed(0) output = sd_pipe([prompt], generator=generator, num_inference_steps=5, output_type="numpy") image = output.images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 768, 768, 3) expected_slice = np.array([0.1781, 0.1695, 0.1661, 0.1705, 0.1588, 0.1699, 0.2005, 0.1589, 0.1677]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_v_pred_dpm(self): """ TODO: update this test after making DPM compatible with V-prediction! 
""" scheduler = DPMSolverMultistepScheduler.from_pretrained( "stabilityai/stable-diffusion-2", subfolder="scheduler" ) sd_pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", scheduler=scheduler) sd_pipe = sd_pipe.to(torch_device) sd_pipe.enable_attention_slicing() sd_pipe.set_progress_bar_config(disable=None) prompt = "a photograph of an astronaut riding a horse" generator = torch.manual_seed(0) image = sd_pipe( [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=5, output_type="numpy" ).images image_slice = image[0, 253:256, 253:256, -1] assert image.shape == (1, 768, 768, 3) expected_slice = np.array([0.3303, 0.3184, 0.3291, 0.3300, 0.3256, 0.3113, 0.2965, 0.3134, 0.3192]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_attention_slicing_v_pred(self): torch.cuda.reset_peak_memory_stats() model_id = "stabilityai/stable-diffusion-2" pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "a photograph of an astronaut riding a horse" # make attention efficient pipe.enable_attention_slicing() generator = torch.manual_seed(0) output_chunked = pipe( [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy" ) image_chunked = output_chunked.images mem_bytes = torch.cuda.max_memory_allocated() torch.cuda.reset_peak_memory_stats() # make sure that less than 5.5 GB is allocated assert mem_bytes < 5.5 * 10**9 # disable slicing pipe.disable_attention_slicing() generator = torch.manual_seed(0) output = pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy") image = output.images # make sure that more than 3.0 GB is allocated mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes > 3 * 10**9 max_diff = numpy_cosine_similarity_distance(image.flatten(), image_chunked.flatten()) assert max_diff < 1e-3 def test_stable_diffusion_text2img_pipeline_v_pred_default(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" "sd2-text2img/astronaut_riding_a_horse_v_pred.npy" ) pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2") pipe.to(torch_device) pipe.enable_attention_slicing() pipe.set_progress_bar_config(disable=None) prompt = "astronaut riding a horse" generator = torch.manual_seed(0) output = pipe(prompt=prompt, guidance_scale=7.5, generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-3 def test_stable_diffusion_text2img_pipeline_unflawed(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" "sd2-text2img/lion_galaxy.npy" ) pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1") pipe.scheduler = DDIMScheduler.from_config( pipe.scheduler.config, timestep_spacing="trailing", rescale_betas_zero_snr=True ) pipe.to(torch_device) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=None) prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k" generator = torch.Generator("cpu").manual_seed(0) output = pipe( prompt=prompt, guidance_scale=7.5, num_inference_steps=10, guidance_rescale=0.7, generator=generator, output_type="np", ) 
image = output.images[0] assert image.shape == (768, 768, 3) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 5e-2 def test_stable_diffusion_text2img_pipeline_v_pred_fp16(self): expected_image = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/" "sd2-text2img/astronaut_riding_a_horse_v_pred_fp16.npy" ) pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) prompt = "astronaut riding a horse" generator = torch.manual_seed(0) output = pipe(prompt=prompt, guidance_scale=7.5, generator=generator, output_type="np") image = output.images[0] assert image.shape == (768, 768, 3) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-3 def test_download_local(self): filename = hf_hub_download("stabilityai/stable-diffusion-2-1", filename="v2-1_768-ema-pruned.safetensors") pipe = StableDiffusionPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() image_out = pipe("test", num_inference_steps=1, output_type="np").images[0] assert image_out.shape == (768, 768, 3) def test_download_ckpt_diff_format_is_same(self): single_file_path = ( "https://huggingface.co/stabilityai/stable-diffusion-2-1/blob/main/v2-1_768-ema-pruned.safetensors" ) pipe_single = StableDiffusionPipeline.from_single_file(single_file_path) pipe_single.scheduler = DDIMScheduler.from_config(pipe_single.scheduler.config) pipe_single.unet.set_attn_processor(AttnProcessor()) pipe_single.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image_ckpt = pipe_single("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten()) assert max_diff < 1e-3 def test_stable_diffusion_text2img_intermediate_state_v_pred(self): number_of_steps = 0 def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None: test_callback_fn.has_been_called = True nonlocal number_of_steps number_of_steps += 1 if step == 0: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 96, 96) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([0.7749, 0.0325, 0.5088, 0.1619, 0.3372, 0.3667, -0.5186, 0.6860, 1.4326]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 elif step == 19: latents = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 96, 96) latents_slice = latents[0, -3:, -3:, -1] expected_slice = np.array([1.3887, 1.0273, 1.7266, 0.0726, 0.6611, 0.1598, -1.0547, 0.1522, 0.0227]) assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2 test_callback_fn.has_been_called = False pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2", torch_dtype=torch.float16) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() prompt = 
"Andromeda galaxy in a bottle" generator = torch.manual_seed(0) pipe( prompt=prompt, num_inference_steps=20, guidance_scale=7.5, generator=generator, callback=test_callback_fn, callback_steps=1, ) assert test_callback_fn.has_been_called assert number_of_steps == 20 def test_stable_diffusion_low_cpu_mem_usage_v_pred(self): pipeline_id = "stabilityai/stable-diffusion-2" start_time = time.time() pipeline_low_cpu_mem_usage = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) pipeline_low_cpu_mem_usage.to(torch_device) low_cpu_mem_usage_time = time.time() - start_time start_time = time.time() _ = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16, low_cpu_mem_usage=False) normal_load_time = time.time() - start_time assert 2 * low_cpu_mem_usage_time < normal_load_time def test_stable_diffusion_pipeline_with_sequential_cpu_offloading_v_pred(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipeline_id = "stabilityai/stable-diffusion-2" prompt = "Andromeda galaxy in a bottle" pipeline = StableDiffusionPipeline.from_pretrained(pipeline_id, torch_dtype=torch.float16) pipeline = pipeline.to(torch_device) pipeline.enable_attention_slicing(1) pipeline.enable_sequential_cpu_offload() generator = torch.manual_seed(0) _ = pipeline(prompt, generator=generator, num_inference_steps=5) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.8 GB is allocated assert mem_bytes < 2.8 * 10**9
0
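Stable Diffusion 2 is a v-prediction model with a native 768x768 output resolution, which is what the shape assertions above check. A minimal text-to-image sketch, assuming a CUDA device and reusing the trailing-timestep / zero-terminal-SNR scheduler configuration from the "unflawed" test:

import torch
from diffusers import DDIMScheduler, StableDiffusionPipeline

pipe = StableDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16)
# trailing timestep spacing and zero-terminal-SNR betas, as configured in the test above
pipe.scheduler = DDIMScheduler.from_config(
    pipe.scheduler.config, timestep_spacing="trailing", rescale_betas_zero_snr=True
)
pipe.to("cuda")

prompt = "A lion in galaxies, spirals, nebulae, stars, smoke, iridescent, intricate detail, octane render, 8k"
image = pipe(
    prompt=prompt,
    guidance_scale=7.5,
    num_inference_steps=25,
    guidance_rescale=0.7,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]  # 768x768 PIL image
image.save("lion_galaxy.png")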
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/amused/test_amused_img2img.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AmusedImg2ImgPipeline, AmusedScheduler, UVit2DModel, VQModel from diffusers.utils import load_image from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AmusedImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AmusedImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "latents"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "latents", } def get_dummy_components(self): torch.manual_seed(0) transformer = UVit2DModel( hidden_size=32, use_bias=False, hidden_dropout=0.0, cond_embed_dim=32, micro_cond_encode_dim=2, micro_cond_embed_dim=10, encoder_hidden_size=32, vocab_size=32, codebook_size=32, in_channels=32, block_out_channels=32, num_res_blocks=1, downsample=True, upsample=True, block_num_heads=1, num_hidden_layers=1, num_attention_heads=1, attention_dropout=0.0, intermediate_size=32, layer_norm_eps=1e-06, ln_elementwise_affine=True, ) scheduler = AmusedScheduler(mask_token_id=31) torch.manual_seed(0) vqvae = VQModel( act_fn="silu", block_out_channels=[32], down_block_types=[ "DownEncoderBlock2D", ], in_channels=3, latent_channels=32, layers_per_block=2, norm_num_groups=32, num_vq_embeddings=32, out_channels=3, sample_size=32, up_block_types=[ "UpDecoderBlock2D", ], mid_block_add_attention=False, lookup_from_codebook=True, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, projection_dim=32, ) text_encoder = CLIPTextModelWithProjection(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "transformer": transformer, "scheduler": scheduler, "vqvae": vqvae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = torch.full((1, 3, 4, 4), 1.0, dtype=torch.float32, device=device) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "np", "image": image, } return inputs def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of 
generators") def test_inference_batch_single_identical(self): ... @slow @require_torch_gpu class AmusedImg2ImgPipelineSlowTests(unittest.TestCase): def test_amused_256(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((256, 256)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.9993, 1.0, 0.9996, 1.0, 0.9995, 0.9925, 0.9990, 0.9954, 1.0]) assert np.abs(image_slice - expected_slice).max() < 1e-2 def test_amused_256_fp16(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256", torch_dtype=torch.float16, variant="fp16") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((256, 256)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.9980, 0.9980, 0.9940, 0.9944, 0.9960, 0.9908, 1.0, 1.0, 0.9986]) assert np.abs(image_slice - expected_slice).max() < 1e-2 def test_amused_512(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((512, 512)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1344, 0.0985, 0.0, 0.1194, 0.1809, 0.0765, 0.0854, 0.1371, 0.0933]) assert np.abs(image_slice - expected_slice).max() < 0.1 def test_amused_512_fp16(self): pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg") .resize((512, 512)) .convert("RGB") ) image = pipe( "winter mountains", image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1536, 0.1767, 0.0227, 0.1079, 0.2400, 0.1427, 0.1511, 0.1564, 0.1542]) assert np.abs(image_slice - expected_slice).max() < 0.1
0
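Editor's note: for orientation, below is a minimal standalone sketch of the image-to-image call that the AmusedImg2ImgPipelineSlowTests above exercise. The checkpoint id, prompt, image URL, and positional argument order are taken from the tests themselves; the CUDA device, the 12-step setting, and the output filename are illustrative assumptions, not values from this file.

import torch
from diffusers import AmusedImg2ImgPipeline
from diffusers.utils import load_image

# fp16 variant of the 256px aMUSEd checkpoint used by the slow tests
pipe = AmusedImg2ImgPipeline.from_pretrained("amused/amused-256", torch_dtype=torch.float16, variant="fp16")
pipe.to("cuda")

init_image = (
    load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains.jpg")
    .resize((256, 256))
    .convert("RGB")
)
image = pipe(
    "winter mountains",
    init_image,
    generator=torch.Generator().manual_seed(0),
    num_inference_steps=12,  # the tests run only 2 steps to keep CI fast; 12 is an illustrative choice
).images[0]
image.save("winter_mountains_img2img.png")  # hypothetical output path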
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/amused/test_amused_inpaint.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AmusedInpaintPipeline, AmusedScheduler, UVit2DModel, VQModel from diffusers.utils import load_image from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AmusedInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AmusedInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS required_optional_params = PipelineTesterMixin.required_optional_params - { "latents", } def get_dummy_components(self): torch.manual_seed(0) transformer = UVit2DModel( hidden_size=32, use_bias=False, hidden_dropout=0.0, cond_embed_dim=32, micro_cond_encode_dim=2, micro_cond_embed_dim=10, encoder_hidden_size=32, vocab_size=32, codebook_size=32, in_channels=32, block_out_channels=32, num_res_blocks=1, downsample=True, upsample=True, block_num_heads=1, num_hidden_layers=1, num_attention_heads=1, attention_dropout=0.0, intermediate_size=32, layer_norm_eps=1e-06, ln_elementwise_affine=True, ) scheduler = AmusedScheduler(mask_token_id=31) torch.manual_seed(0) vqvae = VQModel( act_fn="silu", block_out_channels=[32], down_block_types=[ "DownEncoderBlock2D", ], in_channels=3, latent_channels=32, layers_per_block=2, norm_num_groups=32, num_vq_embeddings=32, out_channels=3, sample_size=32, up_block_types=[ "UpDecoderBlock2D", ], mid_block_add_attention=False, lookup_from_codebook=True, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, projection_dim=32, ) text_encoder = CLIPTextModelWithProjection(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "transformer": transformer, "scheduler": scheduler, "vqvae": vqvae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) image = torch.full((1, 3, 4, 4), 1.0, dtype=torch.float32, device=device) mask_image = torch.full((1, 1, 4, 4), 1.0, dtype=torch.float32, device=device) mask_image[0, 0, 0, 0] = 0 mask_image[0, 0, 0, 1] = 0 inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "np", "image": image, "mask_image": mask_image, } return inputs def test_inference_batch_consistent(self, 
batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") def test_inference_batch_single_identical(self): ... @slow @require_torch_gpu class AmusedInpaintPipelineSlowTests(unittest.TestCase): def test_amused_256(self): pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg") .resize((256, 256)) .convert("RGB") ) mask_image = ( load_image( "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png" ) .resize((256, 256)) .convert("L") ) image = pipe( "winter mountains", image, mask_image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.0699, 0.0716, 0.0608, 0.0715, 0.0797, 0.0638, 0.0802, 0.0924, 0.0634]) assert np.abs(image_slice - expected_slice).max() < 0.1 def test_amused_256_fp16(self): pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg") .resize((256, 256)) .convert("RGB") ) mask_image = ( load_image( "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png" ) .resize((256, 256)) .convert("L") ) image = pipe( "winter mountains", image, mask_image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.0735, 0.0749, 0.0650, 0.0739, 0.0805, 0.0667, 0.0802, 0.0923, 0.0622]) assert np.abs(image_slice - expected_slice).max() < 0.1 def test_amused_512(self): pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-512") pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg") .resize((512, 512)) .convert("RGB") ) mask_image = ( load_image( "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png" ) .resize((512, 512)) .convert("L") ) image = pipe( "winter mountains", image, mask_image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0005, 0.0]) assert np.abs(image_slice - expected_slice).max() < 0.05 def test_amused_512_fp16(self): pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = ( load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg") .resize((512, 512)) .convert("RGB") ) mask_image = ( load_image( "https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png" ) .resize((512, 512)) .convert("L") ) image = pipe( "winter mountains", image, mask_image, generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np", ).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0025, 0.0]) assert np.abs(image_slice - expected_slice).max() < 3e-3
0
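Editor's note: the inpainting slow tests above follow the same pattern with an extra mask argument; a hedged standalone sketch of that call follows. The checkpoint id, asset URLs, mask conversion to "L", and the positional prompt/image/mask order come from the tests; the device, step count, and output path are assumptions.

import torch
from diffusers import AmusedInpaintPipeline
from diffusers.utils import load_image

pipe = AmusedInpaintPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16)
pipe.to("cuda")

image = (
    load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1.jpg")
    .resize((256, 256))
    .convert("RGB")
)
mask = (
    load_image("https://huggingface.co/datasets/diffusers/docs-images/resolve/main/open_muse/mountains_1_mask.png")
    .resize((256, 256))
    .convert("L")  # single-channel mask, exactly as the tests prepare it
)
result = pipe(
    "winter mountains",
    image,
    mask,
    generator=torch.Generator().manual_seed(0),
    num_inference_steps=12,  # illustrative; the tests use 2 steps
).images[0]
result.save("winter_mountains_inpainted.png")  # hypothetical output path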
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/amused/test_amused.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import AmusedPipeline, AmusedScheduler, UVit2DModel, VQModel from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class AmusedPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = AmusedPipeline params = TEXT_TO_IMAGE_PARAMS | {"encoder_hidden_states", "negative_encoder_hidden_states"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) transformer = UVit2DModel( hidden_size=32, use_bias=False, hidden_dropout=0.0, cond_embed_dim=32, micro_cond_encode_dim=2, micro_cond_embed_dim=10, encoder_hidden_size=32, vocab_size=32, codebook_size=32, in_channels=32, block_out_channels=32, num_res_blocks=1, downsample=True, upsample=True, block_num_heads=1, num_hidden_layers=1, num_attention_heads=1, attention_dropout=0.0, intermediate_size=32, layer_norm_eps=1e-06, ln_elementwise_affine=True, ) scheduler = AmusedScheduler(mask_token_id=31) torch.manual_seed(0) vqvae = VQModel( act_fn="silu", block_out_channels=[32], down_block_types=[ "DownEncoderBlock2D", ], in_channels=3, latent_channels=32, layers_per_block=2, norm_num_groups=32, num_vq_embeddings=32, out_channels=3, sample_size=32, up_block_types=[ "UpDecoderBlock2D", ], mid_block_add_attention=False, lookup_from_codebook=True, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, projection_dim=32, ) text_encoder = CLIPTextModelWithProjection(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "transformer": transformer, "scheduler": scheduler, "vqvae": vqvae, "text_encoder": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "np", "height": 4, "width": 4, } return inputs def test_inference_batch_consistent(self, batch_sizes=[2]): self._test_inference_batch_consistent(batch_sizes=batch_sizes, batch_generator=False) @unittest.skip("aMUSEd does not support lists of generators") def test_inference_batch_single_identical(self): ... 
@slow @require_torch_gpu class AmusedPipelineSlowTests(unittest.TestCase): def test_amused_256(self): pipe = AmusedPipeline.from_pretrained("amused/amused-256") pipe.to(torch_device) image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.4011, 0.3992, 0.3790, 0.3856, 0.3772, 0.3711, 0.3919, 0.3850, 0.3625]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_amused_256_fp16(self): pipe = AmusedPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.0554, 0.05129, 0.0344, 0.0452, 0.0476, 0.0271, 0.0495, 0.0527, 0.0158]) assert np.abs(image_slice - expected_slice).max() < 7e-3 def test_amused_512(self): pipe = AmusedPipeline.from_pretrained("amused/amused-512") pipe.to(torch_device) image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9960, 0.9960, 0.9946, 0.9980, 0.9947, 0.9932, 0.9960, 0.9961, 0.9947]) assert np.abs(image_slice - expected_slice).max() < 3e-3 def test_amused_512_fp16(self): pipe = AmusedPipeline.from_pretrained("amused/amused-512", variant="fp16", torch_dtype=torch.float16) pipe.to(torch_device) image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=2, output_type="np").images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9983, 1.0, 1.0, 1.0, 1.0, 0.9989, 0.9994, 0.9976, 0.9977]) assert np.abs(image_slice - expected_slice).max() < 3e-3
0
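Editor's note: the text-to-image counterpart driven by AmusedPipelineSlowTests reduces to the short sketch below; the checkpoint id, prompt, and seeded generator come from the tests, while the device, step count, and output path are illustrative.

import torch
from diffusers import AmusedPipeline

pipe = AmusedPipeline.from_pretrained("amused/amused-256", variant="fp16", torch_dtype=torch.float16)
pipe.to("cuda")
image = pipe("dog", generator=torch.Generator().manual_seed(0), num_inference_steps=12).images[0]
image.save("dog.png")  # hypothetical output path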
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_consistency_models/test_latent_consistency_models_img2img.py
import gc import inspect import random import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, LatentConsistencyModelImg2ImgPipeline, LCMScheduler, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class LatentConsistencyModelImg2ImgPipelineFastTests( PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = LatentConsistencyModelImg2ImgPipeline params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "negative_prompt", "negative_prompt_embeds"} required_optional_params = PipelineTesterMixin.required_optional_params - {"latents", "negative_prompt"} batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, time_cond_proj_dim=32, ) scheduler = LCMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, "requires_safety_checker": False, } return components def get_dummy_inputs(self, device, seed=0): image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image / 2 + 0.5 if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_lcm_onestep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 1 output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.4388, 0.3717, 0.2202, 0.7213, 0.6370, 
0.3664, 0.5815, 0.6080, 0.4977]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_multistep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.4150, 0.3719, 0.2479, 0.6333, 0.6024, 0.3778, 0.5036, 0.5420, 0.4678]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = pipe(**inputs) image = output.images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.3994, 0.3471, 0.2540, 0.7030, 0.6193, 0.3645, 0.5777, 0.5850, 0.4965]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) # override default test because the final latent variable is "denoised" instead of "latents" def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class LatentConsistencyModelImg2ImgPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_img2img/sketch-mountains-input.png" ) init_image = init_image.resize((512, 512)) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": 
latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", "image": init_image, } return inputs def test_lcm_onestep(self): pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", safety_checker=None ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.1950, 0.1961, 0.2308, 0.1786, 0.1837, 0.2320, 0.1898, 0.1885, 0.2309]) assert np.abs(image_slice - expected_slice).max() < 1e-3 def test_lcm_multistep(self): pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained( "SimianLuo/LCM_Dreamshaper_v7", safety_checker=None ) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.3756, 0.3816, 0.3767, 0.3718, 0.3739, 0.3735, 0.3863, 0.3803, 0.3563]) assert np.abs(image_slice - expected_slice).max() < 1e-3
0
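Editor's note: the LCM image-to-image slow tests above condense to the following standalone sketch. The checkpoint, scheduler swap, prompt, and init-image URL are lifted from the tests; the device and the 4-step setting are illustrative (LCM is designed for very few steps).

import torch
from diffusers import LatentConsistencyModelImg2ImgPipeline, LCMScheduler
from diffusers.utils import load_image

pipe = LatentConsistencyModelImg2ImgPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)  # same scheduler swap as the slow tests
pipe.to("cuda")

init_image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main"
    "/stable_diffusion_img2img/sketch-mountains-input.png"
).resize((512, 512))
image = pipe(
    "a photograph of an astronaut riding a horse",
    image=init_image,
    num_inference_steps=4,  # illustrative; the tests use 1 and 3 steps
    guidance_scale=7.5,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]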
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/latent_consistency_models/test_latent_consistency_models.py
import gc import inspect import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, LatentConsistencyModelPipeline, LCMScheduler, UNet2DConditionModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class LatentConsistencyModelPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase): pipeline_class = LatentConsistencyModelPipeline params = TEXT_TO_IMAGE_PARAMS - {"negative_prompt", "negative_prompt_embeds"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS - {"negative_prompt"} image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(4, 8), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, norm_num_groups=2, time_cond_proj_dim=32, ) scheduler = LCMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[4, 8], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, norm_num_groups=2, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=64, layer_norm_eps=1e-05, num_attention_heads=8, num_hidden_layers=3, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, "requires_safety_checker": False, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_lcm_onestep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 1 output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1441, 0.5304, 0.5452, 0.1361, 0.4011, 0.4370, 0.5326, 0.3492, 0.3637]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_multistep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) 
pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LatentConsistencyModelPipeline(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] output = pipe(**inputs) image = output.images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1403, 0.5072, 0.5316, 0.1202, 0.3865, 0.4211, 0.5363, 0.3557, 0.3645]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=5e-4) # skip because lcm pipeline apply cfg differently def test_callback_cfg(self): pass # override default test because the final latent variable is "denoised" instead of "latents" def test_callback_inputs(self): sig = inspect.signature(self.pipeline_class.__call__) if not ("callback_on_step_end_tensor_inputs" in sig.parameters and "callback_on_step_end" in sig.parameters): return components = self.get_dummy_components() pipe = self.pipeline_class(**components) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) self.assertTrue( hasattr(pipe, "_callback_tensor_inputs"), f" {self.pipeline_class} should have `_callback_tensor_inputs` that defines a list of tensor variables its callback function can use as inputs", ) def callback_inputs_test(pipe, i, t, callback_kwargs): missing_callback_inputs = set() for v in pipe._callback_tensor_inputs: if v not in callback_kwargs: missing_callback_inputs.add(v) self.assertTrue( len(missing_callback_inputs) == 0, f"Missing callback tensor inputs: {missing_callback_inputs}" ) last_i = pipe.num_timesteps - 1 if i == last_i: callback_kwargs["denoised"] = torch.zeros_like(callback_kwargs["denoised"]) return callback_kwargs inputs = self.get_dummy_inputs(torch_device) inputs["callback_on_step_end"] = callback_inputs_test inputs["callback_on_step_end_tensor_inputs"] = pipe._callback_tensor_inputs inputs["output_type"] = "latent" output = pipe(**inputs)[0] assert output.abs().sum() == 0 @slow @require_torch_gpu class LatentConsistencyModelPipelineSlowTests(unittest.TestCase): def setUp(self): gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "a photograph of an astronaut riding a horse", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_lcm_onestep(self): pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = 
self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.1025, 0.0911, 0.0984, 0.0981, 0.0901, 0.0918, 0.1055, 0.0940, 0.0730]) assert np.abs(image_slice - expected_slice).max() < 1e-3 def test_lcm_multistep(self): pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None) pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images assert image.shape == (1, 512, 512, 3) image_slice = image[0, -3:, -3:, -1].flatten() expected_slice = np.array([0.01855, 0.01855, 0.01489, 0.01392, 0.01782, 0.01465, 0.01831, 0.02539, 0.0]) assert np.abs(image_slice - expected_slice).max() < 1e-3
0
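Editor's note: test_lcm_custom_timesteps above passes an explicit timestep schedule instead of num_inference_steps; the sketch below applies the same pattern to the real checkpoint. The [999, 499] schedule is the one used in the fast test and everything else mirrors the slow tests; the device is an assumption.

import torch
from diffusers import LatentConsistencyModelPipeline, LCMScheduler

pipe = LatentConsistencyModelPipeline.from_pretrained("SimianLuo/LCM_Dreamshaper_v7", safety_checker=None)
pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")
image = pipe(
    "a photograph of an astronaut riding a horse",
    timesteps=[999, 499],  # explicit two-step schedule, as in the fast test
    guidance_scale=7.5,
    generator=torch.Generator("cpu").manual_seed(0),
).images[0]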
hf_public_repos/diffusers/tests/pipelines
hf_public_repos/diffusers/tests/pipelines/consistency_models/test_consistency_models.py
import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel, ) from diffusers.utils.testing_utils import ( enable_full_determinism, nightly, require_torch_2, require_torch_gpu, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = ConsistencyModelPipeline params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt required_optional_params = frozenset( [ "num_inference_steps", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) @property def dummy_uncond_unet(self): unet = UNet2DModel.from_pretrained( "diffusers/consistency-models-test", subfolder="test_unet", ) return unet @property def dummy_cond_unet(self): unet = UNet2DModel.from_pretrained( "diffusers/consistency-models-test", subfolder="test_unet_class_cond", ) return unet def get_dummy_components(self, class_cond=False): if class_cond: unet = self.dummy_cond_unet else: unet = self.dummy_uncond_unet # Default to CM multistep sampler scheduler = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, ) components = { "unet": unet, "scheduler": scheduler, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "batch_size": 1, "num_inference_steps": None, "timesteps": [22, 0], "generator": generator, "output_type": "np", } return inputs def test_consistency_model_pipeline_multistep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = ConsistencyModelPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_consistency_model_pipeline_multistep_class_cond(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(class_cond=True) pipe = ConsistencyModelPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["class_labels"] = 0 image = pipe(**inputs).images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_consistency_model_pipeline_onestep(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = ConsistencyModelPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) 
inputs["num_inference_steps"] = 1 inputs["timesteps"] = None image = pipe(**inputs).images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_consistency_model_pipeline_onestep_class_cond(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(class_cond=True) pipe = ConsistencyModelPipeline(**components) pipe = pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) inputs["num_inference_steps"] = 1 inputs["timesteps"] = None inputs["class_labels"] = 0 image = pipe(**inputs).images assert image.shape == (1, 32, 32, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @nightly @require_torch_gpu class ConsistencyModelPipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)): generator = torch.manual_seed(seed) inputs = { "num_inference_steps": None, "timesteps": [22, 0], "class_labels": 0, "generator": generator, "output_type": "np", } if get_fixed_latents: latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape) inputs["latents"] = latents return inputs def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float32, shape=(1, 3, 64, 64)): if isinstance(device, str): device = torch.device(device) generator = torch.Generator(device=device).manual_seed(seed) latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype) return latents def test_consistency_model_cd_multistep(self): unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2") scheduler = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, ) pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler) pipe.to(torch_device=torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() image = pipe(**inputs).images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0146, 0.0158, 0.0092, 0.0086, 0.0000, 0.0000, 0.0000, 0.0000, 0.0058]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 def test_consistency_model_cd_onestep(self): unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2") scheduler = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, ) pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler) pipe.to(torch_device=torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs() inputs["num_inference_steps"] = 1 inputs["timesteps"] = None image = pipe(**inputs).images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.0059, 0.0003, 0.0000, 0.0023, 0.0052, 0.0007, 0.0165, 0.0081, 0.0095]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @require_torch_2 def test_consistency_model_cd_multistep_flash_attn(self): unet = UNet2DModel.from_pretrained("diffusers/consistency_models", 
subfolder="diffusers_cd_imagenet64_l2") scheduler = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, ) pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler) pipe.to(torch_device=torch_device, torch_dtype=torch.float16) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(get_fixed_latents=True, device=torch_device) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): image = pipe(**inputs).images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1845, 0.1371, 0.1211, 0.2035, 0.1954, 0.1323, 0.1773, 0.1593, 0.1314]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @require_torch_2 def test_consistency_model_cd_onestep_flash_attn(self): unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2") scheduler = CMStochasticIterativeScheduler( num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0, ) pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler) pipe.to(torch_device=torch_device, torch_dtype=torch.float16) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(get_fixed_latents=True, device=torch_device) inputs["num_inference_steps"] = 1 inputs["timesteps"] = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False): image = pipe(**inputs).images assert image.shape == (1, 64, 64, 3) image_slice = image[0, -3:, -3:, -1] expected_slice = np.array([0.1623, 0.2009, 0.2387, 0.1731, 0.1168, 0.1202, 0.2031, 0.1327, 0.2447]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
0
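Editor's note: the consistency-model slow tests above condense to the sketch below: one-step sampling plus the explicit [22, 0] multistep schedule from get_inputs(). The checkpoint, subfolder, and scheduler settings are taken from the tests; the device is an assumption.

import torch
from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")

# one-step (distilled) sampling
onestep = pipe(num_inference_steps=1, class_labels=0, generator=torch.manual_seed(0)).images[0]
# multistep sampling via an explicit timestep schedule, as in the slow tests
multistep = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0, generator=torch.manual_seed(0)).images[0]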
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_unet_spatiotemporal.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import unittest import torch from diffusers import UNetSpatioTemporalConditionModel from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_all_close, torch_device, ) from .test_modeling_common import ModelTesterMixin, UNetTesterMixin logger = logging.get_logger(__name__) enable_full_determinism() class UNetSpatioTemporalConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNetSpatioTemporalConditionModel main_input_name = "sample" @property def dummy_input(self): batch_size = 2 num_frames = 2 num_channels = 4 sizes = (32, 32) noise = floats_tensor((batch_size, num_frames, num_channels) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 1, 32)).to(torch_device) return { "sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states, "added_time_ids": self._get_add_time_ids(), } @property def input_shape(self): return (2, 2, 4, 32, 32) @property def output_shape(self): return (4, 32, 32) @property def fps(self): return 6 @property def motion_bucket_id(self): return 127 @property def noise_aug_strength(self): return 0.02 @property def addition_time_embed_dim(self): return 32 def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64), "down_block_types": ( "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal", ), "up_block_types": ( "UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", ), "cross_attention_dim": 32, "num_attention_heads": 8, "out_channels": 4, "in_channels": 4, "layers_per_block": 2, "sample_size": 32, "projection_class_embeddings_input_dim": self.addition_time_embed_dim * 3, "addition_time_embed_dim": self.addition_time_embed_dim, } inputs_dict = self.dummy_input return init_dict, inputs_dict def _get_add_time_ids(self, do_classifier_free_guidance=True): add_time_ids = [self.fps, self.motion_bucket_id, self.noise_aug_strength] passed_add_embed_dim = self.addition_time_embed_dim * len(add_time_ids) expected_add_embed_dim = self.addition_time_embed_dim * 3 if expected_add_embed_dim != passed_add_embed_dim: raise ValueError( f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`." 
) add_time_ids = torch.tensor([add_time_ids], device=torch_device) add_time_ids = add_time_ids.repeat(1, 1) if do_classifier_free_guidance: add_time_ids = torch.cat([add_time_ids, add_time_ids]) return add_time_ids @unittest.skip("Number of Norm Groups is not configurable") def test_forward_with_norm_groups(self): pass @unittest.skip("Deprecated functionality") def test_model_attention_slicing(self): pass @unittest.skip("Not supported") def test_model_with_use_linear_projection(self): pass @unittest.skip("Not supported") def test_model_with_simple_projection(self): pass @unittest.skip("Not supported") def test_model_with_class_embeddings_concat(self): pass @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_enable_works(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.enable_xformers_memory_efficient_attention() assert ( model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ == "XFormersAttnProcessor" ), "xformers is not enabled" @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS") def test_gradient_checkpointing(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() labels = torch.randn_like(out) loss = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing model_2 = self.model_class(**init_dict) # clone model model_2.load_state_dict(model.state_dict()) model_2.to(torch_device) model_2.enable_gradient_checkpointing() assert model_2.is_gradient_checkpointing and model_2.training out_2 = model_2(**inputs_dict).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_2.zero_grad() loss_2 = (out_2 - labels).mean() loss_2.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_2).abs() < 1e-5) named_params = dict(model.named_parameters()) named_params_2 = dict(model_2.named_parameters()) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5)) def test_model_with_num_attention_heads_tuple(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["num_attention_heads"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_cross_attention_dim_tuple(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = (32, 32) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_gradient_checkpointing_is_applied(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["num_attention_heads"] = (8, 16) model_class_copy = copy.copy(self.model_class) modules_with_gc_enabled = {} # now monkey patch the following function: # def _set_gradient_checkpointing(self, module, value=False): # if hasattr(module, "gradient_checkpointing"): # module.gradient_checkpointing = value def _set_gradient_checkpointing_new(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value modules_with_gc_enabled[module.__class__.__name__] = True model_class_copy._set_gradient_checkpointing = _set_gradient_checkpointing_new model = model_class_copy(**init_dict) model.enable_gradient_checkpointing() EXPECTED_SET = { "TransformerSpatioTemporalModel", "CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal", "UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", "UNetMidBlockSpatioTemporal", } assert set(modules_with_gc_enabled.keys()) == EXPECTED_SET assert all(modules_with_gc_enabled.values()), "All modules should be enabled" def test_pickle(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["num_attention_heads"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample = model(**inputs_dict).sample sample_copy = copy.copy(sample) assert (sample - sample_copy).abs().max() < 1e-4
0
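Editor's note: as a compact sanity check of the configuration the spatio-temporal UNet tests above build, the same init_dict and dummy-input shapes can be exercised directly. The added_time_ids values (fps 6, motion bucket 127, noise-aug strength 0.02) come from the test properties; the random inputs are illustrative.

import torch
from diffusers import UNetSpatioTemporalConditionModel

model = UNetSpatioTemporalConditionModel(
    block_out_channels=(32, 64),
    down_block_types=("CrossAttnDownBlockSpatioTemporal", "DownBlockSpatioTemporal"),
    up_block_types=("UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal"),
    cross_attention_dim=32,
    num_attention_heads=8,
    in_channels=4,
    out_channels=4,
    layers_per_block=2,
    sample_size=32,
    projection_class_embeddings_input_dim=32 * 3,
    addition_time_embed_dim=32,
)
sample = torch.randn(2, 2, 4, 32, 32)  # (batch, frames, channels, height, width)
encoder_hidden_states = torch.randn(2, 1, 32)
added_time_ids = torch.tensor([[6.0, 127.0, 0.02]]).repeat(2, 1)  # fps, motion_bucket_id, noise_aug_strength
with torch.no_grad():
    out = model(sample, torch.tensor([10]), encoder_hidden_states, added_time_ids=added_time_ids).sample
assert out.shape == sample.shape  # the tests expect matching input and output shapes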
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_unet_2d_flax.py
import gc import unittest from parameterized import parameterized from diffusers import FlaxUNet2DConditionModel from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp @slow @require_flax class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False): dtype = jnp.bfloat16 if fp16 else jnp.float32 image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype) return image def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"): dtype = jnp.bfloat16 if fp16 else jnp.float32 revision = "bf16" if fp16 else None model, params = FlaxUNet2DConditionModel.from_pretrained( model_id, subfolder="unet", dtype=dtype, revision=revision ) return model, params def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False): dtype = jnp.bfloat16 if fp16 else jnp.float32 hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype) return hidden_states @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice): model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True) latents = self.get_latents(seed, fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) sample = model.apply( {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample assert sample.shape == latents.shape output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice): model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True) sample = model.apply( {"params": params}, latents, jnp.array(timestep, dtype=jnp.int32), encoder_hidden_states=encoder_hidden_states, ).sample assert sample.shape == latents.shape output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32) expected_output_slice = jnp.array(expected_slice, 
dtype=jnp.float32) # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
0
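Editor's note: each parameterized Flax case above boils down to one apply() call; a condensed sketch follows. It assumes jax/flax are installed and that the gaussian-noise arrays named by get_file_format() are reachable via load_hf_numpy; seed 83 and timestep 4 correspond to the first parameterized case.

import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils.testing_utils import load_hf_numpy

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.bfloat16, revision="bf16"
)
latents = jnp.array(load_hf_numpy("gaussian_noise_s=83_shape=4_4_64_64.npy"), dtype=jnp.bfloat16)
encoder_hidden_states = jnp.array(load_hf_numpy("gaussian_noise_s=83_shape=4_77_768.npy"), dtype=jnp.bfloat16)
sample = model.apply(
    {"params": params},
    latents,
    jnp.array(4, dtype=jnp.int32),  # timestep for the first parameterized case
    encoder_hidden_states=encoder_hidden_states,
).sample
assert sample.shape == latents.shape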
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_attention_processor.py
import tempfile import unittest import numpy as np import torch from diffusers import DiffusionPipeline from diffusers.models.attention_processor import Attention, AttnAddedKVProcessor class AttnAddedKVProcessorTests(unittest.TestCase): def get_constructor_arguments(self, only_cross_attention: bool = False): query_dim = 10 if only_cross_attention: cross_attention_dim = 12 else: # when only cross attention is not set, the cross attention dim must be the same as the query dim cross_attention_dim = query_dim return { "query_dim": query_dim, "cross_attention_dim": cross_attention_dim, "heads": 2, "dim_head": 4, "added_kv_proj_dim": 6, "norm_num_groups": 1, "only_cross_attention": only_cross_attention, "processor": AttnAddedKVProcessor(), } def get_forward_arguments(self, query_dim, added_kv_proj_dim): batch_size = 2 hidden_states = torch.rand(batch_size, query_dim, 3, 2) encoder_hidden_states = torch.rand(batch_size, 4, added_kv_proj_dim) attention_mask = None return { "hidden_states": hidden_states, "encoder_hidden_states": encoder_hidden_states, "attention_mask": attention_mask, } def test_only_cross_attention(self): # self and cross attention torch.manual_seed(0) constructor_args = self.get_constructor_arguments(only_cross_attention=False) attn = Attention(**constructor_args) self.assertTrue(attn.to_k is not None) self.assertTrue(attn.to_v is not None) forward_args = self.get_forward_arguments( query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"] ) self_and_cross_attn_out = attn(**forward_args) # only self attention torch.manual_seed(0) constructor_args = self.get_constructor_arguments(only_cross_attention=True) attn = Attention(**constructor_args) self.assertTrue(attn.to_k is None) self.assertTrue(attn.to_v is None) forward_args = self.get_forward_arguments( query_dim=constructor_args["query_dim"], added_kv_proj_dim=constructor_args["added_kv_proj_dim"] ) only_cross_attn_out = attn(**forward_args) self.assertTrue((only_cross_attn_out != self_and_cross_attn_out).all()) class DeprecatedAttentionBlockTests(unittest.TestCase): def test_conversion_when_using_device_map(self): pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None) pre_conversion = pipe( "foo", num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np", ).images # the initial conversion succeeds pipe = DiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", device_map="sequential", safety_checker=None ) conversion = pipe( "foo", num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np", ).images with tempfile.TemporaryDirectory() as tmpdir: # save the converted model pipe.save_pretrained(tmpdir) # can also load the converted weights pipe = DiffusionPipeline.from_pretrained(tmpdir, device_map="sequential", safety_checker=None) after_conversion = pipe( "foo", num_inference_steps=2, generator=torch.Generator("cpu").manual_seed(0), output_type="np", ).images self.assertTrue(np.allclose(pre_conversion, conversion, atol=1e-5)) self.assertTrue(np.allclose(conversion, after_conversion, atol=1e-5))
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_unet_motion.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import os import tempfile import unittest import numpy as np import torch from diffusers import MotionAdapter, UNet2DConditionModel, UNetMotionModel from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, torch_device, ) from .test_modeling_common import ModelTesterMixin, UNetTesterMixin logger = logging.get_logger(__name__) enable_full_determinism() class UNetMotionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNetMotionModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 num_frames = 8 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 8, 32, 32) @property def output_shape(self): return (4, 8, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64), "down_block_types": ("CrossAttnDownBlockMotion", "DownBlockMotion"), "up_block_types": ("UpBlockMotion", "CrossAttnUpBlockMotion"), "cross_attention_dim": 32, "num_attention_heads": 4, "out_channels": 4, "in_channels": 4, "layers_per_block": 1, "sample_size": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_unet2d(self): torch.manual_seed(0) unet2d = UNet2DConditionModel() torch.manual_seed(1) model = self.model_class.from_unet2d(unet2d) model_state_dict = model.state_dict() for param_name, param_value in unet2d.named_parameters(): self.assertTrue(torch.equal(model_state_dict[param_name], param_value)) def test_freeze_unet2d(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.freeze_unet2d_params() for param_name, param_value in model.named_parameters(): if "motion_modules" not in param_name: self.assertFalse(param_value.requires_grad) else: self.assertTrue(param_value.requires_grad) def test_loading_motion_adapter(self): model = self.model_class() adapter = MotionAdapter() model.load_motion_modules(adapter) for idx, down_block in enumerate(model.down_blocks): adapter_state_dict = adapter.down_blocks[idx].motion_modules.state_dict() for param_name, param_value in down_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value)) for idx, up_block in enumerate(model.up_blocks): adapter_state_dict = adapter.up_blocks[idx].motion_modules.state_dict() for param_name, param_value in up_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(adapter_state_dict[param_name], param_value)) mid_block_adapter_state_dict = 
adapter.mid_block.motion_modules.state_dict() for param_name, param_value in model.mid_block.motion_modules.named_parameters(): self.assertTrue(torch.equal(mid_block_adapter_state_dict[param_name], param_value)) def test_saving_motion_modules(self): torch.manual_seed(0) init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: model.save_motion_modules(tmpdirname) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "diffusion_pytorch_model.safetensors"))) adapter_loaded = MotionAdapter.from_pretrained(tmpdirname) torch.manual_seed(0) model_loaded = self.model_class(**init_dict) model_loaded.load_motion_modules(adapter_loaded) model_loaded.to(torch_device) with torch.no_grad(): output = model(**inputs_dict)[0] output_loaded = model_loaded(**inputs_dict)[0] max_diff = (output - output_loaded).abs().max().item() self.assertLessEqual(max_diff, 1e-4, "Models give different forward passes") @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_enable_works(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.enable_xformers_memory_efficient_attention() assert ( model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ == "XFormersAttnProcessor" ), "xformers is not enabled" def test_gradient_checkpointing_is_applied(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model_class_copy = copy.copy(self.model_class) modules_with_gc_enabled = {} # now monkey patch the following function: # def _set_gradient_checkpointing(self, module, value=False): # if hasattr(module, "gradient_checkpointing"): # module.gradient_checkpointing = value def _set_gradient_checkpointing_new(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value modules_with_gc_enabled[module.__class__.__name__] = True model_class_copy._set_gradient_checkpointing = _set_gradient_checkpointing_new model = model_class_copy(**init_dict) model.enable_gradient_checkpointing() EXPECTED_SET = { "CrossAttnUpBlockMotion", "CrossAttnDownBlockMotion", "UNetMidBlockCrossAttnMotion", "UpBlockMotion", "Transformer2DModel", "DownBlockMotion", } assert set(modules_with_gc_enabled.keys()) == EXPECTED_SET assert all(modules_with_gc_enabled.values()), "All modules should be enabled" def test_feed_forward_chunking(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 32 model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict)[0] model.enable_forward_chunking() with torch.no_grad(): output_2 = model(**inputs_dict)[0] self.assertEqual(output.shape, output_2.shape, "Shape doesn't match") assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2 def test_pickle(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample = model(**inputs_dict).sample sample_copy = copy.copy(sample) assert (sample - sample_copy).abs().max() < 1e-4 def test_from_save_pretrained(self, expected_max_diff=5e-5): init_dict, inputs_dict = 
self.prepare_init_args_and_inputs_for_common()

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, safe_serialization=False)
            torch.manual_seed(0)
            new_model = self.model_class.from_pretrained(tmpdirname)
            new_model.to(torch_device)

        with torch.no_grad():
            image = model(**inputs_dict)
            if isinstance(image, dict):
                image = image.to_tuple()[0]

            new_image = new_model(**inputs_dict)
            if isinstance(new_image, dict):
                new_image = new_image.to_tuple()[0]

        max_diff = (image - new_image).abs().max().item()
        self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")

    def test_from_save_pretrained_variant(self, expected_max_diff=5e-5):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False)

            torch.manual_seed(0)
            new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16")

            # non-variant cannot be loaded
            with self.assertRaises(OSError) as error_context:
                self.model_class.from_pretrained(tmpdirname)

            # make sure that error message states what keys are missing
            assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception)

            new_model.to(torch_device)

        with torch.no_grad():
            image = model(**inputs_dict)
            if isinstance(image, dict):
                image = image.to_tuple()[0]

            new_image = new_model(**inputs_dict)
            if isinstance(new_image, dict):
                new_image = new_image.to_tuple()[0]

        max_diff = (image - new_image).abs().max().item()
        self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")

    def test_forward_with_norm_groups(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["norm_num_groups"] = 16
        init_dict["block_out_channels"] = (16, 32)

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)
            if isinstance(output, dict):
                output = output.to_tuple()[0]

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_unet_2d_condition.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import os import tempfile import unittest from collections import OrderedDict import torch from parameterized import parameterized from pytest import mark from diffusers import UNet2DConditionModel from diffusers.models.attention_processor import CustomDiffusionAttnProcessor, IPAdapterAttnProcessor from diffusers.models.embeddings import ImageProjection, IPAdapterPlusImageProjection from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_hf_numpy, require_torch_accelerator, require_torch_accelerator_with_fp16, require_torch_accelerator_with_training, require_torch_gpu, skip_mps, slow, torch_all_close, torch_device, ) from .test_modeling_common import ModelTesterMixin, UNetTesterMixin logger = logging.get_logger(__name__) enable_full_determinism() def create_ip_adapter_state_dict(model): # "ip_adapter" (cross-attention weights) ip_cross_attn_state_dict = {} key_id = 1 for name in model.attn_processors.keys(): cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = model.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(model.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = model.config.block_out_channels[block_id] if cross_attention_dim is not None: sd = IPAdapterAttnProcessor( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0 ).state_dict() ip_cross_attn_state_dict.update( { f"{key_id}.to_k_ip.weight": sd["to_k_ip.weight"], f"{key_id}.to_v_ip.weight": sd["to_v_ip.weight"], } ) key_id += 2 # "image_proj" (ImageProjection layer weights) cross_attention_dim = model.config["cross_attention_dim"] image_projection = ImageProjection( cross_attention_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, num_image_text_embeds=4 ) ip_image_projection_state_dict = {} sd = image_projection.state_dict() ip_image_projection_state_dict.update( { "proj.weight": sd["image_embeds.weight"], "proj.bias": sd["image_embeds.bias"], "norm.weight": sd["norm.weight"], "norm.bias": sd["norm.bias"], } ) del sd ip_state_dict = {} ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) return ip_state_dict def create_ip_adapter_plus_state_dict(model): # "ip_adapter" (cross-attention weights) ip_cross_attn_state_dict = {} key_id = 1 for name in model.attn_processors.keys(): cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = model.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) 
hidden_size = list(reversed(model.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = model.config.block_out_channels[block_id] if cross_attention_dim is not None: sd = IPAdapterAttnProcessor( hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0 ).state_dict() ip_cross_attn_state_dict.update( { f"{key_id}.to_k_ip.weight": sd["to_k_ip.weight"], f"{key_id}.to_v_ip.weight": sd["to_v_ip.weight"], } ) key_id += 2 # "image_proj" (ImageProjection layer weights) cross_attention_dim = model.config["cross_attention_dim"] image_projection = IPAdapterPlusImageProjection( embed_dims=cross_attention_dim, output_dims=cross_attention_dim, dim_head=32, heads=2, num_queries=4 ) ip_image_projection_state_dict = OrderedDict() for k, v in image_projection.state_dict().items(): if "2.to" in k: k = k.replace("2.to", "0.to") elif "3.0.weight" in k: k = k.replace("3.0.weight", "1.0.weight") elif "3.0.bias" in k: k = k.replace("3.0.bias", "1.0.bias") elif "3.0.weight" in k: k = k.replace("3.0.weight", "1.0.weight") elif "3.1.net.0.proj.weight" in k: k = k.replace("3.1.net.0.proj.weight", "1.1.weight") elif "3.net.2.weight" in k: k = k.replace("3.net.2.weight", "1.3.weight") elif "layers.0.0" in k: k = k.replace("layers.0.0", "layers.0.0.norm1") elif "layers.0.1" in k: k = k.replace("layers.0.1", "layers.0.0.norm2") elif "layers.1.0" in k: k = k.replace("layers.1.0", "layers.1.0.norm1") elif "layers.1.1" in k: k = k.replace("layers.1.1", "layers.1.0.norm2") elif "layers.2.0" in k: k = k.replace("layers.2.0", "layers.2.0.norm1") elif "layers.2.1" in k: k = k.replace("layers.2.1", "layers.2.0.norm2") if "norm_cross" in k: ip_image_projection_state_dict[k.replace("norm_cross", "norm1")] = v elif "layer_norm" in k: ip_image_projection_state_dict[k.replace("layer_norm", "norm2")] = v elif "to_k" in k: ip_image_projection_state_dict[k.replace("to_k", "to_kv")] = torch.cat([v, v], dim=0) elif "to_v" in k: continue elif "to_out.0" in k: ip_image_projection_state_dict[k.replace("to_out.0", "to_out")] = v else: ip_image_projection_state_dict[k] = v ip_state_dict = {} ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict}) return ip_state_dict def create_custom_diffusion_layers(model, mock_weights: bool = True): train_kv = True train_q_out = True custom_diffusion_attn_procs = {} st = model.state_dict() for name, _ in model.attn_processors.items(): cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim if name.startswith("mid_block"): hidden_size = model.config.block_out_channels[-1] elif name.startswith("up_blocks"): block_id = int(name[len("up_blocks.")]) hidden_size = list(reversed(model.config.block_out_channels))[block_id] elif name.startswith("down_blocks"): block_id = int(name[len("down_blocks.")]) hidden_size = model.config.block_out_channels[block_id] layer_name = name.split(".processor")[0] weights = { "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"], "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"], } if train_q_out: weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"] weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"] weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"] if cross_attention_dim is not None: custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor( train_kv=train_kv, train_q_out=train_q_out, 
hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ).to(model.device) custom_diffusion_attn_procs[name].load_state_dict(weights) if mock_weights: # add 1 to weights to mock trained weights with torch.no_grad(): custom_diffusion_attn_procs[name].to_k_custom_diffusion.weight += 1 custom_diffusion_attn_procs[name].to_v_custom_diffusion.weight += 1 else: custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor( train_kv=False, train_q_out=False, hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, ) del st return custom_diffusion_attn_procs class UNet2DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet2DConditionModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 32, 32) @property def output_shape(self): return (4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64), "down_block_types": ("CrossAttnDownBlock2D", "DownBlock2D"), "up_block_types": ("UpBlock2D", "CrossAttnUpBlock2D"), "cross_attention_dim": 32, "attention_head_dim": 8, "out_channels": 4, "in_channels": 4, "layers_per_block": 2, "sample_size": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_enable_works(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.enable_xformers_memory_efficient_attention() assert ( model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ == "XFormersAttnProcessor" ), "xformers is not enabled" @require_torch_accelerator_with_training def test_gradient_checkpointing(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() labels = torch.randn_like(out) loss = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing model_2 = self.model_class(**init_dict) # clone model model_2.load_state_dict(model.state_dict()) model_2.to(torch_device) model_2.enable_gradient_checkpointing() assert model_2.is_gradient_checkpointing and model_2.training out_2 = model_2(**inputs_dict).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_2.zero_grad() loss_2 = (out_2 - labels).mean() loss_2.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_2).abs() < 1e-5) named_params = dict(model.named_parameters()) named_params_2 = dict(model_2.named_parameters()) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5)) def test_model_with_attention_head_dim_tuple(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_use_linear_projection(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["use_linear_projection"] = True model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_cross_attention_dim_tuple(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = (32, 32) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_simple_projection(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() batch_size, _, _, sample_size = inputs_dict["sample"].shape init_dict["class_embed_type"] = "simple_projection" init_dict["projection_class_embeddings_input_dim"] = sample_size inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_with_class_embeddings_concat(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() batch_size, _, _, sample_size = inputs_dict["sample"].shape init_dict["class_embed_type"] = "simple_projection" init_dict["projection_class_embeddings_input_dim"] = sample_size init_dict["class_embeddings_concat"] = True inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_attention_slicing(self): 
init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) model.eval() model.set_attention_slice("auto") with torch.no_grad(): output = model(**inputs_dict) assert output is not None model.set_attention_slice("max") with torch.no_grad(): output = model(**inputs_dict) assert output is not None model.set_attention_slice(2) with torch.no_grad(): output = model(**inputs_dict) assert output is not None def test_model_sliceable_head_dim(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) def check_sliceable_dim_attr(module: torch.nn.Module): if hasattr(module, "set_attention_slice"): assert isinstance(module.sliceable_head_dim, int) for child in module.children(): check_sliceable_dim_attr(child) # retrieve number of attention layers for module in model.children(): check_sliceable_dim_attr(module) def test_gradient_checkpointing_is_applied(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model_class_copy = copy.copy(self.model_class) modules_with_gc_enabled = {} # now monkey patch the following function: # def _set_gradient_checkpointing(self, module, value=False): # if hasattr(module, "gradient_checkpointing"): # module.gradient_checkpointing = value def _set_gradient_checkpointing_new(self, module, value=False): if hasattr(module, "gradient_checkpointing"): module.gradient_checkpointing = value modules_with_gc_enabled[module.__class__.__name__] = True model_class_copy._set_gradient_checkpointing = _set_gradient_checkpointing_new model = model_class_copy(**init_dict) model.enable_gradient_checkpointing() EXPECTED_SET = { "CrossAttnUpBlock2D", "CrossAttnDownBlock2D", "UNetMidBlock2DCrossAttn", "UpBlock2D", "Transformer2DModel", "DownBlock2D", } assert set(modules_with_gc_enabled.keys()) == EXPECTED_SET assert all(modules_with_gc_enabled.values()), "All modules should be enabled" def test_special_attn_proc(self): class AttnEasyProc(torch.nn.Module): def __init__(self, num): super().__init__() self.weight = torch.nn.Parameter(torch.tensor(num)) self.is_run = False self.number = 0 self.counter = 0 def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, number=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) hidden_states += self.weight self.is_run = True self.counter += 1 self.number = number return hidden_states # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) processor = AttnEasyProc(5.0) 
model.set_attn_processor(processor) model(**inputs_dict, cross_attention_kwargs={"number": 123}).sample assert processor.counter == 12 assert processor.is_run assert processor.number == 123 @parameterized.expand( [ # fmt: off [torch.bool], [torch.long], [torch.float], # fmt: on ] ) def test_model_xattn_mask(self, mask_dtype): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)}) model.to(torch_device) model.eval() cond = inputs_dict["encoder_hidden_states"] with torch.no_grad(): full_cond_out = model(**inputs_dict).sample assert full_cond_out is not None keepall_mask = torch.ones(*cond.shape[:-1], device=cond.device, dtype=mask_dtype) full_cond_keepallmask_out = model(**{**inputs_dict, "encoder_attention_mask": keepall_mask}).sample assert full_cond_keepallmask_out.allclose( full_cond_out, rtol=1e-05, atol=1e-05 ), "a 'keep all' mask should give the same result as no mask" trunc_cond = cond[:, :-1, :] trunc_cond_out = model(**{**inputs_dict, "encoder_hidden_states": trunc_cond}).sample assert not trunc_cond_out.allclose( full_cond_out, rtol=1e-05, atol=1e-05 ), "discarding the last token from our cond should change the result" batch, tokens, _ = cond.shape mask_last = (torch.arange(tokens) < tokens - 1).expand(batch, -1).to(cond.device, mask_dtype) masked_cond_out = model(**{**inputs_dict, "encoder_attention_mask": mask_last}).sample assert masked_cond_out.allclose( trunc_cond_out, rtol=1e-05, atol=1e-05 ), "masking the last token from our cond should be equivalent to truncating that token out of the condition" # see diffusers.models.attention_processor::Attention#prepare_attention_mask # note: we may not need to fix mask padding to work for stable-diffusion cross-attn masks. # since the use-case (somebody passes in a too-short cross-attn mask) is pretty esoteric. # maybe it's fine that this only works for the unclip use-case. @mark.skip( reason="we currently pad mask by target_length tokens (what unclip needs), whereas stable-diffusion's cross-attn needs to instead pad by remaining_length." ) def test_model_xattn_padding(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)}) model.to(torch_device) model.eval() cond = inputs_dict["encoder_hidden_states"] with torch.no_grad(): full_cond_out = model(**inputs_dict).sample assert full_cond_out is not None batch, tokens, _ = cond.shape keeplast_mask = (torch.arange(tokens) == tokens - 1).expand(batch, -1).to(cond.device, torch.bool) keeplast_out = model(**{**inputs_dict, "encoder_attention_mask": keeplast_mask}).sample assert not keeplast_out.allclose(full_cond_out), "a 'keep last token' mask should change the result" trunc_mask = torch.zeros(batch, tokens - 1, device=cond.device, dtype=torch.bool) trunc_mask_out = model(**{**inputs_dict, "encoder_attention_mask": trunc_mask}).sample assert trunc_mask_out.allclose( keeplast_out ), "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask." 
def test_custom_diffusion_processors(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample1 = model(**inputs_dict).sample custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) # make sure we can set a list of attention processors model.set_attn_processor(custom_diffusion_attn_procs) model.to(torch_device) # test that attn processors can be set to itself model.set_attn_processor(model.attn_processors) with torch.no_grad(): sample2 = model(**inputs_dict).sample assert (sample1 - sample2).abs().max() < 3e-3 def test_custom_diffusion_save_load(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): old_sample = model(**inputs_dict).sample custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) model.set_attn_processor(custom_diffusion_attn_procs) with torch.no_grad(): sample = model(**inputs_dict).sample with tempfile.TemporaryDirectory() as tmpdirname: model.save_attn_procs(tmpdirname, safe_serialization=False) self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_custom_diffusion_weights.bin"))) torch.manual_seed(0) new_model = self.model_class(**init_dict) new_model.load_attn_procs(tmpdirname, weight_name="pytorch_custom_diffusion_weights.bin") new_model.to(torch_device) with torch.no_grad(): new_sample = new_model(**inputs_dict).sample assert (sample - new_sample).abs().max() < 1e-4 # custom diffusion and no custom diffusion should be the same assert (sample - old_sample).abs().max() < 3e-3 @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_custom_diffusion_xformers_on_off(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False) model.set_attn_processor(custom_diffusion_attn_procs) # default with torch.no_grad(): sample = model(**inputs_dict).sample model.enable_xformers_memory_efficient_attention() on_sample = model(**inputs_dict).sample model.disable_xformers_memory_efficient_attention() off_sample = model(**inputs_dict).sample assert (sample - on_sample).abs().max() < 1e-4 assert (sample - off_sample).abs().max() < 1e-4 def test_pickle(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) with torch.no_grad(): sample = model(**inputs_dict).sample sample_copy = copy.copy(sample) assert (sample - sample_copy).abs().max() < 1e-4 def test_asymmetrical_unet(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() # Add asymmetry to configs init_dict["transformer_layers_per_block"] = [[3, 2], 1] init_dict["reverse_transformer_layers_per_block"] = [[3, 4], 1] 
torch.manual_seed(0) model = self.model_class(**init_dict) model.to(torch_device) output = model(**inputs_dict).sample expected_shape = inputs_dict["sample"].shape # Check if input and output shapes are the same self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_ip_adapter(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) # forward pass without ip-adapter with torch.no_grad(): sample1 = model(**inputs_dict).sample # update inputs_dict for ip-adapter batch_size = inputs_dict["encoder_hidden_states"].shape[0] image_embeds = floats_tensor((batch_size, 1, model.cross_attention_dim)).to(torch_device) inputs_dict["added_cond_kwargs"] = {"image_embeds": image_embeds} # make ip_adapter_1 and ip_adapter_2 ip_adapter_1 = create_ip_adapter_state_dict(model) image_proj_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["image_proj"].items()} cross_attn_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["ip_adapter"].items()} ip_adapter_2 = {} ip_adapter_2.update({"image_proj": image_proj_state_dict_2, "ip_adapter": cross_attn_state_dict_2}) # forward pass ip_adapter_1 model._load_ip_adapter_weights(ip_adapter_1) assert model.config.encoder_hid_dim_type == "ip_image_proj" assert model.encoder_hid_proj is not None assert model.down_blocks[0].attentions[0].transformer_blocks[0].attn2.processor.__class__.__name__ in ( "IPAdapterAttnProcessor", "IPAdapterAttnProcessor2_0", ) with torch.no_grad(): sample2 = model(**inputs_dict).sample # forward pass with ip_adapter_2 model._load_ip_adapter_weights(ip_adapter_2) with torch.no_grad(): sample3 = model(**inputs_dict).sample # forward pass with ip_adapter_1 again model._load_ip_adapter_weights(ip_adapter_1) with torch.no_grad(): sample4 = model(**inputs_dict).sample assert not sample1.allclose(sample2, atol=1e-4, rtol=1e-4) assert not sample2.allclose(sample3, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample4, atol=1e-4, rtol=1e-4) def test_ip_adapter_plus(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = (8, 16) model = self.model_class(**init_dict) model.to(torch_device) # forward pass without ip-adapter with torch.no_grad(): sample1 = model(**inputs_dict).sample # update inputs_dict for ip-adapter batch_size = inputs_dict["encoder_hidden_states"].shape[0] image_embeds = floats_tensor((batch_size, 1, model.cross_attention_dim)).to(torch_device) inputs_dict["added_cond_kwargs"] = {"image_embeds": image_embeds} # make ip_adapter_1 and ip_adapter_2 ip_adapter_1 = create_ip_adapter_plus_state_dict(model) image_proj_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["image_proj"].items()} cross_attn_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["ip_adapter"].items()} ip_adapter_2 = {} ip_adapter_2.update({"image_proj": image_proj_state_dict_2, "ip_adapter": cross_attn_state_dict_2}) # forward pass ip_adapter_1 model._load_ip_adapter_weights(ip_adapter_1) assert model.config.encoder_hid_dim_type == "ip_image_proj" assert model.encoder_hid_proj is not None assert model.down_blocks[0].attentions[0].transformer_blocks[0].attn2.processor.__class__.__name__ in ( "IPAdapterAttnProcessor", "IPAdapterAttnProcessor2_0", ) with torch.no_grad(): sample2 = model(**inputs_dict).sample # forward pass with ip_adapter_2 model._load_ip_adapter_weights(ip_adapter_2) with torch.no_grad(): sample3 = model(**inputs_dict).sample # forward 
pass with ip_adapter_1 again model._load_ip_adapter_weights(ip_adapter_1) with torch.no_grad(): sample4 = model(**inputs_dict).sample assert not sample1.allclose(sample2, atol=1e-4, rtol=1e-4) assert not sample2.allclose(sample3, atol=1e-4, rtol=1e-4) assert sample2.allclose(sample4, atol=1e-4, rtol=1e-4) @slow class UNet2DConditionModelIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"): revision = "fp16" if fp16 else None torch_dtype = torch.float16 if fp16 else torch.float32 model = UNet2DConditionModel.from_pretrained( model_id, subfolder="unet", torch_dtype=torch_dtype, revision=revision ) model.to(torch_device).eval() return model @require_torch_gpu def test_set_attention_slice_auto(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() unet = self.get_unet_model() unet.set_attention_slice("auto") latents = self.get_latents(33) encoder_hidden_states = self.get_encoder_hidden_states(33) timestep = 1 with torch.no_grad(): _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 5 * 10**9 @require_torch_gpu def test_set_attention_slice_max(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() unet = self.get_unet_model() unet.set_attention_slice("max") latents = self.get_latents(33) encoder_hidden_states = self.get_encoder_hidden_states(33) timestep = 1 with torch.no_grad(): _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 5 * 10**9 @require_torch_gpu def test_set_attention_slice_int(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() unet = self.get_unet_model() unet.set_attention_slice(2) latents = self.get_latents(33) encoder_hidden_states = self.get_encoder_hidden_states(33) timestep = 1 with torch.no_grad(): _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 5 * 10**9 @require_torch_gpu def test_set_attention_slice_list(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() # there are 32 sliceable layers slice_list = 16 * [2, 3] unet = self.get_unet_model() unet.set_attention_slice(slice_list) latents = self.get_latents(33) encoder_hidden_states = self.get_encoder_hidden_states(33) timestep = 1 with torch.no_grad(): _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample mem_bytes = torch.cuda.max_memory_allocated() assert mem_bytes < 5 * 10**9 def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False): dtype = torch.float16 if fp16 else torch.float32 hidden_states = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return hidden_states 
@parameterized.expand( [ # fmt: off [33, 4, [-0.4424, 0.1510, -0.1937, 0.2118, 0.3746, -0.3957, 0.0160, -0.0435]], [47, 0.55, [-0.1508, 0.0379, -0.3075, 0.2540, 0.3633, -0.0821, 0.1719, -0.0207]], [21, 0.89, [-0.6479, 0.6364, -0.3464, 0.8697, 0.4443, -0.6289, -0.0091, 0.1778]], [9, 1000, [0.8888, -0.5659, 0.5834, -0.7469, 1.1912, -0.3923, 1.1241, -0.4424]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_compvis_sd_v1_4(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4") latents = self.get_latents(seed) encoder_hidden_states = self.get_encoder_hidden_states(seed) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) @parameterized.expand( [ # fmt: off [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]], [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]], [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]], [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True) latents = self.get_latents(seed, fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [33, 4, [-0.4430, 0.1570, -0.1867, 0.2376, 0.3205, -0.3681, 0.0525, -0.0722]], [47, 0.55, [-0.1415, 0.0129, -0.3136, 0.2257, 0.3430, -0.0536, 0.2114, -0.0436]], [21, 0.89, [-0.7091, 0.6664, -0.3643, 0.9032, 0.4499, -0.6541, 0.0139, 0.1750]], [9, 1000, [0.8878, -0.5659, 0.5844, -0.7442, 1.1883, -0.3927, 1.1192, -0.4423]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_compvis_sd_v1_5(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5") latents = self.get_latents(seed) encoder_hidden_states = self.get_encoder_hidden_states(seed) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-3) @parameterized.expand( [ # fmt: off [83, 4, [-0.2695, -0.1669, 0.0073, -0.3181, -0.1187, -0.1676, -0.1395, -0.5972]], [17, 0.55, [-0.1290, -0.2588, 0.0551, -0.0916, 0.3286, 0.0238, -0.3669, 0.0322]], [8, 0.89, [-0.5283, 0.1198, 0.0870, -0.1141, 0.9189, -0.0150, 0.5474, 0.4319]], [3, 1000, [-0.5601, 0.2411, -0.5435, 0.1268, 1.1338, -0.2427, 
-0.0280, -1.0020]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5", fp16=True) latents = self.get_latents(seed, fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == latents.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [33, 4, [-0.7639, 0.0106, -0.1615, -0.3487, -0.0423, -0.7972, 0.0085, -0.4858]], [47, 0.55, [-0.6564, 0.0795, -1.9026, -0.6258, 1.8235, 1.2056, 1.2169, 0.9073]], [21, 0.89, [0.0327, 0.4399, -0.6358, 0.3417, 0.4120, -0.5621, -0.0397, -1.0430]], [9, 1000, [0.1600, 0.7303, -1.0556, -0.3515, -0.7440, -1.2037, -1.8149, -1.8931]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_compvis_sd_inpaint(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting") latents = self.get_latents(seed, shape=(4, 9, 64, 64)) encoder_hidden_states = self.get_encoder_hidden_states(seed) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == (4, 4, 64, 64) output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [83, 4, [-0.1047, -1.7227, 0.1067, 0.0164, -0.5698, -0.4172, -0.1388, 1.1387]], [17, 0.55, [0.0975, -0.2856, -0.3508, -0.4600, 0.3376, 0.2930, -0.2747, -0.7026]], [8, 0.89, [-0.0952, 0.0183, -0.5825, -0.1981, 0.1131, 0.4668, -0.0395, -0.3486]], [3, 1000, [0.4790, 0.4949, -1.0732, -0.7158, 0.7959, -0.9478, 0.1105, -0.9741]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting", fp16=True) latents = self.get_latents(seed, shape=(4, 9, 64, 64), fp16=True) encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True) timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device) with torch.no_grad(): sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample assert sample.shape == (4, 4, 64, 64) output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]], [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]], [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]], [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_stabilityai_sd_v2_fp16(self, seed, timestep, expected_slice): model = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True) latents = 
self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)

        with torch.no_grad():
            sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        assert sample.shape == latents.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_prior.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import inspect import unittest import torch from parameterized import parameterized from diffusers import PriorTransformer from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, slow, torch_all_close, torch_device, ) from .test_modeling_common import ModelTesterMixin enable_full_determinism() class PriorTransformerTests(ModelTesterMixin, unittest.TestCase): model_class = PriorTransformer main_input_name = "hidden_states" @property def dummy_input(self): batch_size = 4 embedding_dim = 8 num_embeddings = 7 hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device) proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } def get_dummy_seed_input(self, seed=0): torch.manual_seed(seed) batch_size = 4 embedding_dim = 8 num_embeddings = 7 hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device) proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device) encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device) return { "hidden_states": hidden_states, "timestep": 2, "proj_embedding": proj_embedding, "encoder_hidden_states": encoder_hidden_states, } @property def input_shape(self): return (4, 8) @property def output_shape(self): return (4, 8) def prepare_init_args_and_inputs_for_common(self): init_dict = { "num_attention_heads": 2, "attention_head_dim": 4, "num_layers": 2, "embedding_dim": 8, "num_embeddings": 7, "additional_embeddings": 4, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): model, loading_info = PriorTransformer.from_pretrained( "hf-internal-testing/prior-dummy", output_loading_info=True ) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) hidden_states = model(**self.dummy_input)[0] assert hidden_states is not None, "Make sure output is not None" def test_forward_signature(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["hidden_states", "timestep"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_output_pretrained(self): model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy") model = model.to(torch_device) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() input = self.get_dummy_seed_input() with torch.no_grad(): output = model(**input)[0] output_slice = output[0, :5].flatten().cpu() 
print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        batch_size = batch_size
        embedding_dim = embedding_dim
        num_embeddings = num_embeddings

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_unet_2d.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import math import unittest import torch from diffusers import UNet2DModel from diffusers.utils import logging from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, require_torch_accelerator, slow, torch_all_close, torch_device, ) from .test_modeling_common import ModelTesterMixin, UNetTesterMixin logger = logging.get_logger(__name__) enable_full_determinism() class Unet2DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet2DModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64), "down_block_types": ("DownBlock2D", "AttnDownBlock2D"), "up_block_types": ("AttnUpBlock2D", "UpBlock2D"), "attention_head_dim": 3, "out_channels": 3, "in_channels": 3, "layers_per_block": 2, "sample_size": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_mid_block_attn_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["add_attention"] = True init_dict["attn_norm_num_groups"] = 8 model = self.model_class(**init_dict) model.to(torch_device) model.eval() self.assertIsNotNone( model.mid_block.attentions[0].group_norm, "Mid block Attention group norm should exist but does not." 
) self.assertEqual( model.mid_block.attentions[0].group_norm.num_groups, init_dict["attn_norm_num_groups"], "Mid block Attention group norm does not have the expected number of groups.", ) with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet2DModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (4, 32, 32) @property def output_shape(self): return (4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "sample_size": 32, "in_channels": 4, "out_channels": 4, "layers_per_block": 2, "block_out_channels": (32, 64), "attention_head_dim": 32, "down_block_types": ("DownBlock2D", "DownBlock2D"), "up_block_types": ("UpBlock2D", "UpBlock2D"), } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): model, loading_info = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input).sample assert image is not None, "Make sure output is not None" @require_torch_accelerator def test_from_pretrained_accelerate(self): model, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True) model.to(torch_device) image = model(**self.dummy_input).sample assert image is not None, "Make sure output is not None" @require_torch_accelerator def test_from_pretrained_accelerate_wont_change_results(self): # by defautl model loading will use accelerate as `low_cpu_mem_usage=True` model_accelerate, _ = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True) model_accelerate.to(torch_device) model_accelerate.eval() noise = torch.randn( 1, model_accelerate.config.in_channels, model_accelerate.config.sample_size, model_accelerate.config.sample_size, generator=torch.manual_seed(0), ) noise = noise.to(torch_device) time_step = torch.tensor([10] * noise.shape[0]).to(torch_device) arr_accelerate = model_accelerate(noise, time_step)["sample"] # two models don't need to stay in the device at the same time del model_accelerate torch.cuda.empty_cache() gc.collect() model_normal_load, _ = UNet2DModel.from_pretrained( "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False ) model_normal_load.to(torch_device) model_normal_load.eval() arr_normal_load = model_normal_load(noise, time_step)["sample"] assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3) def test_output_pretrained(self): model = UNet2DModel.from_pretrained("fusing/unet-ldm-dummy-update") model.eval() model.to(torch_device) noise = torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), ) noise = noise.to(torch_device) time_step = torch.tensor([10] * noise.shape[0]).to(torch_device) with torch.no_grad(): output = model(noise, time_step).sample output_slice = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off 
expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800]) # fmt: on self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3)) class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet2DModel main_input_name = "sample" @property def dummy_input(self, sizes=(32, 32)): batch_size = 4 num_channels = 3 noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [32, 64, 64, 64], "in_channels": 3, "layers_per_block": 1, "out_channels": 3, "time_embedding_type": "fourier", "norm_eps": 1e-6, "mid_block_scale_factor": math.sqrt(2.0), "norm_num_groups": None, "down_block_types": [ "SkipDownBlock2D", "AttnSkipDownBlock2D", "SkipDownBlock2D", "SkipDownBlock2D", ], "up_block_types": [ "SkipUpBlock2D", "SkipUpBlock2D", "AttnSkipUpBlock2D", "SkipUpBlock2D", ], } inputs_dict = self.dummy_input return init_dict, inputs_dict @slow def test_from_pretrained_hub(self): model, loading_info = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) inputs = self.dummy_input noise = floats_tensor((4, 3) + (256, 256)).to(torch_device) inputs["sample"] = noise image = model(**inputs) assert image is not None, "Make sure output is not None" @slow def test_output_pretrained_ve_mid(self): model = UNet2DModel.from_pretrained("google/ncsnpp-celebahq-256") model.to(torch_device) batch_size = 4 num_channels = 3 sizes = (256, 256) noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) with torch.no_grad(): output = model(noise, time_step).sample output_slice = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off expected_output_slice = torch.tensor([-4836.2178, -6487.1470, -3816.8196, -7964.9302, -10966.3037, -20043.5957, 8137.0513, 2340.3328, 544.6056]) # fmt: on self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) def test_output_pretrained_ve_large(self): model = UNet2DModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update") model.to(torch_device) batch_size = 4 num_channels = 3 sizes = (32, 32) noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device) time_step = torch.tensor(batch_size * [1e-4]).to(torch_device) with torch.no_grad(): output = model(noise, time_step).sample output_slice = output[0, -3:, -3:, -1].flatten().cpu() # fmt: off expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256]) # fmt: on self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) def test_forward_with_norm_groups(self): # not required for this model pass
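# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file above): the slow tests in this
# module share one pattern -- load a pretrained UNet2DModel, build a seeded noise tensor
# from the model config, run a single denoising forward pass, and inspect the output.
# The helper below is a hypothetical stand-alone condensation of that pattern; it reuses
# the same dummy checkpoint id the tests use.
def _sketch_unet2d_single_step(model_id="fusing/unet-ldm-dummy-update"):
    import torch
    from diffusers import UNet2DModel

    model = UNet2DModel.from_pretrained(model_id).eval()
    # noise shaped (batch, in_channels, sample_size, sample_size), seeded for determinism
    noise = torch.randn(
        1,
        model.config.in_channels,
        model.config.sample_size,
        model.config.sample_size,
        generator=torch.manual_seed(0),
    )
    timestep = torch.tensor([10] * noise.shape[0])
    with torch.no_grad():
        sample = model(noise, timestep).sample
    # the UNet predicts a tensor with the same shape as its input
    assert sample.shape == noise.shape
    return sample
# --------------------------------------------------------------------------------------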
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_unet_1d.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import UNet1DModel from diffusers.utils.testing_utils import ( backend_manual_seed, floats_tensor, slow, torch_device, ) from .test_modeling_common import ModelTesterMixin, UNetTesterMixin class UNet1DModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet1DModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_features = 14 seq_len = 16 noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) time_step = torch.tensor([10] * batch_size).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (4, 14, 16) @property def output_shape(self): return (4, 14, 16) def test_ema_training(self): pass def test_training(self): pass def test_determinism(self): super().test_determinism() def test_outputs_equivalence(self): super().test_outputs_equivalence() def test_from_save_pretrained(self): super().test_from_save_pretrained() def test_from_save_pretrained_variant(self): super().test_from_save_pretrained_variant() def test_model_from_pretrained(self): super().test_model_from_pretrained() def test_output(self): super().test_output() def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64, 128, 256), "in_channels": 14, "out_channels": 14, "time_embedding_type": "positional", "use_timestep_embedding": True, "flip_sin_to_cos": False, "freq_shift": 1.0, "out_block_type": "OutConv1DBlock", "mid_block_type": "MidResTemporalBlock1D", "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"), "up_block_types": ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D"), "act_fn": "swish", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): model, loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="unet" ) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): model = UNet1DModel.from_pretrained("bglick13/hopper-medium-v2-value-function-hor32", subfolder="unet") torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_features = model.config.in_channels seq_len = 16 noise = torch.randn((1, seq_len, num_features)).permute( 0, 2, 1 ) # match original, we can update values and remove time_step = torch.full((num_features,), 0) with torch.no_grad(): output = model(noise, time_step).sample.permute(0, 2, 1) output_slice = output[0, -3:, -3:].flatten() # fmt: off expected_output_slice = torch.tensor([-2.137172, 1.1426016, 0.3688687, -0.766922, 0.7303146, 0.11038864, -0.4760633, 0.13270172, 0.02591348]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, rtol=1e-3)) 
def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass @slow def test_unet_1d_maestro(self): model_id = "harmonai/maestro-150k" model = UNet1DModel.from_pretrained(model_id, subfolder="unet") model.to(torch_device) sample_size = 65536 noise = torch.sin(torch.arange(sample_size)[None, None, :].repeat(1, 2, 1)).to(torch_device) timestep = torch.tensor([1]).to(torch_device) with torch.no_grad(): output = model(noise, timestep).sample output_sum = output.abs().sum() output_max = output.abs().max() assert (output_sum - 224.0896).abs() < 0.5 assert (output_max - 0.0607).abs() < 4e-4 class UNetRLModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet1DModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_features = 14 seq_len = 16 noise = floats_tensor((batch_size, num_features, seq_len)).to(torch_device) time_step = torch.tensor([10] * batch_size).to(torch_device) return {"sample": noise, "timestep": time_step} @property def input_shape(self): return (4, 14, 16) @property def output_shape(self): return (4, 14, 1) def test_determinism(self): super().test_determinism() def test_outputs_equivalence(self): super().test_outputs_equivalence() def test_from_save_pretrained(self): super().test_from_save_pretrained() def test_from_save_pretrained_variant(self): super().test_from_save_pretrained_variant() def test_model_from_pretrained(self): super().test_model_from_pretrained() def test_output(self): # UNetRL is a value-function is different output shape init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = torch.Size((inputs_dict["sample"].shape[0], 1)) self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_ema_training(self): pass def test_training(self): pass def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 14, "out_channels": 14, "down_block_types": ["DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"], "up_block_types": [], "out_block_type": "ValueFunction", "mid_block_type": "ValueFunctionMidBlock1D", "block_out_channels": [32, 64, 128, 256], "layers_per_block": 1, "downsample_each_block": True, "use_timestep_embedding": True, "freq_shift": 1.0, "flip_sin_to_cos": False, "time_embedding_type": "positional", "act_fn": "mish", } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_from_pretrained_hub(self): value_function, vf_loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" ) self.assertIsNotNone(value_function) self.assertEqual(len(vf_loading_info["missing_keys"]), 0) value_function.to(torch_device) image = value_function(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): value_function, vf_loading_info = UNet1DModel.from_pretrained( "bglick13/hopper-medium-v2-value-function-hor32", output_loading_info=True, subfolder="value_function" ) torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_features = value_function.config.in_channels seq_len = 14 noise = torch.randn((1, seq_len, num_features)).permute( 0, 2, 1 ) # match original, we can update values and remove time_step = 
torch.full((num_features,), 0) with torch.no_grad(): output = value_function(noise, time_step).sample # fmt: off expected_output_slice = torch.tensor([165.25] * seq_len) # fmt: on self.assertTrue(torch.allclose(output, expected_output_slice, rtol=1e-3)) def test_forward_with_norm_groups(self): # Not implemented yet for this UNet pass
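# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file above): the value-function
# variant of UNet1DModel exercised by UNetRLModelTests collapses a (batch, channels,
# sequence) sample down to one scalar per batch element. A hypothetical stand-alone check
# of that behaviour, rebuilt from the same dummy configuration the tests use:
def _sketch_value_function_output_shape():
    import torch
    from diffusers import UNet1DModel

    value_function = UNet1DModel(
        in_channels=14,
        out_channels=14,
        down_block_types=["DownResnetBlock1D"] * 4,
        up_block_types=[],
        out_block_type="ValueFunction",
        mid_block_type="ValueFunctionMidBlock1D",
        block_out_channels=[32, 64, 128, 256],
        layers_per_block=1,
        downsample_each_block=True,
        use_timestep_embedding=True,
        freq_shift=1.0,
        flip_sin_to_cos=False,
        time_embedding_type="positional",
        act_fn="mish",
    ).eval()
    sample = torch.randn(4, 14, 16)        # (batch, features, horizon), as in dummy_input
    timestep = torch.tensor([10] * 4)
    with torch.no_grad():
        value = value_function(sample, timestep).sample
    # one scalar value per batch element, matching the shape asserted in test_output above
    assert value.shape == (4, 1)
    return value
# --------------------------------------------------------------------------------------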
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_modeling_common_flax.py
import inspect from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax @require_flax class FlaxModelTesterMixin: def test_output(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"]) jax.lax.stop_gradient(variables) output = model.apply(variables, inputs_dict["sample"]) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 32) model = self.model_class(**init_dict) variables = model.init(inputs_dict["prng_key"], inputs_dict["sample"]) jax.lax.stop_gradient(variables) output = model.apply(variables, inputs_dict["sample"]) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_deprecated_kwargs(self): has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0 if has_kwarg_in_model_class and not has_deprecated_kwarg: raise ValueError( f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs" " under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if there are" " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" " [<deprecated_argument>]`" ) if not has_kwarg_in_model_class and has_deprecated_kwarg: raise ValueError( f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs" " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to" f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument" " from `_deprecated_kwargs = [<deprecated_argument>]`" )
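# --------------------------------------------------------------------------------------
# Note (added, not part of the original mixin above): the pattern these mixin tests
# exercise is the standard Flax init/apply round trip, roughly
#
#     variables = model.init(prng_key, sample)   # build the parameter pytree
#     output = model.apply(variables, sample)    # pure forward pass with those params
#
# where `model` is an instance of `model_class` built from the init dict, and `prng_key`
# and `sample` are supplied by the concrete test class through
# `prepare_init_args_and_inputs_for_common`.
# --------------------------------------------------------------------------------------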
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_activations.py
import unittest import torch from torch import nn from diffusers.models.activations import get_activation class ActivationsTests(unittest.TestCase): def test_swish(self): act = get_activation("swish") self.assertIsInstance(act, nn.SiLU) self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) def test_silu(self): act = get_activation("silu") self.assertIsInstance(act, nn.SiLU) self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) def test_mish(self): act = get_activation("mish") self.assertIsInstance(act, nn.Mish) self.assertEqual(act(torch.tensor(-200, dtype=torch.float32)).item(), 0) self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20) def test_gelu(self): act = get_activation("gelu") self.assertIsInstance(act, nn.GELU) self.assertEqual(act(torch.tensor(-100, dtype=torch.float32)).item(), 0) self.assertNotEqual(act(torch.tensor(-1, dtype=torch.float32)).item(), 0) self.assertEqual(act(torch.tensor(0, dtype=torch.float32)).item(), 0) self.assertEqual(act(torch.tensor(20, dtype=torch.float32)).item(), 20)
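# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file above): `get_activation` maps a
# string name to an instantiated torch.nn activation module, which is what the assertions
# above rely on. A minimal usage example:
def _sketch_get_activation_usage():
    import torch
    from torch import nn
    from diffusers.models.activations import get_activation

    act = get_activation("silu")
    assert isinstance(act, nn.SiLU)
    # the returned module can be used like any other nn.Module
    return act(torch.linspace(-3.0, 3.0, steps=5))
# --------------------------------------------------------------------------------------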
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_vae.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from parameterized import parameterized from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderKLTemporalDecoder, AutoencoderTiny, ConsistencyDecoderVAE, StableDiffusionPipeline, ) from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.loading_utils import load_image from diffusers.utils.testing_utils import ( backend_empty_cache, enable_full_determinism, floats_tensor, load_hf_numpy, require_torch_accelerator, require_torch_accelerator_with_fp16, require_torch_accelerator_with_training, require_torch_gpu, skip_mps, slow, torch_all_close, torch_device, ) from diffusers.utils.torch_utils import randn_tensor from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() def get_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [32, 64] norm_num_groups = norm_num_groups or 32 init_dict = { "block_out_channels": block_out_channels, "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), "latent_channels": 4, "norm_num_groups": norm_num_groups, } return init_dict def get_asym_autoencoder_kl_config(block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [32, 64] norm_num_groups = norm_num_groups or 32 init_dict = { "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "down_block_out_channels": block_out_channels, "layers_per_down_block": 1, "up_block_types": ["UpDecoderBlock2D"] * len(block_out_channels), "up_block_out_channels": block_out_channels, "layers_per_up_block": 1, "act_fn": "silu", "latent_channels": 4, "norm_num_groups": norm_num_groups, "sample_size": 32, "scaling_factor": 0.18215, } return init_dict def get_autoencoder_tiny_config(block_out_channels=None): block_out_channels = (len(block_out_channels) * [32]) if block_out_channels is not None else [32, 32] init_dict = { "in_channels": 3, "out_channels": 3, "encoder_block_out_channels": block_out_channels, "decoder_block_out_channels": block_out_channels, "num_encoder_blocks": [b // min(block_out_channels) for b in block_out_channels], "num_decoder_blocks": [b // min(block_out_channels) for b in reversed(block_out_channels)], } return init_dict def get_consistency_vae_config(block_out_channels=None, norm_num_groups=None): block_out_channels = block_out_channels or [32, 64] norm_num_groups = norm_num_groups or 32 return { "encoder_block_out_channels": block_out_channels, "encoder_in_channels": 3, "encoder_out_channels": 4, "encoder_down_block_types": ["DownEncoderBlock2D"] * len(block_out_channels), "decoder_add_attention": False, "decoder_block_out_channels": block_out_channels, "decoder_down_block_types": ["ResnetDownsampleBlock2D"] * 
len(block_out_channels), "decoder_downsample_padding": 1, "decoder_in_channels": 7, "decoder_layers_per_block": 1, "decoder_norm_eps": 1e-05, "decoder_norm_num_groups": norm_num_groups, "encoder_norm_num_groups": norm_num_groups, "decoder_num_train_timesteps": 1024, "decoder_out_channels": 6, "decoder_resnet_time_scale_shift": "scale_shift", "decoder_time_embedding_type": "learned", "decoder_up_block_types": ["ResnetUpsampleBlock2D"] * len(block_out_channels), "scaling_factor": 1, "latent_channels": 4, } class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AutoencoderKL main_input_name = "sample" base_precision = 1e-2 @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = get_autoencoder_kl_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_training(self): pass @require_torch_accelerator_with_training def test_gradient_checkpointing(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() labels = torch.randn_like(out) loss = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing model_2 = self.model_class(**init_dict) # clone model model_2.load_state_dict(model.state_dict()) model_2.to(torch_device) model_2.enable_gradient_checkpointing() assert model_2.is_gradient_checkpointing and model_2.training out_2 = model_2(**inputs_dict).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_2.zero_grad() loss_2 = (out_2 - labels).mean() loss_2.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_2).abs() < 1e-5) named_params = dict(model.named_parameters()) named_params_2 = dict(model_2.named_parameters()) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5)) def test_from_pretrained_hub(self): model, loading_info = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): model = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy") model = model.to(torch_device) model.eval() # Keep generator on CPU for non-CUDA devices to compare outputs with CPU result tensors generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda" if torch_device != "mps": generator = torch.Generator(device=generator_device).manual_seed(0) else: generator = torch.manual_seed(0) image = torch.randn( 1, model.config.in_channels, model.config.sample_size, model.config.sample_size, generator=torch.manual_seed(0), ) image = image.to(torch_device) with torch.no_grad(): output = model(image, sample_posterior=True, generator=generator).sample output_slice = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. if torch_device == "mps": expected_output_slice = torch.tensor( [ -4.0078e-01, -3.8323e-04, -1.2681e-01, -1.1462e-01, 2.0095e-01, 1.0893e-01, -8.8247e-02, -3.0361e-01, -9.8644e-03, ] ) elif generator_device == "cpu": expected_output_slice = torch.tensor( [ -0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026, ] ) else: expected_output_slice = torch.tensor( [ -0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485, ] ) self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2)) class AsymmetricAutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = AsymmetricAutoencoderKL main_input_name = "sample" base_precision = 1e-2 @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) mask = torch.ones((batch_size, 1) + sizes).to(torch_device) return {"sample": image, "mask": mask} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = get_asym_autoencoder_kl_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_forward_with_norm_groups(self): pass class AutoencoderTinyTests(ModelTesterMixin, unittest.TestCase): model_class = AutoencoderTiny main_input_name = "sample" base_precision = 1e-2 @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): 
init_dict = get_autoencoder_tiny_config() inputs_dict = self.dummy_input return init_dict, inputs_dict def test_outputs_equivalence(self): pass class ConsistencyDecoderVAETests(ModelTesterMixin, unittest.TestCase): model_class = ConsistencyDecoderVAE main_input_name = "sample" base_precision = 1e-2 forward_requires_fresh_args = True def inputs_dict(self, seed=None): generator = torch.Generator("cpu") if seed is not None: generator.manual_seed(0) image = randn_tensor((4, 3, 32, 32), generator=generator, device=torch.device(torch_device)) return {"sample": image, "generator": generator} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) @property def init_dict(self): return get_consistency_vae_config() def prepare_init_args_and_inputs_for_common(self): return self.init_dict, self.inputs_dict() @unittest.skip def test_training(self): ... @unittest.skip def test_ema_training(self): ... class AutoncoderKLTemporalDecoderFastTests(ModelTesterMixin, unittest.TestCase): model_class = AutoencoderKLTemporalDecoder main_input_name = "sample" base_precision = 1e-2 @property def dummy_input(self): batch_size = 3 num_channels = 3 sizes = (32, 32) image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) num_frames = 3 return {"sample": image, "num_frames": num_frames} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "latent_channels": 4, "layers_per_block": 2, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_training(self): pass @unittest.skipIf(torch_device == "mps", "Gradient checkpointing skipped on MPS") def test_gradient_checkpointing(self): # enable deterministic behavior for gradient checkpointing init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) assert not model.is_gradient_checkpointing and model.training out = model(**inputs_dict).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() labels = torch.randn_like(out) loss = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing model_2 = self.model_class(**init_dict) # clone model model_2.load_state_dict(model.state_dict()) model_2.to(torch_device) model_2.enable_gradient_checkpointing() assert model_2.is_gradient_checkpointing and model_2.training out_2 = model_2(**inputs_dict).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_2.zero_grad() loss_2 = (out_2 - labels).mean() loss_2.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_2).abs() < 1e-5) named_params = dict(model.named_parameters()) named_params_2 = dict(model_2.named_parameters()) for name, param in named_params.items(): if "post_quant_conv" in name: continue self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5)) @slow class AutoencoderTinyIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="hf-internal-testing/taesd-diffusers", fp16=False): torch_dtype = torch.float16 if fp16 else torch.float32 model = AutoencoderTiny.from_pretrained(model_id, torch_dtype=torch_dtype) model.to(torch_device).eval() return model @parameterized.expand( [ [(1, 4, 73, 97), (1, 3, 584, 776)], [(1, 4, 97, 73), (1, 3, 776, 584)], [(1, 4, 49, 65), (1, 3, 392, 520)], [(1, 4, 65, 49), (1, 3, 520, 392)], [(1, 4, 49, 49), (1, 3, 392, 392)], ] ) def test_tae_tiling(self, in_shape, out_shape): model = self.get_sd_vae_model() model.enable_tiling() with torch.no_grad(): zeros = torch.zeros(in_shape).to(torch_device) dec = model.decode(zeros).sample assert dec.shape == out_shape def test_stable_diffusion(self): model = self.get_sd_vae_model() image = self.get_sd_image(seed=33) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor([0.0093, 0.6385, -0.1274, 0.1631, -0.1762, 0.5232, -0.3108, -0.0382]) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand([(True,), (False,)]) def test_tae_roundtrip(self, enable_tiling): # load the autoencoder model = self.get_sd_vae_model() if enable_tiling: model.enable_tiling() # make a black image with a white square in the middle, # which is large enough to split across multiple tiles image = -torch.ones(1, 3, 1024, 1024, device=torch_device) image[..., 256:768, 256:768] = 1.0 # round-trip the image through the autoencoder with torch.no_grad(): sample = model(image).sample # the autoencoder reconstruction should match original image, sorta def downscale(x): return torch.nn.functional.avg_pool2d(x, model.spatial_scale_factor) assert torch_all_close(downscale(sample), downscale(image), atol=0.125) @slow class AutoencoderKLIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="CompVis/stable-diffusion-v1-4", fp16=False): revision = 
"fp16" if fp16 else None torch_dtype = torch.float16 if fp16 else torch.float32 model = AutoencoderKL.from_pretrained( model_id, subfolder="vae", torch_dtype=torch_dtype, revision=revision, ) model.to(torch_device) return model def get_generator(self, seed=0): generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda" if torch_device != "mps": return torch.Generator(device=generator_device).manual_seed(seed) return torch.manual_seed(seed) @parameterized.expand( [ # fmt: off [ 33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824], ], [ 47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131], ], # fmt: on ] ) def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_stable_diffusion_fp16(self, seed, expected_slice): model = self.get_sd_vae_model(fp16=True) image = self.get_sd_image(seed, fp16=True) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=1e-2) @parameterized.expand( [ # fmt: off [ 33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824], ], [ 47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131], ], # fmt: on ] ) def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_stable_diffusion_decode(self, seed, expected_slice): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) assert 
torch_all_close(output_slice, expected_output_slice, atol=1e-3) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_accelerator_with_fp16 def test_stable_diffusion_decode_fp16(self, seed, expected_slice): model = self.get_sd_vae_model(fp16=True) encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand([(13,), (16,), (27,)]) @require_torch_gpu @unittest.skipIf( not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed): model = self.get_sd_vae_model(fp16=True) encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=1e-1) @parameterized.expand([(13,), (16,), (37,)]) @require_torch_gpu @unittest.skipIf( not is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0(self, seed): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=1e-2) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def test_stable_diffusion_encode_sample(self, seed, expected_slice): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): dist = model.encode(image).latent_dist sample = dist.sample(generator=generator) assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] output_slice = sample[0, -1, -3:, -3:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) tolerance = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(output_slice, expected_output_slice, atol=tolerance) def test_stable_diffusion_model_local(self): model_id = "stabilityai/sd-vae-ft-mse" model_1 = AutoencoderKL.from_pretrained(model_id).to(torch_device) url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors" model_2 = AutoencoderKL.from_single_file(url).to(torch_device) image = self.get_sd_image(33) with torch.no_grad(): sample_1 = model_1(image).sample sample_2 = model_2(image).sample assert sample_1.shape == sample_2.shape output_slice_1 = sample_1[-1, -2:, -2:, :2].flatten().float().cpu() output_slice_2 = sample_2[-1, -2:, -2:, :2].flatten().float().cpu() assert torch_all_close(output_slice_1, output_slice_2, atol=3e-3) @slow class 
AsymmetricAutoencoderKLIntegrationTests(unittest.TestCase): def get_file_format(self, seed, shape): return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy" def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() backend_empty_cache(torch_device) def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False): dtype = torch.float16 if fp16 else torch.float32 image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype) return image def get_sd_vae_model(self, model_id="cross-attention/asymmetric-autoencoder-kl-x-1-5", fp16=False): revision = "main" torch_dtype = torch.float32 model = AsymmetricAutoencoderKL.from_pretrained( model_id, torch_dtype=torch_dtype, revision=revision, ) model.to(torch_device).eval() return model def get_generator(self, seed=0): generator_device = "cpu" if not torch_device.startswith("cuda") else "cuda" if torch_device != "mps": return torch.Generator(device=generator_device).manual_seed(seed) return torch.manual_seed(seed) @parameterized.expand( [ # fmt: off [ 33, [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078], [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], ], [ 47, [0.4400, 0.0543, 0.2873, 0.2946, 0.0553, 0.0839, -0.1585, 0.2529], [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], ], # fmt: on ] ) def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): sample = model(image, generator=generator, sample_posterior=True).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=5e-3) @parameterized.expand( [ # fmt: off [ 33, [-0.0340, 0.2870, 0.1698, -0.0105, -0.3448, 0.3529, -0.1321, 0.1097], [-0.0344, 0.2912, 0.1687, -0.0137, -0.3462, 0.3552, -0.1337, 0.1078], ], [ 47, [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], [0.4397, 0.0550, 0.2873, 0.2946, 0.0567, 0.0855, -0.1580, 0.2531], ], # fmt: on ] ) def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps): model = self.get_sd_vae_model() image = self.get_sd_image(seed) with torch.no_grad(): sample = model(image).sample assert sample.shape == image.shape output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu() expected_output_slice = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=3e-3) @parameterized.expand( [ # fmt: off [13, [-0.0521, -0.2939, 0.1540, -0.1855, -0.5936, -0.3138, -0.4579, -0.2275]], [37, [-0.1820, -0.4345, -0.0455, -0.2923, -0.8035, -0.5089, -0.4795, -0.3106]], # fmt: on ] ) @require_torch_accelerator @skip_mps def test_stable_diffusion_decode(self, seed, expected_slice): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] output_slice = sample[-1, -2:, :2, -2:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) assert torch_all_close(output_slice, expected_output_slice, atol=2e-3) @parameterized.expand([(13,), (16,), (37,)]) @require_torch_gpu @unittest.skipIf( not 
is_xformers_available(), reason="xformers is not required when using PyTorch 2.0.", ) def test_stable_diffusion_decode_xformers_vs_2_0(self, seed): model = self.get_sd_vae_model() encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64)) with torch.no_grad(): sample = model.decode(encoding).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): sample_2 = model.decode(encoding).sample assert list(sample.shape) == [3, 3, 512, 512] assert torch_all_close(sample, sample_2, atol=5e-2) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def test_stable_diffusion_encode_sample(self, seed, expected_slice): model = self.get_sd_vae_model() image = self.get_sd_image(seed) generator = self.get_generator(seed) with torch.no_grad(): dist = model.encode(image).latent_dist sample = dist.sample(generator=generator) assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] output_slice = sample[0, -1, -3:, -3:].flatten().cpu() expected_output_slice = torch.tensor(expected_slice) tolerance = 3e-3 if torch_device != "mps" else 1e-2 assert torch_all_close(output_slice, expected_output_slice, atol=tolerance) @slow class ConsistencyDecoderVAEIntegrationTests(unittest.TestCase): def tearDown(self): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @torch.no_grad() def test_encode_decode(self): vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update vae.to(torch_device) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ).resize((256, 256)) image = torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[ None, :, :, : ].cuda() latent = vae.encode(image).latent_dist.mean sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample actual_output = sample[0, :2, :2, :2].flatten().cpu() expected_output = torch.tensor([-0.0141, -0.0014, 0.0115, 0.0086, 0.1051, 0.1053, 0.1031, 0.1024]) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_sd(self): vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder") # TODO - update pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", vae=vae, safety_checker=None) pipe.to(torch_device) out = pipe( "horse", num_inference_steps=2, output_type="pt", generator=torch.Generator("cpu").manual_seed(0), ).images[0] actual_output = out[:2, :2, :2].flatten().cpu() expected_output = torch.tensor([0.7686, 0.8228, 0.6489, 0.7455, 0.8661, 0.8797, 0.8241, 0.8759]) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_encode_decode_f16(self): vae = ConsistencyDecoderVAE.from_pretrained( "openai/consistency-decoder", torch_dtype=torch.float16 ) # TODO - update vae.to(torch_device) image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/img2img/sketch-mountains-input.jpg" ).resize((256, 256)) image = ( torch.from_numpy(np.array(image).transpose(2, 0, 1).astype(np.float32) / 127.5 - 1)[None, :, :, :] .half() .cuda() ) latent = vae.encode(image).latent_dist.mean sample = vae.decode(latent, generator=torch.Generator("cpu").manual_seed(0)).sample actual_output = sample[0, :2, :2, :2].flatten().cpu() expected_output = torch.tensor( 
[-0.0111, -0.0125, -0.0017, -0.0007, 0.1257, 0.1465, 0.1450, 0.1471], dtype=torch.float16, ) assert torch_all_close(actual_output, expected_output, atol=5e-3) def test_sd_f16(self): vae = ConsistencyDecoderVAE.from_pretrained( "openai/consistency-decoder", torch_dtype=torch.float16 ) # TODO - update pipe = StableDiffusionPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16, vae=vae, safety_checker=None, ) pipe.to(torch_device) out = pipe( "horse", num_inference_steps=2, output_type="pt", generator=torch.Generator("cpu").manual_seed(0), ).images[0] actual_output = out[:2, :2, :2].flatten().cpu() expected_output = torch.tensor( [0.0000, 0.0249, 0.0000, 0.0000, 0.1709, 0.2773, 0.0471, 0.1035], dtype=torch.float16, ) assert torch_all_close(actual_output, expected_output, atol=5e-3)
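# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file above): the AutoencoderKL tests
# revolve around the encode -> sample latent -> decode round trip. A hypothetical
# stand-alone version, using the same small dummy checkpoint as
# AutoencoderKLTests.test_output_pretrained:
def _sketch_autoencoder_kl_roundtrip():
    import torch
    from diffusers import AutoencoderKL

    vae = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy").eval()
    image = torch.randn(1, vae.config.in_channels, vae.config.sample_size, vae.config.sample_size)
    with torch.no_grad():
        latents = vae.encode(image).latent_dist.sample(generator=torch.manual_seed(0))
        reconstruction = vae.decode(latents).sample
    # latents are `latent_channels` wide and spatially downscaled; the reconstruction is
    # expected to match the input shape for this symmetric dummy VAE
    assert latents.shape[1] == vae.config.latent_channels
    assert reconstruction.shape == image.shape
    return reconstruction
# --------------------------------------------------------------------------------------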
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_unet_3d_condition.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from diffusers.models import ModelMixin, UNet3DConditionModel from diffusers.utils import logging from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() logger = logging.get_logger(__name__) @skip_mps class UNet3DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = UNet3DConditionModel main_input_name = "sample" @property def dummy_input(self): batch_size = 4 num_channels = 4 num_frames = 4 sizes = (32, 32) noise = floats_tensor((batch_size, num_channels, num_frames) + sizes).to(torch_device) time_step = torch.tensor([10]).to(torch_device) encoder_hidden_states = floats_tensor((batch_size, 4, 32)).to(torch_device) return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states} @property def input_shape(self): return (4, 4, 32, 32) @property def output_shape(self): return (4, 4, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": (32, 64), "down_block_types": ( "CrossAttnDownBlock3D", "DownBlock3D", ), "up_block_types": ("UpBlock3D", "CrossAttnUpBlock3D"), "cross_attention_dim": 32, "attention_head_dim": 8, "out_channels": 4, "in_channels": 4, "layers_per_block": 1, "sample_size": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_xformers_enable_works(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.enable_xformers_memory_efficient_attention() assert ( model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__ == "XFormersAttnProcessor" ), "xformers is not enabled" # Overriding to set `norm_num_groups` needs to be different for this model. def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 32 model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.sample self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") # Overriding since the UNet3D outputs a different structure. 
def test_determinism(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): # Warmup pass when using mps (see #372) if torch_device == "mps" and isinstance(model, ModelMixin): model(**self.dummy_input) first = model(**inputs_dict) if isinstance(first, dict): first = first.sample second = model(**inputs_dict) if isinstance(second, dict): second = second.sample out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) def test_model_attention_slicing(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["attention_head_dim"] = 8 model = self.model_class(**init_dict) model.to(torch_device) model.eval() model.set_attention_slice("auto") with torch.no_grad(): output = model(**inputs_dict) assert output is not None model.set_attention_slice("max") with torch.no_grad(): output = model(**inputs_dict) assert output is not None model.set_attention_slice(2) with torch.no_grad(): output = model(**inputs_dict) assert output is not None def test_feed_forward_chunking(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 32 model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict)[0] model.enable_forward_chunking() with torch.no_grad(): output_2 = model(**inputs_dict)[0] self.assertEqual(output.shape, output_2.shape, "Shape doesn't match") assert np.abs(output.cpu() - output_2.cpu()).max() < 1e-2
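# --------------------------------------------------------------------------------------
# Illustrative sketch (not part of the original test file above): UNet3DConditionModel
# takes a 5D (batch, channels, frames, height, width) sample plus text-conditioning
# states and returns a sample of the same shape. Rebuilding the dummy configuration used
# by the tests above:
def _sketch_unet3d_forward():
    import torch
    from diffusers.models import UNet3DConditionModel

    model = UNet3DConditionModel(
        block_out_channels=(32, 64),
        down_block_types=("CrossAttnDownBlock3D", "DownBlock3D"),
        up_block_types=("UpBlock3D", "CrossAttnUpBlock3D"),
        cross_attention_dim=32,
        attention_head_dim=8,
        in_channels=4,
        out_channels=4,
        layers_per_block=1,
        sample_size=32,
    ).eval()
    sample = torch.randn(1, 4, 4, 32, 32)          # (batch, channels, frames, h, w)
    timestep = torch.tensor([10])
    encoder_hidden_states = torch.randn(1, 4, 32)  # (batch, seq_len, cross_attention_dim)
    with torch.no_grad():
        out = model(sample, timestep, encoder_hidden_states=encoder_hidden_states).sample
    # output keeps the input's spatio-temporal shape
    assert out.shape == sample.shape
    return out
# --------------------------------------------------------------------------------------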
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_modeling_common.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import tempfile import traceback import unittest import unittest.mock as mock import uuid from typing import Dict, List, Tuple import numpy as np import requests_mock import torch from huggingface_hub import delete_repo from requests.exceptions import HTTPError from diffusers.models import UNet2DConditionModel from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor from diffusers.training_utils import EMAModel from diffusers.utils import is_xformers_available, logging from diffusers.utils.testing_utils import ( CaptureLogger, require_python39_or_higher, require_torch_2, require_torch_accelerator_with_training, require_torch_gpu, run_test_in_subprocess, torch_device, ) from ..others.test_utils import TOKEN, USER, is_staging_test # Will be run via run_test_in_subprocess def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout): error = None try: init_dict, model_class = in_queue.get(timeout=timeout) model = model_class(**init_dict) model.to(torch_device) model = torch.compile(model) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = model_class.from_pretrained(tmpdirname) new_model.to(torch_device) assert new_model.__class__ == model_class except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class ModelUtilsTest(unittest.TestCase): def tearDown(self): super().tearDown() def test_accelerate_loading_error_message(self): with self.assertRaises(ValueError) as error_context: UNet2DConditionModel.from_pretrained("hf-internal-testing/stable-diffusion-broken", subfolder="unet") # make sure that error message states what keys are missing assert "conv_out.bias" in str(error_context.exception) def test_cached_files_are_used_when_no_internet(self): # A mock response for an HTTP head request to emulate server down response_mock = mock.Mock() response_mock.status_code = 500 response_mock.headers = {} response_mock.raise_for_status.side_effect = HTTPError response_mock.json.return_value = {} # Download this model to make sure it's in the cache. orig_model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("requests.request", return_value=response_mock): # Download this model to make sure it's in the cache. model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", local_files_only=True ) for p1, p2 in zip(orig_model.parameters(), model.parameters()): if p1.data.ne(p2.data).sum() > 0: assert False, "Parameters not the same!" def test_one_request_upon_cached(self): # TODO: For some reason this test fails on MPS where no HEAD call is made. 
if torch_device == "mps": return use_safetensors = False with tempfile.TemporaryDirectory() as tmpdirname: with requests_mock.mock(real_http=True) as m: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) download_requests = [r.method for r in m.request_history] assert download_requests.count("HEAD") == 2, "2 HEAD requests one for config, one for model" assert download_requests.count("GET") == 2, "2 GET requests one for config, one for model" with requests_mock.mock(real_http=True) as m: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, use_safetensors=use_safetensors, ) cache_requests = [r.method for r in m.request_history] assert ( "HEAD" == cache_requests[0] and len(cache_requests) == 1 ), "We should call only `model_info` to check for _commit hash and `send_telemetry`" def test_weight_overwrite(self): with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context: UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, in_channels=9, ) # make sure that error message states what keys are missing assert "Cannot load" in str(error_context.exception) with tempfile.TemporaryDirectory() as tmpdirname: model = UNet2DConditionModel.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", cache_dir=tmpdirname, in_channels=9, low_cpu_mem_usage=False, ignore_mismatched_sizes=True, ) assert model.config.in_channels == 9 class UNetTesterMixin: def test_forward_signature(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["sample", "timestep"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_forward_with_norm_groups(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() init_dict["norm_num_groups"] = 16 init_dict["block_out_channels"] = (16, 32) model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) expected_shape = inputs_dict["sample"].shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") class ModelTesterMixin: main_input_name = None # overwrite in model specific tester class base_precision = 1e-3 forward_requires_fresh_args = False def test_from_save_pretrained(self, expected_max_diff=5e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname) if hasattr(new_model, "set_default_attn_processor"): new_model.set_default_attn_processor() new_model.to(torch_device) with torch.no_grad(): if self.forward_requires_fresh_args: image = model(**self.inputs_dict(0)) else: image = 
model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] if self.forward_requires_fresh_args: new_image = new_model(**self.inputs_dict(0)) else: new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") def test_getattr_is_correct(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) # save some things to test model.dummy_attribute = 5 model.register_to_config(test_attribute=5) logger = logging.get_logger("diffusers.models.modeling_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: assert hasattr(model, "dummy_attribute") assert getattr(model, "dummy_attribute") == 5 assert model.dummy_attribute == 5 # no warning should be thrown assert cap_logger.out == "" logger = logging.get_logger("diffusers.models.modeling_utils") # 30 for warning logger.setLevel(30) with CaptureLogger(logger) as cap_logger: assert hasattr(model, "save_pretrained") fn = model.save_pretrained fn_1 = getattr(model, "save_pretrained") assert fn == fn_1 # no warning should be thrown assert cap_logger.out == "" # warning should be thrown with self.assertWarns(FutureWarning): assert model.test_attribute == 5 with self.assertWarns(FutureWarning): assert getattr(model, "test_attribute") == 5 with self.assertRaises(AttributeError) as error: model.does_not_exist assert str(error.exception) == f"'{type(model).__name__}' object has no attribute 'does_not_exist'" @unittest.skipIf( torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", ) def test_set_xformers_attn_processor_for_determinism(self): torch.use_deterministic_algorithms(False) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) if not hasattr(model, "set_attn_processor"): # If not has `set_attn_processor`, skip test return model.set_default_attn_processor() assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output = model(**self.inputs_dict(0))[0] else: output = model(**inputs_dict)[0] model.enable_xformers_memory_efficient_attention() assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_2 = model(**self.inputs_dict(0))[0] else: output_2 = model(**inputs_dict)[0] model.set_attn_processor(XFormersAttnProcessor()) assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_3 = model(**self.inputs_dict(0))[0] else: output_3 = model(**inputs_dict)[0] torch.use_deterministic_algorithms(True) assert torch.allclose(output, output_2, atol=self.base_precision) assert torch.allclose(output, output_3, atol=self.base_precision) assert torch.allclose(output_2, output_3, atol=self.base_precision) @require_torch_gpu def test_set_attn_processor_for_determinism(self): torch.use_deterministic_algorithms(False) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = 
self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) if not hasattr(model, "set_attn_processor"): # If not has `set_attn_processor`, skip test return assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_1 = model(**self.inputs_dict(0))[0] else: output_1 = model(**inputs_dict)[0] model.set_default_attn_processor() assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_2 = model(**self.inputs_dict(0))[0] else: output_2 = model(**inputs_dict)[0] model.set_attn_processor(AttnProcessor2_0()) assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_4 = model(**self.inputs_dict(0))[0] else: output_4 = model(**inputs_dict)[0] model.set_attn_processor(AttnProcessor()) assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values()) with torch.no_grad(): if self.forward_requires_fresh_args: output_5 = model(**self.inputs_dict(0))[0] else: output_5 = model(**inputs_dict)[0] torch.use_deterministic_algorithms(True) # make sure that outputs match assert torch.allclose(output_2, output_1, atol=self.base_precision) assert torch.allclose(output_2, output_4, atol=self.base_precision) assert torch.allclose(output_2, output_5, atol=self.base_precision) def test_from_save_pretrained_variant(self, expected_max_diff=5e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) if hasattr(model, "set_default_attn_processor"): model.set_default_attn_processor() model.to(torch_device) model.eval() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16") if hasattr(new_model, "set_default_attn_processor"): new_model.set_default_attn_processor() # non-variant cannot be loaded with self.assertRaises(OSError) as error_context: self.model_class.from_pretrained(tmpdirname) # make sure that error message states what keys are missing assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception) new_model.to(torch_device) with torch.no_grad(): if self.forward_requires_fresh_args: image = model(**self.inputs_dict(0)) else: image = model(**inputs_dict) if isinstance(image, dict): image = image.to_tuple()[0] if self.forward_requires_fresh_args: new_image = new_model(**self.inputs_dict(0)) else: new_image = new_model(**inputs_dict) if isinstance(new_image, dict): new_image = new_image.to_tuple()[0] max_diff = (image - new_image).abs().max().item() self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes") @require_python39_or_higher @require_torch_2 def test_from_save_pretrained_dynamo(self): init_dict, _ = self.prepare_init_args_and_inputs_for_common() inputs = [init_dict, self.model_class] run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=inputs) def test_from_save_pretrained_dtype(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() for dtype in [torch.float32, torch.float16, 
torch.bfloat16]: if torch_device == "mps" and dtype == torch.bfloat16: continue with tempfile.TemporaryDirectory() as tmpdirname: model.to(dtype) model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=True, torch_dtype=dtype) assert new_model.dtype == dtype new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=False, torch_dtype=dtype) assert new_model.dtype == dtype def test_determinism(self, expected_max_diff=1e-5): if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): if self.forward_requires_fresh_args: first = model(**self.inputs_dict(0)) else: first = model(**inputs_dict) if isinstance(first, dict): first = first.to_tuple()[0] if self.forward_requires_fresh_args: second = model(**self.inputs_dict(0)) else: second = model(**inputs_dict) if isinstance(second, dict): second = second.to_tuple()[0] out_1 = first.cpu().numpy() out_2 = second.cpu().numpy() out_1 = out_1[~np.isnan(out_1)] out_2 = out_2[~np.isnan(out_2)] max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, expected_max_diff) def test_output(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] self.assertIsNotNone(output) # input & output have to have the same shape input_tensor = inputs_dict[self.main_input_name] expected_shape = input_tensor.shape self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match") def test_model_from_pretrained(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() # test if the model can be loaded from the config # and has all the expected shape with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, safe_serialization=False) new_model = self.model_class.from_pretrained(tmpdirname) new_model.to(torch_device) new_model.eval() # check if all parameters shape are the same for param_name in model.state_dict().keys(): param_1 = model.state_dict()[param_name] param_2 = new_model.state_dict()[param_name] self.assertEqual(param_1.shape, param_2.shape) with torch.no_grad(): output_1 = model(**inputs_dict) if isinstance(output_1, dict): output_1 = output_1.to_tuple()[0] output_2 = new_model(**inputs_dict) if isinstance(output_2, dict): output_2 = output_2.to_tuple()[0] self.assertEqual(output_1.shape, output_2.shape) @require_torch_accelerator_with_training def test_training(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.train() output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] input_tensor = inputs_dict[self.main_input_name] noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) loss = torch.nn.functional.mse_loss(output, noise) loss.backward() @require_torch_accelerator_with_training def test_ema_training(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.train() ema_model = 
EMAModel(model.parameters()) output = model(**inputs_dict) if isinstance(output, dict): output = output.to_tuple()[0] input_tensor = inputs_dict[self.main_input_name] noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device) loss = torch.nn.functional.mse_loss(output, noise) loss.backward() ema_model.step(model.parameters()) def test_outputs_equivalence(self): def set_nan_tensor_to_zero(t): # Temporary fallback until `aten::_index_put_impl_` is implemented in mps # Track progress in https://github.com/pytorch/pytorch/issues/77764 device = t.device if device.type == "mps": t = t.to("cpu") t[t != t] = 0 return t.to(device) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( torch.allclose( set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5 ), msg=( "Tuple and dict output are not equal. Difference:" f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has" f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}." ), ) if self.forward_requires_fresh_args: model = self.model_class(**self.init_dict) else: init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.model_class(**init_dict) model.to(torch_device) model.eval() with torch.no_grad(): if self.forward_requires_fresh_args: outputs_dict = model(**self.inputs_dict(0)) outputs_tuple = model(**self.inputs_dict(0), return_dict=False) else: outputs_dict = model(**inputs_dict) outputs_tuple = model(**inputs_dict, return_dict=False) recursive_check(outputs_tuple, outputs_dict) @require_torch_accelerator_with_training def test_enable_disable_gradient_checkpointing(self): if not self.model_class._supports_gradient_checkpointing: return # Skip test if model does not support gradient checkpointing init_dict, _ = self.prepare_init_args_and_inputs_for_common() # at init model should have gradient checkpointing disabled model = self.model_class(**init_dict) self.assertFalse(model.is_gradient_checkpointing) # check enable works model.enable_gradient_checkpointing() self.assertTrue(model.is_gradient_checkpointing) # check disable works model.disable_gradient_checkpointing() self.assertFalse(model.is_gradient_checkpointing) def test_deprecated_kwargs(self): has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0 if has_kwarg_in_model_class and not has_deprecated_kwarg: raise ValueError( f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs" " under the `_deprecated_kwargs` class attribute. 
Make sure to either remove `**kwargs` if there are" " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" " [<deprecated_argument>]`" ) if not has_kwarg_in_model_class and has_deprecated_kwarg: raise ValueError( f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs" " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to" f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument" " from `_deprecated_kwargs = [<deprecated_argument>]`" ) @is_staging_test class ModelPushToHubTester(unittest.TestCase): identifier = uuid.uuid4() repo_id = f"test-model-{identifier}" org_repo_id = f"valid_org/{repo_id}-org" def test_push_to_hub(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}") for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.repo_id, token=TOKEN) def test_push_to_hub_in_organization(self): model = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) model.push_to_hub(self.org_repo_id, token=TOKEN) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(token=TOKEN, repo_id=self.org_repo_id) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id) new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id) for p1, p2 in zip(model.parameters(), new_model.parameters()): self.assertTrue(torch.equal(p1, p2)) # Reset repo delete_repo(self.org_repo_id, token=TOKEN)
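# Illustrative, hedged sketch (not one of the original tests): the save -> load -> compare round
# trip that ModelTesterMixin.test_from_save_pretrained exercises, spelled out with the same tiny
# UNet2DConditionModel configuration used by ModelPushToHubTester above. It is a plain helper
# function rather than a TestCase subclass so the test runner does not collect it.
def _example_save_load_roundtrip(tmp_dir):
    model = UNet2DConditionModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=4,
        out_channels=4,
        down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
        up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
        cross_attention_dim=32,
    )
    model.save_pretrained(tmp_dir, safe_serialization=False)
    reloaded = UNet2DConditionModel.from_pretrained(tmp_dir)
    # after a lossless round trip every parameter should match exactly
    for p1, p2 in zip(model.parameters(), reloaded.parameters()):
        assert torch.equal(p1, p2)


# Example invocation (tempfile is already imported at the top of this module):
#     with tempfile.TemporaryDirectory() as tmp_dir:
#         _example_save_load_roundtrip(tmp_dir)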
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_layers_utils.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from torch import nn from diffusers.models.attention import GEGLU, AdaLayerNorm, ApproximateGELU from diffusers.models.embeddings import get_timestep_embedding from diffusers.models.lora import LoRACompatibleLinear from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D from diffusers.models.transformer_2d import Transformer2DModel from diffusers.utils.testing_utils import ( backend_manual_seed, require_torch_accelerator_with_fp64, torch_device, ) class EmbeddingsTests(unittest.TestCase): def test_timestep_embeddings(self): embedding_dim = 256 timesteps = torch.arange(16) t1 = get_timestep_embedding(timesteps, embedding_dim) # first vector should always be composed only of 0's and 1's assert (t1[0, : embedding_dim // 2] - 0).abs().sum() < 1e-5 assert (t1[0, embedding_dim // 2 :] - 1).abs().sum() < 1e-5 # last element of each vector should be one assert (t1[:, -1] - 1).abs().sum() < 1e-5 # For large embeddings (e.g. 128) the frequency of every vector is higher # than the previous one which means that the gradients of later vectors are # ALWAYS higher than the previous ones grad_mean = np.abs(np.gradient(t1, axis=-1)).mean(axis=1) prev_grad = 0.0 for grad in grad_mean: assert grad > prev_grad prev_grad = grad def test_timestep_defaults(self): embedding_dim = 16 timesteps = torch.arange(10) t1 = get_timestep_embedding(timesteps, embedding_dim) t2 = get_timestep_embedding( timesteps, embedding_dim, flip_sin_to_cos=False, downscale_freq_shift=1, max_period=10_000 ) assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3) def test_timestep_flip_sin_cos(self): embedding_dim = 16 timesteps = torch.arange(10) t1 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=True) t1 = torch.cat([t1[:, embedding_dim // 2 :], t1[:, : embedding_dim // 2]], dim=-1) t2 = get_timestep_embedding(timesteps, embedding_dim, flip_sin_to_cos=False) assert torch.allclose(t1.cpu(), t2.cpu(), 1e-3) def test_timestep_downscale_freq_shift(self): embedding_dim = 16 timesteps = torch.arange(10) t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0) t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1) # get cosine half (vectors that are wrapped into cosine) cosine_half = (t1 - t2)[:, embedding_dim // 2 :] # cosine needs to be negative assert (np.abs((cosine_half <= 0).numpy()) - 1).sum() < 1e-5 def test_sinoid_embeddings_hardcoded(self): embedding_dim = 64 timesteps = torch.arange(128) # standard unet, score_vde t1 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=1, flip_sin_to_cos=False) # glide, ldm t2 = get_timestep_embedding(timesteps, embedding_dim, downscale_freq_shift=0, flip_sin_to_cos=True) # grad-tts t3 = get_timestep_embedding(timesteps, embedding_dim, scale=1000) assert torch.allclose( t1[23:26, 47:50].flatten().cpu(), torch.tensor([0.9646, 0.9804, 0.9892, 0.9615, 0.9787, 0.9882, 0.9582, 0.9769, 
0.9872]), 1e-3, ) assert torch.allclose( t2[23:26, 47:50].flatten().cpu(), torch.tensor([0.3019, 0.2280, 0.1716, 0.3146, 0.2377, 0.1790, 0.3272, 0.2474, 0.1864]), 1e-3, ) assert torch.allclose( t3[23:26, 47:50].flatten().cpu(), torch.tensor([-0.9801, -0.9464, -0.9349, -0.3952, 0.8887, -0.9709, 0.5299, -0.2853, -0.9927]), 1e-3, ) class Upsample2DBlockTests(unittest.TestCase): def test_upsample_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=False) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.2173, -1.2079, -1.2079, 0.2952, 1.1254, 1.1254, 0.2952, 1.1254, 1.1254]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_conv(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=True) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.7145, 1.3773, 0.3492, 0.8448, 1.0839, -0.3341, 0.5956, 0.1250, -0.4841]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_conv_out_dim(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=True, out_channels=64) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 64, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.2703, 0.1656, -0.2538, -0.0553, -0.2984, 0.1044, 0.1155, 0.2579, 0.7755]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_upsample_with_transpose(self): torch.manual_seed(0) sample = torch.randn(1, 32, 32, 32) upsample = Upsample2D(channels=32, use_conv=False, use_conv_transpose=True) with torch.no_grad(): upsampled = upsample(sample) assert upsampled.shape == (1, 32, 64, 64) output_slice = upsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.3028, -0.1582, 0.0071, 0.0350, -0.4799, -0.1139, 0.1056, -0.1153, -0.1046]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class Downsample2DBlockTests(unittest.TestCase): def test_downsample_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=False) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.0513, -0.3889, 0.0640, 0.0836, -0.5460, -0.0341, -0.0169, -0.6967, 0.1179]) max_diff = (output_slice.flatten() - expected_slice).abs().sum().item() assert max_diff <= 1e-3 # assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-1) def test_downsample_with_conv(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913], ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_downsample_with_conv_pad1(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True, padding=1) with torch.no_grad(): 
downsampled = downsample(sample) assert downsampled.shape == (1, 32, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([0.9267, 0.5878, 0.3337, 1.2321, -0.1191, -0.3984, -0.7532, -0.0715, -0.3913]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_downsample_with_conv_out_dim(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64) downsample = Downsample2D(channels=32, use_conv=True, out_channels=16) with torch.no_grad(): downsampled = downsample(sample) assert downsampled.shape == (1, 16, 32, 32) output_slice = downsampled[0, -1, -3:, -3:] expected_slice = torch.tensor([-0.6586, 0.5985, 0.0721, 0.1256, -0.1492, 0.4436, -0.2544, 0.5021, 1.1522]) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class ResnetBlock2DTests(unittest.TestCase): def test_resnet_default(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 64, 64) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9010, -0.2974, -0.8245, -1.3533, 0.8742, -0.9645, -2.0584, 1.3387, -0.4746], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_restnet_with_use_in_shortcut(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, use_in_shortcut=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 64, 64) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.2226, -1.0791, -0.1629, 0.3659, -0.2889, -1.2376, 0.0582, 0.9206, 0.0044], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_resnet_up(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, up=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 128, 128) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [1.2130, -0.8753, -0.9027, 1.5783, -0.5362, -0.5001, 1.0726, -0.7732, -0.4182], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_resnet_down(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, down=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_restnet_with_kernel_fir(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="fir", down=True).to(torch_device) with torch.no_grad(): 
output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.0934, -0.5729, 0.0909, -0.2710, -0.5044, 0.0243, -0.0665, -0.5267, -0.3136], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_restnet_with_kernel_sde_vp(self): torch.manual_seed(0) sample = torch.randn(1, 32, 64, 64).to(torch_device) temb = torch.randn(1, 128).to(torch_device) resnet_block = ResnetBlock2D(in_channels=32, temb_channels=128, kernel="sde_vp", down=True).to(torch_device) with torch.no_grad(): output_tensor = resnet_block(sample, temb) assert output_tensor.shape == (1, 32, 32, 32) output_slice = output_tensor[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3002, -0.7135, 0.1359, 0.0561, -0.7935, 0.0113, -0.1766, -0.6714, -0.0436], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) class Transformer2DModelTests(unittest.TestCase): def test_spatial_transformer_default(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 32, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=32, num_attention_heads=1, attention_head_dim=32, dropout=0.0, cross_attention_dim=None, ).to(torch_device) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, 32, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9455, -0.0066, -1.3933, -1.5878, 0.5325, -0.6486, -1.8648, 0.7515, -0.9689], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_cross_attention_dim(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 64, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=64, num_attention_heads=2, attention_head_dim=32, dropout=0.0, cross_attention_dim=64, ).to(torch_device) with torch.no_grad(): context = torch.randn(1, 4, 64).to(torch_device) attention_scores = spatial_transformer_block(sample, context).sample assert attention_scores.shape == (1, 64, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [0.0143, -0.6909, -2.1547, -1.8893, 1.4097, 0.1359, -0.2521, -1.3359, 0.2598], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_timestep(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_embeds_ada_norm = 5 sample = torch.randn(1, 64, 64, 64).to(torch_device) spatial_transformer_block = Transformer2DModel( in_channels=64, num_attention_heads=2, attention_head_dim=32, dropout=0.0, cross_attention_dim=64, num_embeds_ada_norm=num_embeds_ada_norm, ).to(torch_device) with torch.no_grad(): timestep_1 = torch.tensor(1, dtype=torch.long).to(torch_device) timestep_2 = torch.tensor(2, dtype=torch.long).to(torch_device) attention_scores_1 = spatial_transformer_block(sample, timestep=timestep_1).sample attention_scores_2 = spatial_transformer_block(sample, timestep=timestep_2).sample assert attention_scores_1.shape == (1, 64, 64, 64) assert attention_scores_2.shape == (1, 64, 64, 64) output_slice_1 = attention_scores_1[0, -1, -3:, -3:] output_slice_2 = attention_scores_2[0, -1, -3:, -3:] expected_slice = torch.tensor( [-0.3923, -1.0923, -1.7144, -1.5570, 1.4154, 0.1738, -0.1157, -1.2998, -0.1703], device=torch_device 
) expected_slice_2 = torch.tensor( [-0.4311, -1.1376, -1.7732, -1.5997, 1.3450, 0.0964, -0.1569, -1.3590, -0.2348], device=torch_device ) assert torch.allclose(output_slice_1.flatten(), expected_slice, atol=1e-3) assert torch.allclose(output_slice_2.flatten(), expected_slice_2, atol=1e-3) def test_spatial_transformer_dropout(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) sample = torch.randn(1, 32, 64, 64).to(torch_device) spatial_transformer_block = ( Transformer2DModel( in_channels=32, num_attention_heads=2, attention_head_dim=16, dropout=0.3, cross_attention_dim=None, ) .to(torch_device) .eval() ) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, 32, 64, 64) output_slice = attention_scores[0, -1, -3:, -3:] expected_slice = torch.tensor( [-1.9380, -0.0083, -1.3771, -1.5819, 0.5209, -0.6441, -1.8545, 0.7563, -0.9615], device=torch_device ) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) @require_torch_accelerator_with_fp64 def test_spatial_transformer_discrete(self): torch.manual_seed(0) backend_manual_seed(torch_device, 0) num_embed = 5 sample = torch.randint(0, num_embed, (1, 32)).to(torch_device) spatial_transformer_block = ( Transformer2DModel( num_attention_heads=1, attention_head_dim=32, num_vector_embeds=num_embed, sample_size=16, ) .to(torch_device) .eval() ) with torch.no_grad(): attention_scores = spatial_transformer_block(sample).sample assert attention_scores.shape == (1, num_embed - 1, 32) output_slice = attention_scores[0, -2:, -3:] expected_slice = torch.tensor([-1.7648, -1.0241, -2.0985, -1.8035, -1.6404, -1.2098], device=torch_device) assert torch.allclose(output_slice.flatten(), expected_slice, atol=1e-3) def test_spatial_transformer_default_norm_layers(self): spatial_transformer_block = Transformer2DModel(num_attention_heads=1, attention_head_dim=32, in_channels=32) assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == nn.LayerNorm assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm def test_spatial_transformer_ada_norm_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, num_embeds_ada_norm=5, ) assert spatial_transformer_block.transformer_blocks[0].norm1.__class__ == AdaLayerNorm assert spatial_transformer_block.transformer_blocks[0].norm3.__class__ == nn.LayerNorm def test_spatial_transformer_default_ff_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, ) assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == GEGLU assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == LoRACompatibleLinear dim = 32 inner_dim = 128 # First dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim # NOTE: inner_dim * 2 because GEGLU assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim * 2 # Second dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim def test_spatial_transformer_geglu_approx_ff_layers(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, 
activation_fn="geglu-approximate", ) assert spatial_transformer_block.transformer_blocks[0].ff.net[0].__class__ == ApproximateGELU assert spatial_transformer_block.transformer_blocks[0].ff.net[1].__class__ == nn.Dropout assert spatial_transformer_block.transformer_blocks[0].ff.net[2].__class__ == LoRACompatibleLinear dim = 32 inner_dim = 128 # First dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.in_features == dim assert spatial_transformer_block.transformer_blocks[0].ff.net[0].proj.out_features == inner_dim # Second dimension change assert spatial_transformer_block.transformer_blocks[0].ff.net[2].in_features == inner_dim assert spatial_transformer_block.transformer_blocks[0].ff.net[2].out_features == dim def test_spatial_transformer_attention_bias(self): spatial_transformer_block = Transformer2DModel( num_attention_heads=1, attention_head_dim=32, in_channels=32, attention_bias=True ) assert spatial_transformer_block.transformer_blocks[0].attn1.to_q.bias is not None assert spatial_transformer_block.transformer_blocks[0].attn1.to_k.bias is not None assert spatial_transformer_block.transformer_blocks[0].attn1.to_v.bias is not None
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_unet_2d_blocks.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from diffusers.models.unet_2d_blocks import * # noqa F403 from diffusers.utils.testing_utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = DownBlock2D # noqa F405 block_type = "down" def test_output(self): expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(expected_slice) class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = ResnetDownsampleBlock2D # noqa F405 block_type = "down" def test_output(self): expected_slice = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(expected_slice) class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = AttnDownBlock2D # noqa F405 block_type = "down" def test_output(self): expected_slice = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(expected_slice) class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = CrossAttnDownBlock2D # noqa F405 block_type = "down" def prepare_init_args_and_inputs_for_common(self): init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = 32 return init_dict, inputs_dict def test_output(self): expected_slice = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(expected_slice) class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = SimpleCrossAttnDownBlock2D # noqa F405 block_type = "down" @property def dummy_input(self): return super().get_dummy_input(include_encoder_hidden_states=True) def prepare_init_args_and_inputs_for_common(self): init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == "mps", "MPS result is not consistent") def test_output(self): expected_slice = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(expected_slice) class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = SkipDownBlock2D # noqa F405 block_type = "down" @property def dummy_input(self): return super().get_dummy_input(include_skip_sample=True) def test_output(self): expected_slice = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(expected_slice) class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = AttnSkipDownBlock2D # noqa F405 block_type = "down" @property def dummy_input(self): return super().get_dummy_input(include_skip_sample=True) def test_output(self): expected_slice = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(expected_slice) 
class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = DownEncoderBlock2D # noqa F405 block_type = "down" @property def dummy_input(self): return super().get_dummy_input(include_temb=False) def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 32, "out_channels": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_output(self): expected_slice = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(expected_slice) class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = AttnDownEncoderBlock2D # noqa F405 block_type = "down" @property def dummy_input(self): return super().get_dummy_input(include_temb=False) def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 32, "out_channels": 32, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_output(self): expected_slice = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(expected_slice) class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = UNetMidBlock2D # noqa F405 block_type = "mid" def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 32, "temb_channels": 128, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_output(self): expected_slice = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(expected_slice) class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase): block_class = UNetMidBlock2DCrossAttn # noqa F405 block_type = "mid" def prepare_init_args_and_inputs_for_common(self): init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = 32 return init_dict, inputs_dict def test_output(self): expected_slice = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(expected_slice) class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase): block_class = UNetMidBlock2DSimpleCrossAttn # noqa F405 block_type = "mid" @property def dummy_input(self): return super().get_dummy_input(include_encoder_hidden_states=True) def prepare_init_args_and_inputs_for_common(self): init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = 32 return init_dict, inputs_dict def test_output(self): expected_slice = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(expected_slice) class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = UpBlock2D # noqa F405 block_type = "up" @property def dummy_input(self): return super().get_dummy_input(include_res_hidden_states_tuple=True) def test_output(self): expected_slice = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(expected_slice) class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = ResnetUpsampleBlock2D # noqa F405 block_type = "up" @property def dummy_input(self): return super().get_dummy_input(include_res_hidden_states_tuple=True) def test_output(self): expected_slice = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(expected_slice) class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = CrossAttnUpBlock2D # noqa F405 block_type = "up" @property 
def dummy_input(self): return super().get_dummy_input(include_res_hidden_states_tuple=True) def prepare_init_args_and_inputs_for_common(self): init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = 32 return init_dict, inputs_dict def test_output(self): expected_slice = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(expected_slice) class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = SimpleCrossAttnUpBlock2D # noqa F405 block_type = "up" @property def dummy_input(self): return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True) def prepare_init_args_and_inputs_for_common(self): init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common() init_dict["cross_attention_dim"] = 32 return init_dict, inputs_dict def test_output(self): expected_slice = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(expected_slice) class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = AttnUpBlock2D # noqa F405 block_type = "up" @property def dummy_input(self): return super().get_dummy_input(include_res_hidden_states_tuple=True) @unittest.skipIf(torch_device == "mps", "MPS result is not consistent") def test_output(self): expected_slice = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(expected_slice) class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = SkipUpBlock2D # noqa F405 block_type = "up" @property def dummy_input(self): return super().get_dummy_input(include_res_hidden_states_tuple=True) def test_output(self): expected_slice = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(expected_slice) class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = AttnSkipUpBlock2D # noqa F405 block_type = "up" @property def dummy_input(self): return super().get_dummy_input(include_res_hidden_states_tuple=True) def test_output(self): expected_slice = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(expected_slice) class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = UpDecoderBlock2D # noqa F405 block_type = "up" @property def dummy_input(self): return super().get_dummy_input(include_temb=False) def prepare_init_args_and_inputs_for_common(self): init_dict = {"in_channels": 32, "out_channels": 32} inputs_dict = self.dummy_input return init_dict, inputs_dict def test_output(self): expected_slice = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137] super().test_output(expected_slice) class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase): block_class = AttnUpDecoderBlock2D # noqa F405 block_type = "up" @property def dummy_input(self): return super().get_dummy_input(include_temb=False) def prepare_init_args_and_inputs_for_common(self): init_dict = {"in_channels": 32, "out_channels": 32} inputs_dict = self.dummy_input return init_dict, inputs_dict def test_output(self): expected_slice = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(expected_slice)
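# Illustrative helper (not part of the original file): every nine-value expected_slice above is
# compared against output[0, -1, -3:, -3:] by UNetBlockTesterMixin.test_output. The sketch below
# is one plausible way such a slice could be (re)generated when adding a new block test; the exact
# workflow used by the authors is an assumption.
import torch  # the original module does not import torch directly, so pull it in for the sketch


def _example_print_expected_slice(test_case):
    init_dict, inputs_dict = test_case.prepare_init_args_and_inputs_for_common()
    block = test_case.block_class(**init_dict).to(torch_device).eval()
    with torch.no_grad():
        output = block(**inputs_dict)
    if isinstance(output, tuple):
        output = output[0]
    print([round(v, 4) for v in output[0, -1, -3:, -3:].flatten().tolist()])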
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_vae_flax.py
import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase): model_class = FlaxAutoencoderKL @property def dummy_input(self): batch_size = 4 num_channels = 3 sizes = (32, 32) prng_key = jax.random.PRNGKey(0) image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes)) return {"sample": image, "prng_key": prng_key} def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } inputs_dict = self.dummy_input return init_dict, inputs_dict
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_models_vq.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from diffusers import VQModel from diffusers.utils.testing_utils import ( backend_manual_seed, enable_full_determinism, floats_tensor, torch_device, ) from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase): model_class = VQModel main_input_name = "sample" @property def dummy_input(self, sizes=(32, 32)): batch_size = 4 num_channels = 3 image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device) return {"sample": image} @property def input_shape(self): return (3, 32, 32) @property def output_shape(self): return (3, 32, 32) def prepare_init_args_and_inputs_for_common(self): init_dict = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 3, } inputs_dict = self.dummy_input return init_dict, inputs_dict def test_forward_signature(self): pass def test_training(self): pass def test_from_pretrained_hub(self): model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True) self.assertIsNotNone(model) self.assertEqual(len(loading_info["missing_keys"]), 0) model.to(torch_device) image = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def test_output_pretrained(self): model = VQModel.from_pretrained("fusing/vqgan-dummy") model.to(torch_device).eval() torch.manual_seed(0) backend_manual_seed(torch_device, 0) image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) image = image.to(torch_device) with torch.no_grad(): output = model(image).sample output_slice = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143]) # fmt: on self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
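# Illustrative sketch (not an original test): the forward pass checked above is equivalent to an
# explicit encode/decode round trip through the vector quantizer. The tiny config mirrors
# prepare_init_args_and_inputs_for_common; treat the snippet as an example, not a reference test.
def _example_vq_roundtrip():
    model = VQModel(
        block_out_channels=[32, 64],
        in_channels=3,
        out_channels=3,
        down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
        up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
        latent_channels=3,
    ).eval()
    image = floats_tensor((1, 3, 32, 32))
    with torch.no_grad():
        latents = model.encode(image).latents          # pre-quantization latents
        reconstruction = model.decode(latents).sample  # quantization happens inside decode
    assert reconstruction.shape == image.shape
    return reconstruction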
0
hf_public_repos/diffusers/tests
hf_public_repos/diffusers/tests/models/test_unet_blocks_common.py
# coding=utf-8 # Copyright 2023 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Tuple import torch from diffusers.utils.testing_utils import ( floats_tensor, require_torch, require_torch_accelerator_with_training, torch_all_close, torch_device, ) from diffusers.utils.torch_utils import randn_tensor @require_torch class UNetBlockTesterMixin: @property def dummy_input(self): return self.get_dummy_input() @property def output_shape(self): if self.block_type == "down": return (4, 32, 16, 16) elif self.block_type == "mid": return (4, 32, 32, 32) elif self.block_type == "up": return (4, 32, 64, 64) raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.") def get_dummy_input( self, include_temb=True, include_res_hidden_states_tuple=False, include_encoder_hidden_states=False, include_skip_sample=False, ): batch_size = 4 num_channels = 32 sizes = (32, 32) generator = torch.manual_seed(0) device = torch.device(torch_device) shape = (batch_size, num_channels) + sizes hidden_states = randn_tensor(shape, generator=generator, device=device) dummy_input = {"hidden_states": hidden_states} if include_temb: temb_channels = 128 dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device) if include_res_hidden_states_tuple: generator_1 = torch.manual_seed(1) dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),) if include_encoder_hidden_states: dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device) if include_skip_sample: dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device) return dummy_input def prepare_init_args_and_inputs_for_common(self): init_dict = { "in_channels": 32, "out_channels": 32, "temb_channels": 128, } if self.block_type == "up": init_dict["prev_output_channel"] = 32 if self.block_type == "mid": init_dict.pop("out_channels") inputs_dict = self.dummy_input return init_dict, inputs_dict def test_output(self, expected_slice): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() unet_block = self.block_class(**init_dict) unet_block.to(torch_device) unet_block.eval() with torch.no_grad(): output = unet_block(**inputs_dict) if isinstance(output, Tuple): output = output[0] self.assertEqual(output.shape, self.output_shape) output_slice = output[0, -1, -3:, -3:] expected_slice = torch.tensor(expected_slice).to(torch_device) assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3) @require_torch_accelerator_with_training def test_training(self): init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common() model = self.block_class(**init_dict) model.to(torch_device) model.train() output = model(**inputs_dict) if isinstance(output, Tuple): output = output[0] device = torch.device(torch_device) noise = randn_tensor(output.shape, device=device) loss = torch.nn.functional.mse_loss(output, noise) loss.backward()
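# Illustrative sketch (not part of the original module): concrete block tests such as those in
# test_unet_2d_blocks.py consume this mixin by subclassing it together with unittest.TestCase,
# pointing block_class/block_type at a block, and passing a nine-value reference slice to
# test_output. The imports below would normally sit at the top of a real test file, and the
# reference values are copied from DownBlock2DTests in test_unet_2d_blocks.py.
import unittest

from diffusers.models.unet_2d_blocks import DownBlock2D


class ExampleDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)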
0
hf_public_repos/diffusers/tests/fixtures
hf_public_repos/diffusers/tests/fixtures/custom_pipeline/what_ever.py
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Optional, Tuple, Union import torch from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput class CustomLocalPipeline(DiffusionPipeline): r""" This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) Parameters: unet ([`UNet2DModel`]): U-Net architecture to denoise the encoded image. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[torch.Generator] = None, num_inference_steps: int = 50, output_type: Optional[str] = "pil", return_dict: bool = True, **kwargs, ) -> Union[ImagePipelineOutput, Tuple]: r""" Args: batch_size (`int`, *optional*, defaults to 1): The number of images to generate. generator (`torch.Generator`, *optional*): A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. eta (`float`, *optional*, defaults to 0.0): The eta parameter which controls the scale of the variance (0 is DDIM and 1 is one type of DDPM). num_inference_steps (`int`, *optional*, defaults to 50): The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. output_type (`str`, *optional*, defaults to `"pil"`): The output format of the generate image. Choose between [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple. Returns: [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if `return_dict` is True, otherwise a `tuple. When returning a tuple, the first element is a list with the generated images. """ # Sample gaussian noise to begin loop image = torch.randn( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size), generator=generator, ) image = image.to(self.device) # set step values self.scheduler.set_timesteps(num_inference_steps) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output model_output = self.unet(image, t).sample # 2. 
predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 image = self.scheduler.step(model_output, t, image).prev_sample image = (image / 2 + 0.5).clamp(0, 1) image = image.cpu().permute(0, 2, 3, 1).numpy() if output_type == "pil": image = self.numpy_to_pil(image) if not return_dict: return (image,), "This is a local test" return ImagePipelineOutput(images=image), "This is a local test"
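# Illustrative usage sketch (an assumption, not part of the fixture itself): fixtures like this one
# are typically loaded through the `custom_pipeline` argument of DiffusionPipeline.from_pretrained,
# pointed either at the folder or directly at this .py file. The model id and relative path below
# are examples only, and the snippet is left commented out so importing the fixture stays
# side-effect free.
#
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained(
#         "google/ddpm-cifar10-32",  # any checkpoint that provides a unet and a scheduler
#         custom_pipeline="./tests/fixtures/custom_pipeline/what_ever.py",
#     )
#     images, message = pipe(num_inference_steps=2, output_type="np", return_dict=False)
#     assert message == "This is a local test"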
0