Dataset columns: text (string, 7 to 328k characters), id (string, 14 to 166 characters), metadata (dict), __index_level_0__ (int64, 0 to 459).
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase): pipeline_class = LDMTextToImagePipeline params = TEXT_TO_IMAGE_PARAMS - { "negative_prompt", "negative_prompt_embeds", "cross_attention_kwargs", "prompt_embeds", } required_optional_params = PipelineTesterMixin.required_optional_params - { "num_images_per_prompt", "callback", "callback_steps", } batch_params = TEXT_TO_IMAGE_BATCH_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=(32, 64), in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"), up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"), latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vqvae": vae, "bert": text_encoder, "tokenizer": tokenizer, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_inference_text2img(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() pipe = LDMTextToImagePipeline(**components) pipe.to(device) pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 
0.6136, 0.5014]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3 @nightly @require_torch_gpu class LDMTextToImagePipelineSlowTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, dtype=torch.float32, seed=0): generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 3, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_ldm_default_ddim(self): pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 256, 256, 3) expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878]) max_diff = np.abs(expected_slice - image_slice).max() assert max_diff < 1e-3 @nightly @require_torch_gpu class LDMTextToImagePipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, dtype=torch.float32, seed=0): generator = torch.manual_seed(seed) latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32)) latents = torch.from_numpy(latents).to(device=device, dtype=dtype) inputs = { "prompt": "A painting of a squirrel eating a burger", "latents": latents, "generator": generator, "num_inference_steps": 50, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_ldm_default_ddim(self): pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device) pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3
diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion.py/0
{ "file_path": "diffusers/tests/pipelines/latent_diffusion/test_latent_diffusion.py", "repo_id": "diffusers", "token_count": 3341 }
146
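For orientation, the fast test in the row above builds its pipeline from tiny, randomly initialized components instead of downloading a checkpoint. Below is a condensed sketch of that pattern, written from the test's own get_dummy_components and get_dummy_inputs; it seeds once at the top rather than before every component, so outputs will not match the test's expected slice exactly.

import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel

torch.manual_seed(0)

# Tiny UNet with one plain and one cross-attention down block
unet = UNet2DConditionModel(
    block_out_channels=(32, 64),
    layers_per_block=2,
    sample_size=32,
    in_channels=4,
    out_channels=4,
    down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
    up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
    cross_attention_dim=32,
)
scheduler = DDIMScheduler(
    beta_start=0.00085,
    beta_end=0.012,
    beta_schedule="scaled_linear",
    clip_sample=False,
    set_alpha_to_one=False,
)
vae = AutoencoderKL(
    block_out_channels=(32, 64),
    in_channels=3,
    out_channels=3,
    down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
    up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
    latent_channels=4,
)
text_encoder = CLIPTextModel(
    CLIPTextConfig(
        bos_token_id=0,
        eos_token_id=2,
        hidden_size=32,
        intermediate_size=37,
        layer_norm_eps=1e-05,
        num_attention_heads=4,
        num_hidden_layers=5,
        pad_token_id=1,
        vocab_size=1000,
    )
)
tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

# LDMTextToImagePipeline registers its VAE as "vqvae" and its text encoder as "bert"
pipe = LDMTextToImagePipeline(
    unet=unet, scheduler=scheduler, vqvae=vae, bert=text_encoder, tokenizer=tokenizer
)
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    num_inference_steps=2,
    guidance_scale=6.0,
    output_type="np",
).images[0]
print(image.shape)  # (16, 16, 3) with these dummy components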
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import gc import random import traceback import unittest import numpy as np import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AsymmetricAutoencoderKL, AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, LCMScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel, ) from diffusers.models.attention_processor import AttnProcessor from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, nightly, numpy_cosine_similarity_distance, require_python39_or_higher, require_torch_2, require_torch_gpu, run_test_in_subprocess, slow, torch_device, ) from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, ) enable_full_determinism() # Will be run via run_test_in_subprocess def _test_inpaint_compile(in_queue, out_queue, timeout): error = None try: inputs = in_queue.get(timeout=timeout) torch_device = inputs.pop("torch_device") seed = inputs.pop("seed") inputs["generator"] = torch.Generator(device=torch_device).manual_seed(seed) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.unet.to(memory_format=torch.channels_last) pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0689, 0.0699, 0.0790, 0.0536, 0.0470, 0.0488, 0.041, 0.0508, 0.04179]) assert np.abs(expected_slice - image_slice).max() < 3e-3 except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class StableDiffusionInpaintPipelineFastTests( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess image_latents_params = frozenset([]) callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"mask", "masked_image_latents"}) def 
get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), time_cond_proj_dim=time_cond_proj_dim, layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs(self, device, seed=0, img_res=64, output_pil=True): # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched if output_pil: # Get random floats in [0, 1] as image image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device) image = image.cpu().permute(0, 2, 3, 1)[0] mask_image = torch.ones_like(image) # Convert image and mask_image to [0, 255] image = 255 * image mask_image = 255 * mask_image # Convert to PIL image init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((img_res, img_res)) mask_image = Image.fromarray(np.uint8(mask_image)).convert("RGB").resize((img_res, img_res)) else: # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) # Convert image to [-1, 1] init_image = 2.0 * image - 1.0 mask_image = torch.ones((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4703, 0.5697, 0.3879, 0.5470, 0.6042, 0.4413, 0.5078, 0.4728, 0.4469]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) 
inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4931, 0.5988, 0.4569, 0.5556, 0.6650, 0.5087, 0.5966, 0.5358, 0.5269]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_image_tensor(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) out_pil = output.images inputs = self.get_dummy_inputs(device) inputs["image"] = torch.tensor(np.array(inputs["image"]) / 127.5 - 1).permute(2, 0, 1).unsqueeze(0) inputs["mask_image"] = torch.tensor(np.array(inputs["mask_image"]) / 255).permute(2, 0, 1)[:1].unsqueeze(0) output = sd_pipe(**inputs) out_tensor = output.images assert out_pil.shape == (1, 64, 64, 3) assert np.abs(out_pil.flatten() - out_tensor.flatten()).max() < 5e-2 def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_stable_diffusion_inpaint_strength_zero_test(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) # check that the pipeline raises value error when num_inference_steps is < 1 inputs["strength"] = 0.01 with self.assertRaises(ValueError): sd_pipe(**inputs).images def test_stable_diffusion_inpaint_mask_latents(self): device = "cpu" components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(device) sd_pipe.set_progress_bar_config(disable=None) # normal mask + normal image ## `image`: pil, `mask_image``: pil, `masked_image_latents``: None inputs = self.get_dummy_inputs(device) inputs["strength"] = 0.9 out_0 = sd_pipe(**inputs).images # image latents + mask latents inputs = self.get_dummy_inputs(device) image = sd_pipe.image_processor.preprocess(inputs["image"]).to(sd_pipe.device) mask = sd_pipe.mask_processor.preprocess(inputs["mask_image"]).to(sd_pipe.device) masked_image = image * (mask < 0.5) generator = torch.Generator(device=device).manual_seed(0) image_latents = ( sd_pipe.vae.encode(image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor ) torch.randn((1, 4, 32, 32), generator=generator) mask_latents = ( sd_pipe.vae.encode(masked_image).latent_dist.sample(generator=generator) * sd_pipe.vae.config.scaling_factor ) 
inputs["image"] = image_latents inputs["masked_image_latents"] = mask_latents inputs["mask_image"] = mask inputs["strength"] = 0.9 generator = torch.Generator(device=device).manual_seed(0) torch.randn((1, 4, 32, 32), generator=generator) inputs["generator"] = generator out_1 = sd_pipe(**inputs).images assert np.abs(out_0 - out_1).max() < 1e-2 def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(torch_device) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, image=inputs["image"], mask_image=inputs["mask_image"], num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) class StableDiffusionSimpleInpaintPipelineFastTests(StableDiffusionInpaintPipelineFastTests): pipeline_class = StableDiffusionInpaintPipeline params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS image_params = frozenset([]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, ) scheduler = PNDMScheduler(skip_prk_steps=True) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, "image_encoder": None, } return components def get_dummy_inputs_2images(self, device, 
seed=0, img_res=64): # Get random floats in [0, 1] as image with spatial size (img_res, img_res) image1 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed)).to(device) image2 = floats_tensor((1, 3, img_res, img_res), rng=random.Random(seed + 22)).to(device) # Convert images to [-1, 1] init_image1 = 2.0 * image1 - 1.0 init_image2 = 2.0 * image2 - 1.0 # empty mask mask_image = torch.zeros((1, 1, img_res, img_res), device=device) if str(device).startswith("mps"): generator1 = torch.manual_seed(seed) generator2 = torch.manual_seed(seed) else: generator1 = torch.Generator(device=device).manual_seed(seed) generator2 = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": ["A painting of a squirrel eating a burger"] * 2, "image": [init_image1, init_image2], "mask_image": [mask_image] * 2, "generator": [generator1, generator2], "num_inference_steps": 2, "guidance_scale": 6.0, "output_type": "np", } return inputs def test_stable_diffusion_inpaint(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6584, 0.5424, 0.5649, 0.5449, 0.5897, 0.6111, 0.5404, 0.5463, 0.5214]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.6240, 0.5355, 0.5649, 0.5378, 0.5374, 0.6242, 0.5132, 0.5347, 0.5396]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_inpaint_2_images(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) # test to confirm if we pass two same image, we will get same output inputs = self.get_dummy_inputs(device) gen1 = torch.Generator(device=device).manual_seed(0) gen2 = torch.Generator(device=device).manual_seed(0) for name in 
["prompt", "image", "mask_image"]: inputs[name] = [inputs[name]] * 2 inputs["generator"] = [gen1, gen2] images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() < 1e-4 # test to confirm that if we pass two different images, we will get different output inputs = self.get_dummy_inputs_2images(device) images = sd_pipe(**inputs).images assert images.shape == (2, 64, 64, 3) image_slice1 = images[0, -3:, -3:, -1] image_slice2 = images[1, -3:, -3:, -1] assert np.abs(image_slice1.flatten() - image_slice2.flatten()).max() > 1e-2 def test_stable_diffusion_inpaint_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionInpaintPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device, output_pil=False) half_dim = inputs["image"].shape[2] // 2 inputs["mask_image"][0, 0, :half_dim, :half_dim] = 0 inputs["num_inference_steps"] = 4 image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array( [[0.6387283, 0.5564158, 0.58631873, 0.5539942, 0.5494673, 0.6461868, 0.5251618, 0.5497595, 0.5508756]] ) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-4 @slow @require_torch_gpu class StableDiffusionInpaintPipelineSlowTests(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_inpaint_ddim(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0427, 0.0460, 0.0483, 0.0460, 0.0584, 0.0521, 0.1549, 0.1695, 0.1794]) assert np.abs(expected_slice - image_slice).max() < 6e-4 def test_stable_diffusion_inpaint_fp16(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 
512, 512, 3) expected_slice = np.array([0.1509, 0.1245, 0.1672, 0.1655, 0.1519, 0.1226, 0.1462, 0.1567, 0.2451]) assert np.abs(expected_slice - image_slice).max() < 1e-1 def test_stable_diffusion_inpaint_pndm(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0425, 0.0273, 0.0344, 0.1694, 0.1727, 0.1812, 0.3256, 0.3311, 0.3272]) assert np.abs(expected_slice - image_slice).max() < 5e-3 def test_stable_diffusion_inpaint_k_lms(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.9314, 0.7575, 0.9432, 0.8885, 0.9028, 0.7298, 0.9811, 0.9667, 0.7633]) assert np.abs(expected_slice - image_slice).max() < 6e-3 def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16 ) pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 @require_python39_or_higher @require_torch_2 def test_inpaint_compile(self): seed = 0 inputs = self.get_inputs(torch_device, seed=seed) # Can't pickle a Generator object del inputs["generator"] inputs["torch_device"] = torch_device inputs["seed"] = seed run_test_in_subprocess(test_case=self, target_func=_test_inpaint_compile, inputs=inputs) def test_stable_diffusion_inpaint_pil_input_resolution_test(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input image to a random size (one that would cause a tensor mismatch error) inputs["image"] = inputs["image"].resize((127, 127)) inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) inputs["height"] = 128 inputs["width"] = 128 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, inputs["height"], inputs["width"], 3) def test_stable_diffusion_inpaint_strength_test(self): pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) 
pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input strength inputs["strength"] = 0.75 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, 512, 512, 3) image_slice = image[0, 253:256, 253:256, -1].flatten() expected_slice = np.array([0.2728, 0.2803, 0.2665, 0.2511, 0.2774, 0.2586, 0.2391, 0.2392, 0.2582]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_simple_inpaint_ddim(self): pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None) pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3757, 0.3875, 0.4445, 0.4353, 0.3780, 0.4513, 0.3965, 0.3984, 0.4362]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_download_local(self): filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt") pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image_out = pipe(**inputs).images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-inpainting/blob/main/sd-v1-5-inpainting.ckpt" pipe = StableDiffusionInpaintPipeline.from_single_file(ckpt_path) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 5 image_ckpt = pipe(**inputs).images[0] pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_attn_processor(AttnProcessor()) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 5 image = pipe(**inputs).images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten()) assert max_diff < 1e-4 def test_single_file_component_configs(self): pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", variant="fp16") ckpt_path = "https://huggingface.co/runwayml/stable-diffusion-inpainting/blob/main/sd-v1-5-inpainting.ckpt" single_file_pipe = StableDiffusionInpaintPipeline.from_single_file(ckpt_path, load_safety_checker=True) for param_name, param_value in single_file_pipe.text_encoder.config.to_dict().items(): if param_name in ["torch_dtype", "architectures", "_name_or_path"]: continue assert pipe.text_encoder.config.to_dict()[param_name] == param_value PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "architectures", "_use_default_values"] for param_name, param_value in single_file_pipe.unet.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( pipe.unet.config[param_name] == param_value ), f"{param_name} is differs between single file loading and pretrained loading" for param_name, param_value in 
single_file_pipe.vae.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( pipe.vae.config[param_name] == param_value ), f"{param_name} is differs between single file loading and pretrained loading" for param_name, param_value in single_file_pipe.safety_checker.config.to_dict().items(): if param_name in PARAMS_TO_IGNORE: continue assert ( pipe.safety_checker.config.to_dict()[param_name] == param_value ), f"{param_name} is differs between single file loading and pretrained loading" @slow @require_torch_gpu class StableDiffusionInpaintPipelineAsymmetricAutoencoderKLSlowTests(unittest.TestCase): def setUp(self): super().setUp() def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_stable_diffusion_inpaint_ddim(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.vae = vae pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0522, 0.0604, 0.0596, 0.0449, 0.0493, 0.0427, 0.1186, 0.1289, 0.1442]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_stable_diffusion_inpaint_fp16(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16, safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device, dtype=torch.float16) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.1343, 0.1406, 0.1440, 0.1504, 0.1729, 0.0989, 0.1807, 0.2822, 0.1179]) assert np.abs(expected_slice - image_slice).max() < 5e-2 def test_stable_diffusion_inpaint_pndm(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() 
assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.0966, 0.1083, 0.1148, 0.1422, 0.1318, 0.1197, 0.3702, 0.3537, 0.3288]) assert np.abs(expected_slice - image_slice).max() < 5e-3 def test_stable_diffusion_inpaint_k_lms(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.8931, 0.8683, 0.8965, 0.8501, 0.8592, 0.9118, 0.8734, 0.7463, 0.8990]) assert np.abs(expected_slice - image_slice).max() < 6e-3 def test_stable_diffusion_inpaint_with_sequential_cpu_offloading(self): torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None, torch_dtype=torch.float16 ) pipe.vae = vae pipe = pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() inputs = self.get_inputs(torch_device, dtype=torch.float16) _ = pipe(**inputs) mem_bytes = torch.cuda.max_memory_allocated() # make sure that less than 2.45 GB is allocated assert mem_bytes < 2.45 * 10**9 @require_python39_or_higher @require_torch_2 def test_inpaint_compile(self): pass def test_stable_diffusion_inpaint_pil_input_resolution_test(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", ) pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input image to a random size (one that would cause a tensor mismatch error) inputs["image"] = inputs["image"].resize((127, 127)) inputs["mask_image"] = inputs["mask_image"].resize((127, 127)) inputs["height"] = 128 inputs["width"] = 128 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape == (1, inputs["height"], inputs["width"], 3) def test_stable_diffusion_inpaint_strength_test(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained( "runwayml/stable-diffusion-inpainting", safety_checker=None ) pipe.unet.set_default_attn_processor() pipe.vae = vae pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) # change input strength inputs["strength"] = 0.75 image = pipe(**inputs).images # verify that the returned image has the same height and width as the input height and width assert image.shape 
== (1, 512, 512, 3) image_slice = image[0, 253:256, 253:256, -1].flatten() expected_slice = np.array([0.2458, 0.2576, 0.3124, 0.2679, 0.2669, 0.2796, 0.2872, 0.2975, 0.2661]) assert np.abs(expected_slice - image_slice).max() < 3e-3 def test_stable_diffusion_simple_inpaint_ddim(self): vae = AsymmetricAutoencoderKL.from_pretrained("cross-attention/asymmetric-autoencoder-kl-x-1-5") pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", safety_checker=None) pipe.vae = vae pipe.unet.set_default_attn_processor() pipe.to(torch_device) pipe.set_progress_bar_config(disable=None) pipe.enable_attention_slicing() inputs = self.get_inputs(torch_device) image = pipe(**inputs).images image_slice = image[0, 253:256, 253:256, -1].flatten() assert image.shape == (1, 512, 512, 3) expected_slice = np.array([0.3296, 0.4041, 0.4097, 0.4145, 0.4342, 0.4152, 0.4927, 0.4931, 0.4430]) assert np.abs(expected_slice - image_slice).max() < 1e-3 def test_download_local(self): vae = AsymmetricAutoencoderKL.from_pretrained( "cross-attention/asymmetric-autoencoder-kl-x-1-5", torch_dtype=torch.float16 ) filename = hf_hub_download("runwayml/stable-diffusion-inpainting", filename="sd-v1-5-inpainting.ckpt") pipe = StableDiffusionInpaintPipeline.from_single_file(filename, torch_dtype=torch.float16) pipe.vae = vae pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 1 image_out = pipe(**inputs).images[0] assert image_out.shape == (512, 512, 3) def test_download_ckpt_diff_format_is_same(self): pass @nightly @require_torch_gpu class StableDiffusionInpaintPipelineNightlyTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0): generator = torch.Generator(device=generator_device).manual_seed(seed) init_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_image.png" ) mask_image = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/input_bench_mask.png" ) inputs = { "prompt": "Face of a yellow cat, high resolution, sitting on a park bench", "image": init_image, "mask_image": mask_image, "generator": generator, "num_inference_steps": 50, "guidance_scale": 7.5, "output_type": "np", } return inputs def test_inpaint_ddim(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_ddim.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_pndm(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = PNDMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_pndm.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff 
< 1e-3 def test_inpaint_lms(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_lms.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 def test_inpaint_dpm(self): sd_pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") sd_pipe.scheduler = DPMSolverMultistepScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_inputs(torch_device) inputs["num_inference_steps"] = 30 image = sd_pipe(**inputs).images[0] expected_image = load_numpy( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main" "/stable_diffusion_inpaint/stable_diffusion_inpaint_dpm_multi.npy" ) max_diff = np.abs(expected_image - image).max() assert max_diff < 1e-3 class StableDiffusionInpaintingPrepareMaskAndMaskedImageTests(unittest.TestCase): def test_pil_inputs(self): height, width = 32, 32 im = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) im = Image.fromarray(im) mask = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5 mask = Image.fromarray((mask * 255).astype(np.uint8)) t_mask, t_masked, t_image = prepare_mask_and_masked_image(im, mask, height, width, return_image=True) self.assertTrue(isinstance(t_mask, torch.Tensor)) self.assertTrue(isinstance(t_masked, torch.Tensor)) self.assertTrue(isinstance(t_image, torch.Tensor)) self.assertEqual(t_mask.ndim, 4) self.assertEqual(t_masked.ndim, 4) self.assertEqual(t_image.ndim, 4) self.assertEqual(t_mask.shape, (1, 1, height, width)) self.assertEqual(t_masked.shape, (1, 3, height, width)) self.assertEqual(t_image.shape, (1, 3, height, width)) self.assertTrue(t_mask.dtype == torch.float32) self.assertTrue(t_masked.dtype == torch.float32) self.assertTrue(t_image.dtype == torch.float32) self.assertTrue(t_mask.min() >= 0.0) self.assertTrue(t_mask.max() <= 1.0) self.assertTrue(t_masked.min() >= -1.0) self.assertTrue(t_masked.min() <= 1.0) self.assertTrue(t_image.min() >= -1.0) self.assertTrue(t_image.min() >= -1.0) self.assertTrue(t_mask.sum() > 0.0) def test_np_inputs(self): height, width = 32, 32 im_np = np.random.randint(0, 255, (height, width, 3), dtype=np.uint8) im_pil = Image.fromarray(im_np) mask_np = ( np.random.randint( 0, 255, ( height, width, ), dtype=np.uint8, ) > 127.5 ) mask_pil = Image.fromarray((mask_np * 255).astype(np.uint8)) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) t_mask_pil, t_masked_pil, t_image_pil = prepare_mask_and_masked_image( im_pil, mask_pil, height, width, return_image=True ) self.assertTrue((t_mask_np == t_mask_pil).all()) self.assertTrue((t_masked_np == t_masked_pil).all()) self.assertTrue((t_image_np == t_image_pil).all()) def test_torch_3D_2D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy().transpose(1, 2, 0) mask_np = mask_tensor.numpy() t_mask_tensor, t_masked_tensor, t_image_tensor = 
prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_3D_3D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy().transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_2D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy() t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_3D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_4D_4D_inputs(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 1, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 1, 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_np = im_tensor.numpy()[0].transpose(1, 2, 0) mask_np = mask_tensor.numpy()[0][0] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) t_mask_np, t_masked_np, t_image_np = prepare_mask_and_masked_image( im_np, mask_np, height, width, return_image=True ) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_batch_4D_3D(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 2, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 2, height, 
width, ), dtype=torch.uint8, ) > 127.5 ) im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor] mask_nps = [mask.numpy() for mask in mask_tensor] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)] t_mask_np = torch.cat([n[0] for n in nps]) t_masked_np = torch.cat([n[1] for n in nps]) t_image_np = torch.cat([n[2] for n in nps]) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_torch_batch_4D_4D(self): height, width = 32, 32 im_tensor = torch.randint( 0, 255, ( 2, 3, height, width, ), dtype=torch.uint8, ) mask_tensor = ( torch.randint( 0, 255, ( 2, 1, height, width, ), dtype=torch.uint8, ) > 127.5 ) im_nps = [im.numpy().transpose(1, 2, 0) for im in im_tensor] mask_nps = [mask.numpy()[0] for mask in mask_tensor] t_mask_tensor, t_masked_tensor, t_image_tensor = prepare_mask_and_masked_image( im_tensor / 127.5 - 1, mask_tensor, height, width, return_image=True ) nps = [prepare_mask_and_masked_image(i, m, height, width, return_image=True) for i, m in zip(im_nps, mask_nps)] t_mask_np = torch.cat([n[0] for n in nps]) t_masked_np = torch.cat([n[1] for n in nps]) t_image_np = torch.cat([n[2] for n in nps]) self.assertTrue((t_mask_tensor == t_mask_np).all()) self.assertTrue((t_masked_tensor == t_masked_np).all()) self.assertTrue((t_image_tensor == t_image_np).all()) def test_shape_mismatch(self): height, width = 32, 32 # test height and width with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 3, height, width, ), torch.randn(64, 64), height, width, return_image=True, ) # test batch dim with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 2, 3, height, width, ), torch.randn(4, 64, 64), height, width, return_image=True, ) # test batch dim with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.randn( 2, 3, height, width, ), torch.randn(4, 1, 64, 64), height, width, return_image=True, ) def test_type_mismatch(self): height, width = 32, 32 # test tensors-only with self.assertRaises(TypeError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.rand( 3, height, width, ).numpy(), height, width, return_image=True, ) # test tensors-only with self.assertRaises(TypeError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ).numpy(), torch.rand( 3, height, width, ), height, width, return_image=True, ) def test_channels_first(self): height, width = 32, 32 # test channels first for 3D tensors with self.assertRaises(AssertionError): prepare_mask_and_masked_image( torch.rand(height, width, 3), torch.rand( 3, height, width, ), height, width, return_image=True, ) def test_tensor_range(self): height, width = 32, 32 # test im <= 1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.ones( 3, height, width, ) * 2, torch.rand( height, width, ), height, width, return_image=True, ) # test im >= -1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.ones( 3, height, width, ) * (-2), torch.rand( height, width, ), height, width, return_image=True, ) # test mask <= 1 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.ones( height, width, ) * 2, height, width, return_image=True, ) # test 
mask >= 0 with self.assertRaises(ValueError): prepare_mask_and_masked_image( torch.rand( 3, height, width, ), torch.ones( height, width, ) * -1, height, width, return_image=True, )
diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion/test_stable_diffusion_inpaint.py", "repo_id": "diffusers", "token_count": 31962 }
147
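The inpainting row above ends with a block of direct unit tests for prepare_mask_and_masked_image. As a quick reference, here is a condensed sketch of the PIL path those tests exercise, restating the shape and range assertions from test_pil_inputs; it assumes diffusers (with this private helper still exposed at the same import path) and Pillow are installed.

import numpy as np
import torch
from PIL import Image
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import (
    prepare_mask_and_masked_image,
)

height, width = 32, 32
image = Image.fromarray(np.random.randint(0, 255, (height, width, 3), dtype=np.uint8))
mask_bool = np.random.randint(0, 255, (height, width), dtype=np.uint8) > 127.5
mask = Image.fromarray((mask_bool * 255).astype(np.uint8))

t_mask, t_masked, t_image = prepare_mask_and_masked_image(
    image, mask, height, width, return_image=True
)

# As asserted in the tests: all outputs are float32 4D tensors,
# the mask is (1, 1, H, W) in [0, 1], and the (masked) image is (1, 3, H, W) in [-1, 1].
assert t_mask.shape == (1, 1, height, width) and t_mask.dtype == torch.float32
assert t_masked.shape == (1, 3, height, width)
assert t_image.shape == (1, 3, height, width)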
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, StableDiffusionGLIGENPipeline, UNet2DConditionModel, ) from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class GligenPipelineFastTests( PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase ): pipeline_class = StableDiffusionGLIGENPipeline params = TEXT_TO_IMAGE_PARAMS | {"gligen_phrases", "gligen_boxes"} batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS def get_dummy_components(self): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_type="gated", ) # unet.position_net = PositionNet(32,32) scheduler = DDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False, ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A modern livingroom", "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "gligen_phrases": ["a birthday cake"], "gligen_boxes": [[0.2676, 0.6088, 0.4773, 0.7183]], "output_type": "np", } return inputs def test_stable_diffusion_gligen_default_case(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENPipeline(**components) sd_pipe = 
sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5069, 0.5561, 0.4577, 0.4792, 0.5203, 0.4089, 0.5039, 0.4919, 0.4499]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_gligen_k_euler_ancestral(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionGLIGENPipeline(**components) sd_pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) output = sd_pipe(**inputs) image = output.images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.425, 0.494, 0.429, 0.469, 0.525, 0.417, 0.533, 0.5, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(batch_size=3, expected_max_diff=3e-3)
diffusers/tests/pipelines/stable_diffusion_gligen/test_stable_diffusion_gligen.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_gligen/test_stable_diffusion_gligen.py", "repo_id": "diffusers", "token_count": 2745 }
148
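The GLIGEN test above drives the pipeline with `gligen_phrases` paired one-to-one with normalized `gligen_boxes`. A minimal usage sketch of that grounded-generation pattern follows; the checkpoint name, device, and step count are illustrative assumptions rather than values taken from the test, while the prompt, phrase, and box come from its dummy inputs.

import torch
from diffusers import StableDiffusionGLIGENPipeline

# Assumed GLIGEN-compatible checkpoint; any text-box generation checkpoint is used the same way.
pipe = StableDiffusionGLIGENPipeline.from_pretrained(
    "masterful/gligen-1-4-generation-text-box", torch_dtype=torch.float16
).to("cuda")

image = pipe(
    prompt="A modern livingroom",
    gligen_phrases=["a birthday cake"],               # one phrase per box
    gligen_boxes=[[0.2676, 0.6088, 0.4773, 0.7183]],  # normalized [x0, y0, x1, y1]
    num_inference_steps=50,
    guidance_scale=7.5,
).images[0]
image.save("gligen_livingroom.png")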
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import gc import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, DPMSolverMultistepScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, LCMScheduler, StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline, UNet2DConditionModel, UniPCMultistepScheduler, ) from diffusers.utils.testing_utils import ( enable_full_determinism, load_image, numpy_cosine_similarity_distance, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import ( TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS, ) from ..test_pipelines_common import ( IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDFunctionTesterMixin, SDXLOptionalComponentsTesterMixin, ) enable_full_determinism() class StableDiffusionXLPipelineFastTests( SDFunctionTesterMixin, IPAdapterTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, SDXLOptionalComponentsTesterMixin, unittest.TestCase, ): pipeline_class = StableDiffusionXLPipeline params = TEXT_TO_IMAGE_PARAMS batch_params = TEXT_TO_IMAGE_BATCH_PARAMS image_params = TEXT_TO_IMAGE_IMAGE_PARAMS image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS callback_cfg_params = TEXT_TO_IMAGE_CALLBACK_CFG_PARAMS.union({"add_text_embeds", "add_time_ids"}) def get_dummy_components(self, time_cond_proj_dim=None): torch.manual_seed(0) unet = UNet2DConditionModel( block_out_channels=(2, 4), layers_per_block=2, time_cond_proj_dim=time_cond_proj_dim, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), # SD2-specific config below attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, # 6 * 8 + 32 cross_attention_dim=64, norm_num_groups=1, ) scheduler = EulerDiscreteScheduler( beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading", ) torch.manual_seed(0) vae = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128, ) torch.manual_seed(0) text_encoder_config = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, # SD2-specific config below hidden_act="gelu", projection_dim=32, ) text_encoder = CLIPTextModel(text_encoder_config) tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") text_encoder_2 = 
CLIPTextModelWithProjection(text_encoder_config) tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip") components = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "text_encoder_2": text_encoder_2, "tokenizer_2": tokenizer_2, "image_encoder": None, "feature_extractor": None, } return components def get_dummy_inputs(self, device, seed=0): if str(device).startswith("mps"): generator = torch.manual_seed(seed) else: generator = torch.Generator(device=device).manual_seed(seed) inputs = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "guidance_scale": 5.0, "output_type": "np", } return inputs def test_stable_diffusion_xl_euler(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.5552, 0.5569, 0.4725, 0.4348, 0.4994, 0.4632, 0.5142, 0.5012, 0.47]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_euler_lcm(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_euler_lcm_custom_timesteps(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components(time_cond_proj_dim=256) sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) del inputs["num_inference_steps"] inputs["timesteps"] = [999, 499] image = sd_pipe(**inputs).images image_slice = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) expected_slice = np.array([0.4917, 0.6555, 0.4348, 0.5219, 0.7324, 0.4855, 0.5168, 0.5447, 0.5156]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def test_stable_diffusion_xl_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) inputs["prompt"] = 2 * [inputs["prompt"]] inputs["num_images_per_prompt"] = 2 output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) prompt = 2 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( 
**inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_xl_negative_prompt_embeds(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] inputs["negative_prompt"] = negative_prompt inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds inputs = self.get_dummy_inputs(torch_device) negative_prompt = 3 * ["this is a negative prompt"] prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, ) image_slice_2 = output.images[0, -3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_attention_slicing_forward_pass(self): super().test_attention_slicing_forward_pass(expected_max_diff=3e-3) def test_inference_batch_single_identical(self): super().test_inference_batch_single_identical(expected_max_diff=3e-3) def test_save_load_optional_components(self): self._test_save_load_optional_components() @require_torch_gpu def test_stable_diffusion_xl_offloads(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.enable_model_cpu_offload() pipes.append(sd_pipe) components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe.enable_sequential_cpu_offload() pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 assert np.abs(image_slices[0] - image_slices[2]).max() < 1e-3 def test_stable_diffusion_xl_img2img_prompt_embeds_only(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) # forward without prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) inputs["prompt"] = 3 * [inputs["prompt"]] output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with prompt embeds generator_device = "cpu" inputs = self.get_dummy_inputs(generator_device) prompt = 3 * [inputs.pop("prompt")] ( prompt_embeds, _, pooled_prompt_embeds, _, ) = sd_pipe.encode_prompt(prompt) output = sd_pipe( **inputs, prompt_embeds=prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, ) image_slice_2 = output.images[0, 
-3:, -3:, -1] # make sure that it's equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 def test_stable_diffusion_two_xl_mixture_of_denoiser_fast(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, expected_tss, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = { **inputs, **{ "denoising_end": 1.0 - (split / num_train_timesteps), "output_type": "latent", }, } latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = { **inputs, **{ "denoising_start": 1.0 - (split / num_train_timesteps), "image": latents, }, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" steps = 10 for split in [300, 700]: for scheduler_cls_timesteps in [ (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), ( HeunDiscreteScheduler, [ 901.0, 801.0, 801.0, 701.0, 701.0, 601.0, 601.0, 501.0, 501.0, 401.0, 401.0, 301.0, 301.0, 201.0, 201.0, 101.0, 101.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) @slow def test_stable_diffusion_two_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split, scheduler_cls_orig, expected_tss, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = 
pipe_1.scheduler.timesteps.tolist() if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = expected_steps_1[-1:] + list(filter(lambda ts: ts < split, expected_tss)) expected_steps = expected_steps_1 + expected_steps_2 else: expected_steps_1 = list(filter(lambda ts: ts >= split, expected_tss)) expected_steps_2 = list(filter(lambda ts: ts < split, expected_tss)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = { **inputs, **{ "denoising_end": 1.0 - (split / num_train_timesteps), "output_type": "latent", }, } latents = pipe_1(**inputs_1).images[0] assert expected_steps_1 == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" inputs_2 = { **inputs, **{ "denoising_start": 1.0 - (split / num_train_timesteps), "image": latents, }, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] assert expected_steps == done_steps, f"Failure with {scheduler_cls.__name__} and {num_steps} and {split}" steps = 10 for split in [300, 500, 700]: for scheduler_cls_timesteps in [ (DDIMScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), (EulerDiscreteScheduler, [901, 801, 701, 601, 501, 401, 301, 201, 101, 1]), (DPMSolverMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), (UniPCMultistepScheduler, [901, 811, 721, 631, 541, 451, 361, 271, 181, 91]), ( HeunDiscreteScheduler, [ 901.0, 801.0, 801.0, 701.0, 701.0, 601.0, 601.0, 501.0, 501.0, 401.0, 401.0, 301.0, 301.0, 201.0, 201.0, 101.0, 101.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) steps = 25 for split in [300, 500, 700]: for scheduler_cls_timesteps in [ ( DDIMScheduler, [ 961, 921, 881, 841, 801, 761, 721, 681, 641, 601, 561, 521, 481, 441, 401, 361, 321, 281, 241, 201, 161, 121, 81, 41, 1, ], ), ( EulerDiscreteScheduler, [ 961.0, 921.0, 881.0, 841.0, 801.0, 761.0, 721.0, 681.0, 641.0, 601.0, 561.0, 521.0, 481.0, 441.0, 401.0, 361.0, 321.0, 281.0, 241.0, 201.0, 161.0, 121.0, 81.0, 41.0, 1.0, ], ), ( DPMSolverMultistepScheduler, [ 951, 913, 875, 837, 799, 761, 723, 685, 647, 609, 571, 533, 495, 457, 419, 381, 343, 305, 267, 229, 191, 153, 115, 77, 39, ], ), ( UniPCMultistepScheduler, [ 951, 913, 875, 837, 799, 761, 723, 685, 647, 609, 571, 533, 495, 457, 419, 381, 343, 305, 267, 229, 191, 153, 115, 77, 39, ], ), ( HeunDiscreteScheduler, [ 961.0, 921.0, 921.0, 881.0, 881.0, 841.0, 841.0, 801.0, 801.0, 761.0, 761.0, 721.0, 721.0, 681.0, 681.0, 641.0, 641.0, 601.0, 601.0, 561.0, 561.0, 521.0, 521.0, 481.0, 481.0, 441.0, 441.0, 401.0, 401.0, 361.0, 361.0, 321.0, 321.0, 281.0, 281.0, 241.0, 241.0, 201.0, 201.0, 161.0, 161.0, 121.0, 121.0, 81.0, 81.0, 41.0, 41.0, 1.0, 1.0, ], ), ]: assert_run_mixture(steps, split, scheduler_cls_timesteps[0], scheduler_cls_timesteps[1]) @slow def test_stable_diffusion_three_xl_mixture_of_denoiser(self): components = self.get_dummy_components() pipe_1 = StableDiffusionXLPipeline(**components).to(torch_device) pipe_1.unet.set_default_attn_processor() pipe_2 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) pipe_2.unet.set_default_attn_processor() pipe_3 = StableDiffusionXLImg2ImgPipeline(**components).to(torch_device) 
pipe_3.unet.set_default_attn_processor() def assert_run_mixture( num_steps, split_1, split_2, scheduler_cls_orig, num_train_timesteps=pipe_1.scheduler.config.num_train_timesteps, ): inputs = self.get_dummy_inputs(torch_device) inputs["num_inference_steps"] = num_steps class scheduler_cls(scheduler_cls_orig): pass pipe_1.scheduler = scheduler_cls.from_config(pipe_1.scheduler.config) pipe_2.scheduler = scheduler_cls.from_config(pipe_2.scheduler.config) pipe_3.scheduler = scheduler_cls.from_config(pipe_3.scheduler.config) # Let's retrieve the number of timesteps we want to use pipe_1.scheduler.set_timesteps(num_steps) expected_steps = pipe_1.scheduler.timesteps.tolist() split_1_ts = num_train_timesteps - int(round(num_train_timesteps * split_1)) split_2_ts = num_train_timesteps - int(round(num_train_timesteps * split_2)) if pipe_1.scheduler.order == 2: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = expected_steps_1[-1:] + list( filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps) ) expected_steps_3 = expected_steps_2[-1:] + list(filter(lambda ts: ts < split_2_ts, expected_steps)) expected_steps = expected_steps_1 + expected_steps_2 + expected_steps_3 else: expected_steps_1 = list(filter(lambda ts: ts >= split_1_ts, expected_steps)) expected_steps_2 = list(filter(lambda ts: ts >= split_2_ts and ts < split_1_ts, expected_steps)) expected_steps_3 = list(filter(lambda ts: ts < split_2_ts, expected_steps)) # now we monkey patch step `done_steps` # list into the step function for testing done_steps = [] old_step = copy.copy(scheduler_cls.step) def new_step(self, *args, **kwargs): done_steps.append(args[1].cpu().item()) # args[1] is always the passed `t` return old_step(self, *args, **kwargs) scheduler_cls.step = new_step inputs_1 = {**inputs, **{"denoising_end": split_1, "output_type": "latent"}} latents = pipe_1(**inputs_1).images[0] assert ( expected_steps_1 == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" with self.assertRaises(ValueError) as cm: inputs_2 = { **inputs, **{ "denoising_start": split_2, "denoising_end": split_1, "image": latents, "output_type": "latent", }, } pipe_2(**inputs_2).images[0] assert "cannot be larger than or equal to `denoising_end`" in str(cm.exception) inputs_2 = { **inputs, **{"denoising_start": split_1, "denoising_end": split_2, "image": latents, "output_type": "latent"}, } pipe_2(**inputs_2).images[0] assert expected_steps_2 == done_steps[len(expected_steps_1) :] inputs_3 = {**inputs, **{"denoising_start": split_2, "image": latents}} pipe_3(**inputs_3).images[0] assert expected_steps_3 == done_steps[len(expected_steps_1) + len(expected_steps_2) :] assert ( expected_steps == done_steps ), f"Failure with {scheduler_cls.__name__} and {num_steps} and {split_1} and {split_2}" for steps in [7, 11, 20]: for split_1, split_2 in zip([0.19, 0.32], [0.81, 0.68]): for scheduler_cls in [ DDIMScheduler, EulerDiscreteScheduler, DPMSolverMultistepScheduler, UniPCMultistepScheduler, HeunDiscreteScheduler, ]: assert_run_mixture(steps, split_1, split_2, scheduler_cls) def test_stable_diffusion_xl_multi_prompts(self): components = self.get_dummy_components() sd_pipe = self.pipeline_class(**components).to(torch_device) # forward with single prompt inputs = self.get_dummy_inputs(torch_device) output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = 
inputs["prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different prompt inputs = self.get_dummy_inputs(torch_device) inputs["prompt_2"] = "different prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 # manually set a negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" output = sd_pipe(**inputs) image_slice_1 = output.images[0, -3:, -3:, -1] # forward with same negative_prompt duplicated inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = inputs["negative_prompt"] output = sd_pipe(**inputs) image_slice_2 = output.images[0, -3:, -3:, -1] # ensure the results are equal assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4 # forward with different negative_prompt inputs = self.get_dummy_inputs(torch_device) inputs["negative_prompt"] = "negative prompt" inputs["negative_prompt_2"] = "different negative prompt" output = sd_pipe(**inputs) image_slice_3 = output.images[0, -3:, -3:, -1] # ensure the results are not equal assert np.abs(image_slice_1.flatten() - image_slice_3.flatten()).max() > 1e-4 def test_stable_diffusion_xl_negative_conditions(self): device = "cpu" # ensure determinism for the device-dependent torch.Generator components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(device) sd_pipe.set_progress_bar_config(disable=None) inputs = self.get_dummy_inputs(device) image = sd_pipe(**inputs).images image_slice_with_no_neg_cond = image[0, -3:, -3:, -1] image = sd_pipe( **inputs, negative_original_size=(512, 512), negative_crops_coords_top_left=(0, 0), negative_target_size=(1024, 1024), ).images image_slice_with_neg_cond = image[0, -3:, -3:, -1] self.assertTrue(np.abs(image_slice_with_no_neg_cond - image_slice_with_neg_cond).max() > 1e-2) def test_stable_diffusion_xl_save_from_pretrained(self): pipes = [] components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components).to(torch_device) pipes.append(sd_pipe) with tempfile.TemporaryDirectory() as tmpdirname: sd_pipe.save_pretrained(tmpdirname) sd_pipe = StableDiffusionXLPipeline.from_pretrained(tmpdirname).to(torch_device) pipes.append(sd_pipe) image_slices = [] for pipe in pipes: pipe.unet.set_default_attn_processor() inputs = self.get_dummy_inputs(torch_device) image = pipe(**inputs).images image_slices.append(image[0, -3:, -3:, -1].flatten()) assert np.abs(image_slices[0] - image_slices[1]).max() < 1e-3 def test_pipeline_interrupt(self): components = self.get_dummy_components() sd_pipe = StableDiffusionXLPipeline(**components) sd_pipe = sd_pipe.to(torch_device) sd_pipe.set_progress_bar_config(disable=None) prompt = "hey" num_inference_steps = 3 # store intermediate latents from the generation process class PipelineState: def __init__(self): self.state = [] def apply(self, pipe, i, t, callback_kwargs): self.state.append(callback_kwargs["latents"]) return callback_kwargs pipe_state = PipelineState() sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="np", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=pipe_state.apply, ).images # interrupt generation at step index 
interrupt_step_idx = 1 def callback_on_step_end(pipe, i, t, callback_kwargs): if i == interrupt_step_idx: pipe._interrupt = True return callback_kwargs output_interrupted = sd_pipe( prompt, num_inference_steps=num_inference_steps, output_type="latent", generator=torch.Generator("cpu").manual_seed(0), callback_on_step_end=callback_on_step_end, ).images # fetch intermediate latents at the interrupted step # from the completed generation process intermediate_latent = pipe_state.state[interrupt_step_idx] # compare the intermediate latent to the output of the interrupted process # they should be the same assert torch.allclose(intermediate_latent, output_interrupted, atol=1e-4) @slow class StableDiffusionXLPipelineIntegrationTests(unittest.TestCase): def tearDown(self): super().tearDown() gc.collect() torch.cuda.empty_cache() def test_stable_diffusion_lcm(self): torch.manual_seed(0) unet = UNet2DConditionModel.from_pretrained( "latent-consistency/lcm-ssd-1b", torch_dtype=torch.float16, variant="fp16" ) sd_pipe = StableDiffusionXLPipeline.from_pretrained( "segmind/SSD-1B", unet=unet, torch_dtype=torch.float16, variant="fp16" ).to(torch_device) sd_pipe.scheduler = LCMScheduler.from_config(sd_pipe.scheduler.config) sd_pipe.set_progress_bar_config(disable=None) prompt = "a red car standing on the side of the street" image = sd_pipe(prompt, num_inference_steps=4, guidance_scale=8.0).images[0] expected_image = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/lcm_full/stable_diffusion_ssd_1b_lcm.png" ) image = sd_pipe.image_processor.pil_to_numpy(image) expected_image = sd_pipe.image_processor.pil_to_numpy(expected_image) max_diff = numpy_cosine_similarity_distance(image.flatten(), expected_image.flatten()) assert max_diff < 1e-2 def test_download_ckpt_diff_format_is_same(self): ckpt_path = ( "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" ) pipe = StableDiffusionXLPipeline.from_single_file(ckpt_path, torch_dtype=torch.float16) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image_ckpt = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config) pipe.unet.set_default_attn_processor() pipe.enable_model_cpu_offload() generator = torch.Generator(device="cpu").manual_seed(0) image = pipe("a turtle", num_inference_steps=2, generator=generator, output_type="np").images[0] max_diff = numpy_cosine_similarity_distance(image.flatten(), image_ckpt.flatten()) assert max_diff < 6e-3 def test_single_file_component_configs(self): pipe = StableDiffusionXLPipeline.from_pretrained( "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16 ) ckpt_path = ( "https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0/blob/main/sd_xl_base_1.0.safetensors" ) single_file_pipe = StableDiffusionXLPipeline.from_single_file( ckpt_path, variant="fp16", torch_dtype=torch.float16 ) for param_name, param_value in single_file_pipe.text_encoder.config.to_dict().items(): if param_name in ["torch_dtype", "architectures", "_name_or_path"]: continue assert pipe.text_encoder.config.to_dict()[param_name] == param_value for param_name, param_value in 
single_file_pipe.text_encoder_2.config.to_dict().items(): if param_name in ["torch_dtype", "architectures", "_name_or_path"]: continue assert pipe.text_encoder_2.config.to_dict()[param_name] == param_value PARAMS_TO_IGNORE = ["torch_dtype", "_name_or_path", "architectures", "_use_default_values"] for param_name, param_value in single_file_pipe.unet.config.items(): if param_name in PARAMS_TO_IGNORE: continue if param_name == "upcast_attention" and pipe.unet.config[param_name] is None: pipe.unet.config[param_name] = False assert ( pipe.unet.config[param_name] == param_value ), f"{param_name} differs between single file loading and pretrained loading" for param_name, param_value in single_file_pipe.vae.config.items(): if param_name in PARAMS_TO_IGNORE: continue assert ( pipe.vae.config[param_name] == param_value ), f"{param_name} differs between single file loading and pretrained loading"
diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py/0
{ "file_path": "diffusers/tests/pipelines/stable_diffusion_xl/test_stable_diffusion_xl.py", "repo_id": "diffusers", "token_count": 22841 }
149
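The mixture-of-denoisers tests above split a single diffusion schedule between a text-to-image pipeline (stopped early via `denoising_end`, returning latents) and an img2img pipeline (resumed via `denoising_start` on those latents). A sketch of that hand-off with full-size checkpoints; the base and refiner model IDs, the prompt, and the split fraction are assumptions, not values from the tests.

import torch
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline

# Assumed checkpoints; the latent hand-off mechanics are what the tests verify.
base = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")
refiner = StableDiffusionXLImg2ImgPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-refiner-1.0", torch_dtype=torch.float16, variant="fp16"
).to("cuda")

prompt = "a red car standing on the side of the street"
split = 0.8  # fraction of the schedule run by the base pipeline

# The base pipeline stops at `denoising_end` and returns latents instead of decoded images.
latents = base(
    prompt=prompt, num_inference_steps=40, denoising_end=split, output_type="latent"
).images
# The img2img pipeline resumes from the same point in the schedule.
image = refiner(
    prompt=prompt, image=latents, num_inference_steps=40, denoising_start=split
).images[0]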
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class DownloadTests(unittest.TestCase): def test_download_only_pytorch(self): with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _ = FlaxDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname ) all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname, os.listdir(tmpdirname)[0], "snapshots"))] files = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(".bin") for f in files) @slow @require_flax class FlaxPipelineTests(unittest.TestCase): def test_dummy_all_tpus(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 4 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 64, 64, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 4.1514745) < 1e-3 assert np.abs(np.abs(images, dtype=np.float32).sum() - 49947.875) < 5e-1 images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) assert len(images_pil) == num_samples def test_stable_diffusion_v1_4(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="flax", safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = 
pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.05652401)) < 1e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2383808.2)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16_with_safety(self): pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16 ) prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.04003906)) < 5e-2 assert np.abs((np.abs(images, dtype=np.float32).sum() - 2373516.75)) < 5e-1 def test_stable_diffusion_v1_4_bfloat_16_ddim(self): scheduler = FlaxDDIMScheduler( beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", set_alpha_to_one=False, steps_offset=1, ) pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, scheduler=scheduler, safety_checker=None, ) scheduler_state = scheduler.create_state() params["scheduler"] = scheduler_state prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) prng_seed = jax.random.PRNGKey(0) num_inference_steps = 50 num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) # shard inputs and rng params = replicate(params) prng_seed = jax.random.split(prng_seed, num_samples) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:], dtype=np.float32).sum() - 0.045043945)) < 5e-2 assert 
np.abs((np.abs(images, dtype=np.float32).sum() - 2347693.5)) < 5e-1 def test_jax_memory_efficient_attention(self): prompt = ( "A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of" " field, close up, split lighting, cinematic" ) num_samples = jax.device_count() prompt = num_samples * [prompt] prng_seed = jax.random.split(jax.random.PRNGKey(0), num_samples) pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, ) params = replicate(params) prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, params, prng_seed, jit=True).images assert images.shape == (num_samples, 1, 512, 512, 3) slice = images[2, 0, 256, 10:17, 1] # With memory efficient attention pipeline, params = FlaxStableDiffusionPipeline.from_pretrained( "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None, use_memory_efficient_attention=True, ) params = replicate(params) prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids = shard(prompt_ids) images_eff = pipeline(prompt_ids, params, prng_seed, jit=True).images assert images_eff.shape == (num_samples, 1, 512, 512, 3) slice_eff = images_eff[2, 0, 256, 10:17, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice).max() < 1e-2
diffusers/tests/pipelines/test_pipelines_flax.py/0
{ "file_path": "diffusers/tests/pipelines/test_pipelines_flax.py", "repo_id": "diffusers", "token_count": 4559 }
150
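The Flax pipeline tests above all follow the same data-parallel sampling recipe: replicate the parameters across devices, shard the tokenized prompts and RNG keys, and call the pipeline with `jit=True`. A condensed sketch of that recipe, assuming the `bf16` revision used in the tests; the prompt text and step count are placeholders.

import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxStableDiffusionPipeline

pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16, safety_checker=None
)

num_samples = jax.device_count()
prompt_ids = pipeline.prepare_inputs(num_samples * ["an astronaut riding a horse"])  # placeholder prompt

params = replicate(params)                                  # one parameter copy per device
rng = jax.random.split(jax.random.PRNGKey(0), num_samples)  # one RNG key per device
prompt_ids = shard(prompt_ids)                              # adds the leading device axis

images = pipeline(prompt_ids, params, rng, num_inference_steps=25, jit=True).images
# images has shape (num_devices, batch_per_device, 512, 512, 3)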
# coding=utf-8 # Copyright 2024 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import tempfile import unittest from typing import Dict, List, Tuple from diffusers import FlaxDDIMScheduler, FlaxDDPMScheduler, FlaxPNDMScheduler from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from jax import random jax_device = jax.default_backend() @require_flax class FlaxSchedulerCommonTest(unittest.TestCase): scheduler_classes = () forward_default_kwargs = () @property def dummy_sample(self): batch_size = 4 num_channels = 3 height = 8 width = 8 key1, key2 = random.split(random.PRNGKey(0)) sample = random.uniform(key1, (batch_size, num_channels, height, width)) return sample, key2 @property def dummy_sample_deter(self): batch_size = 4 num_channels = 3 height = 8 width = 8 num_elems = batch_size * num_channels * height * width sample = jnp.arange(num_elems) sample = sample.reshape(num_channels, height, width, batch_size) sample = sample / num_elems return jnp.transpose(sample, (3, 0, 1, 2)) def get_scheduler_config(self): raise NotImplementedError def dummy_model(self): def model(sample, t, *args): return sample * t / (t + 1) return model def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, key = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) kwargs.update(forward_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, key = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps 
is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, time_step, sample, key, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, time_step, sample, key, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, key = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, 1, sample, key, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, key = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output_0 = scheduler.step(state, residual, 0, sample, key, **kwargs).prev_sample output_1 = scheduler.step(state, residual, 1, sample, key, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): return t.at[t != t].set(0) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. 
Dict has" f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." ), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, key = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_dict = scheduler.step(state, residual, 0, sample, key, **kwargs) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_tuple = scheduler.step(state, residual, 0, sample, key, return_dict=False, **kwargs) recursive_check(outputs_tuple[0], outputs_dict.prev_sample) def test_deprecated_kwargs(self): for scheduler_class in self.scheduler_classes: has_kwarg_in_model_class = "kwargs" in inspect.signature(scheduler_class.__init__).parameters has_deprecated_kwarg = len(scheduler_class._deprecated_kwargs) > 0 if has_kwarg_in_model_class and not has_deprecated_kwarg: raise ValueError( f"{scheduler_class} has `**kwargs` in its __init__ method but has not defined any deprecated" " kwargs under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if" " there are no deprecated arguments or add the deprecated argument with `_deprecated_kwargs =" " [<deprecated_argument>]`" ) if not has_kwarg_in_model_class and has_deprecated_kwarg: raise ValueError( f"{scheduler_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated" " kwargs under the `_deprecated_kwargs` class attribute. 
Make sure to either add the `**kwargs`" f" argument to {self.model_class}.__init__ if there are deprecated arguments or remove the" " deprecated argument from `_deprecated_kwargs = [<deprecated_argument>]`" ) @require_flax class FlaxDDPMSchedulerTest(FlaxSchedulerCommonTest): scheduler_classes = (FlaxDDPMScheduler,) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", "variance_type": "fixed_small", "clip_sample": True, } config.update(**kwargs) return config def test_timesteps(self): for timesteps in [1, 5, 100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_variance_type(self): for variance in ["fixed_small", "fixed_large", "other"]: self.check_over_configs(variance_type=variance) def test_clip_sample(self): for clip_sample in [True, False]: self.check_over_configs(clip_sample=clip_sample) def test_time_indices(self): for t in [0, 500, 999]: self.check_over_forward(time_step=t) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0) - 0.0)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487) - 0.00979)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999) - 0.02)) < 1e-5 def test_full_loop_no_noise(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() num_trained_timesteps = len(scheduler) model = self.dummy_model() sample = self.dummy_sample_deter key1, key2 = random.split(random.PRNGKey(0)) for t in reversed(range(num_trained_timesteps)): # 1. predict noise residual residual = model(sample, t) # 2. 
predict previous mean of sample x_t-1 output = scheduler.step(state, residual, t, sample, key1) pred_prev_sample = output.prev_sample state = output.state key1, key2 = random.split(key2) # if t > 0: # noise = self.dummy_sample_deter # variance = scheduler.get_variance(t) ** (0.5) * noise # # sample = pred_prev_sample + variance sample = pred_prev_sample result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 255.0714) < 1e-2 assert abs(result_mean - 0.332124) < 1e-3 else: assert abs(result_sum - 255.1113) < 1e-2 assert abs(result_mean - 0.332176) < 1e-3 @require_flax class FlaxDDIMSchedulerTest(FlaxSchedulerCommonTest): scheduler_classes = (FlaxDDIMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() key1, key2 = random.split(random.PRNGKey(0)) num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter state = scheduler.set_timesteps(state, num_inference_steps) for t in state.timesteps: residual = model(sample, t) output = scheduler.step(state, residual, t, sample) sample = output.prev_sample state = output.state key1, key2 = random.split(key2) return sample def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, _ = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, _ = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, 
"set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, 1, sample, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) kwargs.update(forward_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: sample, _ = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output = scheduler.step(state, residual, time_step, sample, **kwargs).prev_sample new_output = new_scheduler.step(new_state, residual, time_step, sample, **kwargs).prev_sample assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): return t.at[t != t].set(0) def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has" f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." 
), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, _ = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs) recursive_check(outputs_tuple[0], outputs_dict.prev_sample) def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, _ = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps output_0 = scheduler.step(state, residual, 0, sample, **kwargs).prev_sample output_1 = scheduler.step(state, residual, 1, sample, **kwargs).prev_sample self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 500, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, 5) assert jnp.equal(state.timesteps, jnp.array([801, 601, 401, 201, 1])).all() def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_time_indices(self): for t in [1, 10, 49]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 10, 50], [10, 50, 500]): self.check_over_forward(time_step=t, num_inference_steps=num_inference_steps) def test_variance(self): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 0) - 0.0)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 420, 400) - 0.14771)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 980, 960) - 0.32460)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 0, 
0) - 0.0)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 487, 486) - 0.00979)) < 1e-5 assert jnp.sum(jnp.abs(scheduler._get_variance(state, 999, 998) - 0.02)) < 1e-5 def test_full_loop_no_noise(self): sample = self.full_loop() result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) assert abs(result_sum - 172.0067) < 1e-2 assert abs(result_mean - 0.223967) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 149.8409) < 1e-2 assert abs(result_mean - 0.1951) < 1e-3 else: assert abs(result_sum - 149.8295) < 1e-2 assert abs(result_mean - 0.1951) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": pass # FIXME: both result_sum and result_mean are nan on TPU # assert jnp.isnan(result_sum) # assert jnp.isnan(result_mean) else: assert abs(result_sum - 149.0784) < 1e-2 assert abs(result_mean - 0.1941) < 1e-3 def test_prediction_type(self): for prediction_type in ["epsilon", "sample", "v_prediction"]: self.check_over_configs(prediction_type=prediction_type) @require_flax class FlaxPNDMSchedulerTest(FlaxSchedulerCommonTest): scheduler_classes = (FlaxPNDMScheduler,) forward_default_kwargs = (("num_inference_steps", 50),) def get_scheduler_config(self, **kwargs): config = { "num_train_timesteps": 1000, "beta_start": 0.0001, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**kwargs) return config def check_over_configs(self, time_step=0, **config): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample, _ = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) # copy over dummy past residuals state = state.replace(ets=dummy_past_residuals[:]) with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape) # copy over dummy past residuals new_state = new_state.replace(ets=dummy_past_residuals[:]) (prev_sample, state) = scheduler.step_prk(state, residual, time_step, sample, **kwargs) (new_prev_sample, new_state) = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs) assert jnp.sum(jnp.abs(prev_sample - new_prev_sample)) < 1e-5, "Scheduler outputs are not identical" output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs) new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs) assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" def test_from_save_pretrained(self): pass def test_scheduler_outputs_equivalence(self): def set_nan_tensor_to_zero(t): return t.at[t != t].set(0) def 
recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif isinstance(tuple_object, Dict): for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assertTrue( jnp.allclose(set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5), msg=( "Tuple and dict output are not equal. Difference:" f" {jnp.max(jnp.abs(tuple_object - dict_object))}. Tuple has `nan`:" f" {jnp.isnan(tuple_object).any()} and `inf`: {jnp.isinf(tuple_object)}. Dict has" f" `nan`: {jnp.isnan(dict_object).any()} and `inf`: {jnp.isinf(dict_object)}." ), ) kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, _ = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_dict = scheduler.step(state, residual, 0, sample, **kwargs) if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps outputs_tuple = scheduler.step(state, residual, 0, sample, return_dict=False, **kwargs) recursive_check(outputs_tuple[0], outputs_dict.prev_sample) def check_over_forward(self, time_step=0, **forward_kwargs): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) sample, _ = self.dummy_sample residual = 0.1 * sample dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) # copy over dummy past residuals (must be after setting timesteps) scheduler.ets = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(tmpdirname) new_scheduler, new_state = scheduler_class.from_pretrained(tmpdirname) # copy over dummy past residuals new_state = new_scheduler.set_timesteps(new_state, num_inference_steps, shape=sample.shape) # copy over dummy past residual (must be after setting timesteps) new_state.replace(ets=dummy_past_residuals[:]) output, state = scheduler.step_prk(state, residual, time_step, sample, **kwargs) new_output, new_state = new_scheduler.step_prk(new_state, residual, time_step, sample, **kwargs) assert jnp.sum(jnp.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical" output, _ = scheduler.step_plms(state, residual, time_step, sample, **kwargs) new_output, _ = new_scheduler.step_plms(new_state, residual, time_step, sample, **kwargs) assert jnp.sum(jnp.abs(output 
- new_output)) < 1e-5, "Scheduler outputs are not identical" def full_loop(self, **config): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(**config) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() num_inference_steps = 10 model = self.dummy_model() sample = self.dummy_sample_deter state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) for i, t in enumerate(state.prk_timesteps): residual = model(sample, t) sample, state = scheduler.step_prk(state, residual, t, sample) for i, t in enumerate(state.plms_timesteps): residual = model(sample, t) sample, state = scheduler.step_plms(state, residual, t, sample) return sample def test_step_shape(self): kwargs = dict(self.forward_default_kwargs) num_inference_steps = kwargs.pop("num_inference_steps", None) for scheduler_class in self.scheduler_classes: scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() sample, _ = self.dummy_sample residual = 0.1 * sample if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"): state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"): kwargs["num_inference_steps"] = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) dummy_past_residuals = jnp.array([residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]) state = state.replace(ets=dummy_past_residuals[:]) output_0, state = scheduler.step_prk(state, residual, 0, sample, **kwargs) output_1, state = scheduler.step_prk(state, residual, 1, sample, **kwargs) self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) output_0, state = scheduler.step_plms(state, residual, 0, sample, **kwargs) output_1, state = scheduler.step_plms(state, residual, 1, sample, **kwargs) self.assertEqual(output_0.shape, sample.shape) self.assertEqual(output_0.shape, output_1.shape) def test_timesteps(self): for timesteps in [100, 1000]: self.check_over_configs(num_train_timesteps=timesteps) def test_steps_offset(self): for steps_offset in [0, 1]: self.check_over_configs(steps_offset=steps_offset) scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config(steps_offset=1) scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, 10, shape=()) assert jnp.equal( state.timesteps, jnp.array([901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]), ).all() def test_betas(self): for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]): self.check_over_configs(beta_start=beta_start, beta_end=beta_end) def test_schedules(self): for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=schedule) def test_time_indices(self): for t in [1, 5, 10]: self.check_over_forward(time_step=t) def test_inference_steps(self): for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]): self.check_over_forward(num_inference_steps=num_inference_steps) def test_pow_of_3_inference_steps(self): # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3 num_inference_steps = 27 for scheduler_class in self.scheduler_classes: sample, _ = self.dummy_sample residual = 0.1 * sample scheduler_config = self.get_scheduler_config() scheduler = 
scheduler_class(**scheduler_config) state = scheduler.create_state() state = scheduler.set_timesteps(state, num_inference_steps, shape=sample.shape) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(state.prk_timesteps[:2]): sample, state = scheduler.step_prk(state, residual, t, sample) def test_inference_plms_no_past_residuals(self): with self.assertRaises(ValueError): scheduler_class = self.scheduler_classes[0] scheduler_config = self.get_scheduler_config() scheduler = scheduler_class(**scheduler_config) state = scheduler.create_state() scheduler.step_plms(state, self.dummy_sample, 1, self.dummy_sample).prev_sample def test_full_loop_no_noise(self): sample = self.full_loop() result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 198.1275) < 1e-2 assert abs(result_mean - 0.2580) < 1e-3 else: assert abs(result_sum - 198.1318) < 1e-2 assert abs(result_mean - 0.2580) < 1e-3 def test_full_loop_with_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01) result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 186.83226) < 1e-2 assert abs(result_mean - 0.24327) < 1e-3 else: assert abs(result_sum - 186.9466) < 1e-2 assert abs(result_mean - 0.24342) < 1e-3 def test_full_loop_with_no_set_alpha_to_one(self): # We specify different beta, so that the first alpha is 0.99 sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01) result_sum = jnp.sum(jnp.abs(sample)) result_mean = jnp.mean(jnp.abs(sample)) if jax_device == "tpu": assert abs(result_sum - 186.83226) < 1e-2 assert abs(result_mean - 0.24327) < 1e-3 else: assert abs(result_sum - 186.9482) < 1e-2 assert abs(result_mean - 0.2434) < 1e-3
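# Illustrative usage sketch (not part of the original test suite): the pattern the
# tests above exercise. The Flax schedulers are functional: every mutable value
# lives in an explicit `state` object that `set_timesteps` and `step` return rather
# than mutate in place. The lambda below is a hypothetical stand-in for a Flax UNet;
# `FlaxDDIMScheduler` and `jnp` come from the imports at the top of this file.
def _example_flax_ddim_loop():
    scheduler = FlaxDDIMScheduler(num_train_timesteps=1000)
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, 50)  # returns a new state
    sample = jnp.zeros((1, 3, 32, 32))  # starting noise/latents
    model = lambda x, t: 0.1 * x  # hypothetical noise predictor
    for t in state.timesteps:
        residual = model(sample, t)
        output = scheduler.step(state, residual, t, sample)
        sample, state = output.prev_sample, output.state  # thread the new state through
    return sample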
diffusers/tests/schedulers/test_scheduler_flax.py/0
{ "file_path": "diffusers/tests/schedulers/test_scheduler_flax.py", "repo_id": "diffusers", "token_count": 18869 }
151
# coding=utf-8 # Copyright 2024 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import glob import os import re import subprocess # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py DIFFUSERS_PATH = "src/diffusers" REPO_PATH = "." def _should_continue(line, indent): return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None def find_code_in_diffusers(object_name): """Find and return the code source code of `object_name`.""" parts = object_name.split(".") i = 0 # First let's find the module where our object lives. module = parts[i] while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")): i += 1 if i < len(parts): module = os.path.join(module, parts[i]) if i >= len(parts): raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.") with open( os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n", ) as f: lines = f.readlines() # Now let's find the class / func in the code! indent = "" line_index = 0 for name in parts[i + 1 :]: while ( line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(lines): raise ValueError(f" {object_name} does not match any function or class in {module}.") # We found the beginning of the class / func, now let's find the end (when the indent diminishes). start_index = line_index while line_index < len(lines) and _should_continue(lines[line_index], indent): line_index += 1 # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 code_lines = lines[start_index:line_index] return "".join(code_lines) _re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)") _re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)") _re_fill_pattern = re.compile(r"<FILL\s+[^>]*>") def get_indent(code): lines = code.split("\n") idx = 0 while idx < len(lines) and len(lines[idx]) == 0: idx += 1 if idx < len(lines): return re.search(r"^(\s*)\S", lines[idx]).groups()[0] return "" def run_ruff(code): command = ["ruff", "format", "-", "--config", "pyproject.toml", "--silent"] process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) stdout, _ = process.communicate(input=code.encode()) return stdout.decode() def stylify(code: str) -> str: """ Applies the ruff part of our `make style` command to some code. This formats the code using `ruff format`. As `ruff` does not provide a python api this cannot be done on the fly. Args: code (`str`): The code to format. Returns: `str`: The formatted code. 
""" has_indent = len(get_indent(code)) > 0 if has_indent: code = f"class Bla:\n{code}" formatted_code = run_ruff(code) return formatted_code[len("class Bla:\n") :] if has_indent else formatted_code def is_copy_consistent(filename, overwrite=False): """ Check if the code commented as a copy in `filename` matches the original. Return the differences or overwrites the content depending on `overwrite`. """ with open(filename, "r", encoding="utf-8", newline="\n") as f: lines = f.readlines() diffs = [] line_index = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(lines): search = _re_copy_warning.search(lines[line_index]) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. indent, object_name, replace_pattern = search.groups() theoretical_code = find_code_in_diffusers(object_name) theoretical_indent = get_indent(theoretical_code) start_index = line_index + 1 if indent == theoretical_indent else line_index + 2 indent = theoretical_indent line_index = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. should_continue = True while line_index < len(lines) and should_continue: line_index += 1 if line_index >= len(lines): break line = lines[line_index] should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1]) <= 1: line_index -= 1 observed_code_lines = lines[start_index:line_index] observed_code = "".join(observed_code_lines) # Remove any nested `Copied from` comments to avoid circular copies theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None] theoretical_code = "\n".join(theoretical_code) # Before comparing, use the `replace_pattern` on the original code. if len(replace_pattern) > 0: patterns = replace_pattern.replace("with", "").split(",") patterns = [_re_replace_pattern.search(p) for p in patterns] for pattern in patterns: if pattern is None: continue obj1, obj2, option = pattern.groups() theoretical_code = re.sub(obj1, obj2, theoretical_code) if option.strip() == "all-casing": theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code) theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code) # stylify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line theoretical_code = stylify(lines[start_index - 1] + theoretical_code) theoretical_code = theoretical_code[len(lines[start_index - 1]) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index]) if overwrite: lines = lines[:start_index] + [theoretical_code] + lines[line_index:] line_index = start_index + 1 if overwrite and len(diffs) > 0: # Warn the user a file has been modified. 
print(f"Detected changes, rewriting {filename}.") with open(filename, "w", encoding="utf-8", newline="\n") as f: f.writelines(lines) return diffs def check_copies(overwrite: bool = False): all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True) diffs = [] for filename in all_files: new_diffs = is_copy_consistent(filename, overwrite) diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs] if not overwrite and len(diffs) > 0: diff = "\n".join(diffs) raise Exception( "Found the following copy inconsistencies:\n" + diff + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." ) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.", ) args = parser.parse_args() check_copies(args.fix_and_overwrite)
diffusers/utils/check_copies.py/0
{ "file_path": "diffusers/utils/check_copies.py", "repo_id": "diffusers", "token_count": 3397 }
152
<jupyter_start><jupyter_text>Diffusion for Audio In this notebook, we're going to take a brief look at generating audio with diffusion models. What you will learn:- How audio is represented in a computer- Methods to convert between raw audio data and spectrograms- How to prepare a dataloader with a custom collate function to convert audio slices into spectrograms- Fine-tuning an existing audio diffusion model on a specific genre of music- Uploading your custom pipeline to the Hugging Face hubCaveat: This is mostly for educational purposes - no guarantees our model will sound good 😉.Let's get started! Setup and Imports<jupyter_code>%pip install -q datasets diffusers torchaudio accelerate import torch, random import numpy as np import torch.nn.functional as F from tqdm.auto import tqdm from IPython.display import Audio from matplotlib import pyplot as plt from diffusers import DiffusionPipeline from torchaudio import transforms as AT from torchvision import transforms as IT<jupyter_output><empty_output><jupyter_text>Sampling from a Pre-Trained Audio PipelineLet's begin by following the [Audio Diffusion docs](https://huggingface.co/docs/diffusers/api/pipelines/audio_diffusion) to load a pre-existing audio diffusion model pipeline:<jupyter_code># Load a pre-trained audio diffusion pipeline device = "mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu" pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-instrumental-hiphop-256").to(device)<jupyter_output><empty_output><jupyter_text>As with the pipelines we've used in previous units, we can create samples by calling the pipeline like so:<jupyter_code># Sample from the pipeline and display the outputs output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate()))<jupyter_output><empty_output><jupyter_text>Here, the `rate` argument specifies the _sampling rate_ for the audio; we'll take a deeper look at this later. You'll also notice there are multiple things returned by the pipeline. What's going on here? Let's take a closer look at both outputs.The first is an array of data, representing the generated audio:<jupyter_code># The audio array output.audios[0].shape<jupyter_output><empty_output><jupyter_text>The second looks like a greyscale image:<jupyter_code># The output image (spectrogram) output.images[0].size<jupyter_output><empty_output><jupyter_text>This gives us a hint at how this pipeline works. The audio is not directly generated with diffusion - instead, the pipeline has the same kind of 2D UNet as the unconditional image generation pipelines we saw in [Unit 1](https://github.com/huggingface/diffusion-models-class/tree/main/unit1) that is used to generate the spectrogram, which is then post-processed into the final audio.The pipe has an extra component that handles these conversions, which we can access via `pipe.mel`:<jupyter_code>pipe.mel<jupyter_output><empty_output><jupyter_text>From Audio to Image and Back Again An audio 'waveform' encodes the raw audio samples over time - this could be the electrical signal received from a microphone, for example. Working with this 'Time Domain' representation can be tricky, so it is a common practice to convert it into some other form, commonly something called a spectrogram. 
A spectrogram shows the intensity of different frequencies (y axis) vs time (x axis):<jupyter_code># Calculate and show a spectrogram for our generated audio sample using torchaudio spec_transform = AT.Spectrogram(power=2) spectrogram = spec_transform(torch.tensor(output.audios[0])) print(spectrogram.min(), spectrogram.max()) log_spectrogram = spectrogram.log() plt.imshow(log_spectrogram[0], cmap='gray');<jupyter_output>tensor(0.) tensor(6.0842)<jupyter_text>The spectrogram we just made has values between 0.0000000000001 and 1, with most being close to the low end of that range. This is not ideal for visualization or modelling - in fact we had to take the log of these values to get a greyscale plot that showed any detail. For this reason, we typically use a special kind of spectrogram called a Mel spectrogram, which is designed to capture the kinds of information which are important for human hearing by applying some transforms to the different frequency components of the signal. _Some audio transforms from the [torchaudio docs](https://pytorch.org/audio/stable/transforms.html)_ Luckily for us, we don't even need to worry too much about these transforms - the pipeline's `mel` functionality handles these details for us. Using this, we can convert a spectrogram image to audio like so:<jupyter_code>a = pipe.mel.image_to_audio(output.images[0]) a.shape<jupyter_output><empty_output><jupyter_text>And we can convert an array of audio data into a spectrogram images by first loading the raw audio data and then calling the `audio_slice_to_image()` function. Longer clips are automatically sliced into chunks of the correct length to produce a 256x256 spectrogram image:<jupyter_code>pipe.mel.load_audio(raw_audio=a) im = pipe.mel.audio_slice_to_image(0) im<jupyter_output><empty_output><jupyter_text>The audio is represented as a long array of numbers. To play this out loud we need one more key piece of information: the sample rate. How many samples (individual values) do we use to represent a single second of audio? We can see the sample rate used during training of this pipeline with:<jupyter_code>sample_rate_pipeline = pipe.mel.get_sample_rate() sample_rate_pipeline<jupyter_output><empty_output><jupyter_text>If we specify the sample rate incorrectly, we get audio that is sped up or slowed down:<jupyter_code>display(Audio(output.audios[0], rate=44100)) # 2x speed<jupyter_output><empty_output><jupyter_text>Fine-Tuning the pipeline Now that we have a rough understanding of how the pipeline works, let's fine-tune it on some new audio data! 
The dataset is a collection of audio clips in different genres, which we can load from the hub like so:<jupyter_code>from datasets import load_dataset dataset = load_dataset('lewtun/music_genres', split='train') dataset<jupyter_output>Using custom data configuration lewtun--music_genres-2cfa9201f94788d8 Found cached dataset parquet (/home/ubuntu/.cache/huggingface/datasets/lewtun___parquet/lewtun--music_genres-2cfa9201f94788d8/0.0.0/2a3b91fbd88a2c90d1dbbb32b460cf621d31bd5b05b934492fdef7d8d6f236ec)<jupyter_text>You can use the code below to see the different genres in the dataset and how many samples are contained in each:<jupyter_code>for g in list(set(dataset['genre'])): print(g, sum(x==g for x in dataset['genre']))<jupyter_output>Pop 945 Blues 58 Punk 2582 Old-Time / Historic 408 Experimental 1800 Folk 1214 Electronic 3071 Spoken 94 Classical 495 Country 142 Instrumental 1044 Chiptune / Glitch 1181 International 814 Ambient Electronic 796 Jazz 306 Soul-RnB 94 Hip-Hop 1757 Easy Listening 13 Rock 3095<jupyter_text>The dataset has the audio as arrays:<jupyter_code>audio_array = dataset[0]['audio']['array'] sample_rate_dataset = dataset[0]['audio']['sampling_rate'] print('Audio array shape:', audio_array.shape) print('Sample rate:', sample_rate_dataset) display(Audio(audio_array, rate=sample_rate_dataset))<jupyter_output>Audio array shape: (1323119,) Sample rate: 44100<jupyter_text>Note that the sample rate of this audio is higher - if we want to use the existing pipeline we'll need to 'resample' it to match. The clips are also longer than the ones the pipeline is set up for. Fortunately, when we load the audio using `pipe.mel` it automatically slices the clip into smaller sections:<jupyter_code>a = dataset[0]['audio']['array'] # Get the audio array pipe.mel.load_audio(raw_audio=a) # Load it with pipe.mel pipe.mel.audio_slice_to_image(0) # View the first 'slice' as a spectrogram<jupyter_output><empty_output><jupyter_text>We need to remember to adjust the sampling rate, since the data from this dataset has twice as many samples per second:<jupyter_code>sample_rate_dataset = dataset[0]['audio']['sampling_rate'] sample_rate_dataset<jupyter_output><empty_output><jupyter_text>Here we use torchaudio's transforms (imported as AT) to do the resampling, the pipe's `mel` to turn audio into an image and torchvision's transforms (imported as IT) to turn images into tensors. This gives us a function that turns an audio clip into a spectrogram tensor that we can use for training:<jupyter_code>resampler = AT.Resample(sample_rate_dataset, sample_rate_pipeline, dtype=torch.float32) to_t = IT.ToTensor() def to_image(audio_array): audio_tensor = torch.tensor(audio_array).to(torch.float32) audio_tensor = resampler(audio_tensor) pipe.mel.load_audio(raw_audio=np.array(audio_tensor)) num_slices = pipe.mel.get_number_of_slices() slice_idx = random.randint(0, num_slices-1) # Pic a random slice each time (excluding the last short slice) im = pipe.mel.audio_slice_to_image(slice_idx) return im<jupyter_output><empty_output><jupyter_text>We'll use our `to_image()` function as part of a custom collate function to turn our dataset into a dataloader we can use for training. The collate function defines how to transform a batch of examples from the dataset into the final batch of data ready for training. 
In this case we turn each audio sample into a spectrogram image and stack the resulting tensors together:<jupyter_code>def collate_fn(examples): # to image -> to tensor -> rescale to (-1, 1) -> stack into batch audio_ims = [to_t(to_image(x['audio']['array']))*2-1 for x in examples] return torch.stack(audio_ims) # Create a dataset with only the chosen genre of songs batch_size = 4 # 4 on colab, 12 on A100 chosen_genre = 'Electronic' # <<< Try training on different genres <<< indexes = [i for i, g in enumerate(dataset['genre']) if g == chosen_genre] filtered_dataset = dataset.select(indexes) dl = torch.utils.data.DataLoader(filtered_dataset.shuffle(), batch_size=batch_size, collate_fn=collate_fn, shuffle=True) batch = next(iter(dl)) print(batch.shape)<jupyter_output>torch.Size([4, 1, 256, 256])<jupyter_text>**NB: You will need to use a lower batch size (e.g., 4) unless you have plenty of GPU vRAM available.** Training Loop: Here is a simple training loop that runs through the dataloader for a few epochs to fine-tune the pipeline's UNet. You can also skip this cell and load the pipeline with the code in the following cell.<jupyter_code>epochs = 3 lr = 1e-4 pipe.unet.train() pipe.scheduler.set_timesteps(1000) optimizer = torch.optim.AdamW(pipe.unet.parameters(), lr=lr) for epoch in range(epochs): for step, batch in tqdm(enumerate(dl), total=len(dl)): # Prepare the input images clean_images = batch.to(device) bs = clean_images.shape[0] # Sample a random timestep for each image timesteps = torch.randint( 0, pipe.scheduler.num_train_timesteps, (bs,), device=clean_images.device ).long() # Add noise to the clean images according to the noise magnitude at each timestep noise = torch.randn(clean_images.shape).to(clean_images.device) noisy_images = pipe.scheduler.add_noise(clean_images, noise, timesteps) # Get the model prediction noise_pred = pipe.unet(noisy_images, timesteps, return_dict=False)[0] # Calculate the loss loss = F.mse_loss(noise_pred, noise) loss.backward() # Update the model parameters with the optimizer optimizer.step() optimizer.zero_grad() # OR: Load the version I trained earlier pipe = DiffusionPipeline.from_pretrained("johnowhitaker/Electronic_test").to(device) output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=22050)) # Make a longer sample by passing in a starting noise tensor with a different shape noise = torch.randn(1, 1, pipe.unet.sample_size[0], pipe.unet.sample_size[1]*4).to(device) output = pipe(noise=noise) display(output.images[0]) display(Audio(output.audios[0], rate=22050))<jupyter_output><empty_output><jupyter_text>Not the most amazing-sounding outputs, but it's a start :) Explore tweaking the learning rate and number of epochs, and share your best results on Discord so we can improve together! Some things to consider:- We're working with 256px square spectrogram images which limits our batch size. Can you recover audio of sufficient quality from a 128x128 spectrogram?- In place of random image augmentation we're picking different slices of the audio clip each time, but could this be improved with some different kinds of augmentation when training for many epochs?- How else might we use this to generate longer clips? Perhaps you could generate a 5s starting clip and then use inpainting-inspired ideas to continue to generate additional segments of audio that follow on from the initial clip...- What is the equivalent of image-to-image in this spectrogram diffusion context?
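As a hedged sketch (not from the original notebook; it reuses `pipe`, `dataset`, `device` and the `to_image`/`to_t` helpers defined above), one way to approach the image-to-image question is to start from a real spectrogram slice, noise it part-way along the schedule, and denoise from there:<jupyter_code># 'Img2img' for spectrograms: partially noise an existing slice, then denoise it
strength = 0.5  # how much of the denoising schedule to re-run (higher = noisier, more creative)
pipe.scheduler.set_timesteps(1000)

# Start from a real spectrogram slice rather than pure noise (to_image resamples and slices)
init_im = to_image(dataset[0]['audio']['array'])
x = to_t(init_im).unsqueeze(0).to(device) * 2 - 1  # (1, 1, 256, 256), scaled to (-1, 1)

# Jump part-way into the noise schedule, then run the remaining denoising steps
start_step = int(len(pipe.scheduler.timesteps) * strength)
x = pipe.scheduler.add_noise(x, torch.randn_like(x), pipe.scheduler.timesteps[start_step])
for t in pipe.scheduler.timesteps[start_step:]:
    with torch.no_grad():
        noise_pred = pipe.unet(x, t, return_dict=False)[0]
    x = pipe.scheduler.step(noise_pred, t, x).prev_sample

# x can now be rescaled to (0, 1), converted back to a PIL image and turned into audio
# with pipe.mel.image_to_audio(), just like the pipeline outputs above<jupyter_output><empty_output><jupyter_text>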
Push to HubOnce you're happy with your model, you can save it and push it to the hub for others to enjoy:<jupyter_code>from huggingface_hub import get_full_repo_name, HfApi, create_repo, ModelCard # Pick a name for the model model_name = "audio-diffusion-electronic" hub_model_id = get_full_repo_name(model_name) # Save the pipeline locally pipe.save_pretrained(model_name) # Inspect the folder contents !ls {model_name} # Create a repository create_repo(hub_model_id) # Upload the files api = HfApi() api.upload_folder( folder_path=f"{model_name}/scheduler", path_in_repo="scheduler", repo_id=hub_model_id ) api.upload_folder( folder_path=f"{model_name}/mel", path_in_repo="mel", repo_id=hub_model_id ) api.upload_folder(folder_path=f"{model_name}/unet", path_in_repo="unet", repo_id=hub_model_id) api.upload_file( path_or_fileobj=f"{model_name}/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, ) # Push a model card content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-audio-generation - diffusion-models-class --- # Model Card for Unit 4 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional audio generation of music in the genre {chosen_genre} ## Usage ```python from IPython.display import Audio from diffusers import DiffusionPipeline pipe = DiffusionPipeline.from_pretrained("{hub_model_id}") output = pipe() display(output.images[0]) display(Audio(output.audios[0], rate=pipe.mel.get_sample_rate())) ``` """ card = ModelCard(content) card.push_to_hub(hub_model_id)<jupyter_output><empty_output>
diffusion-models-class/unit4/02_diffusion_for_audio.ipynb/0
{ "file_path": "diffusion-models-class/unit4/02_diffusion_for_audio.ipynb", "repo_id": "diffusion-models-class", "token_count": 4553 }
153
# Making a Class-Conditioned Diffusion Model <CourseFloatingBanner unit={2} classNames="absolute z-10 right-0 top-0" notebooks={[ {label: "Making a Class-Conditioned Diffusion Model", value: "https://colab.research.google.com/github/huggingface/diffusion-models-class/blob/main/units/en/unit2/class_conditioned_diffusion_model_example.ipynb"}, {label: "Making a Class-Conditioned Diffusion Model", value: "https://studiolab.sagemaker.aws/import/github/huggingface/diffusion-models-class/blob/main/units/en/unit2/class_conditioned_diffusion_model_example.ipynb"}, ]} /> In this notebook we're going to illustrate one way to add conditioning information to a diffusion model. Specifically, we'll train a class-conditioned diffusion model on MNIST following on from the 'from-scratch' example in Unit 1, where we can specify which digit we'd like the model to generate at inference time. As mentioned in the introduction to this unit, this is just one of many ways we could add additional conditioning information to a diffusion model, and has been chosen for its relative simplicity. Just like the 'from-scratch' notebook in Unit 1, this notebook is mostly for illustrative purposes and you can safely skip it if you'd like. Setup and Data Prep ``` !pip install -q diffusers ``` ``` import torch import torchvision from torch import nn from torch.nn import functional as F from torch.utils.data import DataLoader from diffusers import DDPMScheduler, UNet2DModel from matplotlib import pyplot as plt from tqdm.auto import tqdm device = 'mps' if torch.backends.mps.is_available() else 'cuda' if torch.cuda.is_available() else 'cpu' print(f'Using device: {device}') ``` ``` # Load the dataset dataset = torchvision.datasets.MNIST(root="mnist/", train=True, download=True, transform=torchvision.transforms.ToTensor()) # Feed it into a dataloader (batch size 8 here just for demo) train_dataloader = DataLoader(dataset, batch_size=8, shuffle=True) # View some examples x, y = next(iter(train_dataloader)) print('Input shape:', x.shape) print('Labels:', y) plt.imshow(torchvision.utils.make_grid(x)[0], cmap='Greys'); ``` ## Creating a Class-Conditioned UNet The way we'll feed in the class conditioning is as follows: Create a standard `UNet2DModel` with some additional input channels Map the class label to a learned vector of shape (`class_emb_size`)via an embedding layer Concatenate this information as extra channels for the internal unet input with `net_input = torch.cat((x, class_cond), 1)` Feed this `net_input` (which has (`class_emb_size+1`) channels in total) into the unet to get the final prediction In this example I've set the class_emb_size to 4, but this is completely arbitrary and you could explore having it size 1 (to see if it still works), size 10 (to match the number of classes), or replacing the learned nn.Embedding with a simple one-hot encoding of the class label directly. This is what the implementation looks like: ``` class ClassConditionedUnet(nn.Module): def __init__(self, num_classes=10, class_emb_size=4): super().__init__() # The embedding layer will map the class label to a vector of size class_emb_size self.class_emb = nn.Embedding(num_classes, class_emb_size) # Self.model is an unconditional UNet with extra input channels to accept the conditioning information (the class embedding) self.model = UNet2DModel( sample_size=28, # the target image resolution in_channels=1 + class_emb_size, # Additional input channels for class cond. 
out_channels=1, # the number of output channels layers_per_block=2, # how many ResNet layers to use per UNet block block_out_channels=(32, 64, 64), down_block_types=( "DownBlock2D", # a regular ResNet downsampling block "AttnDownBlock2D", # a ResNet downsampling block with spatial self-attention "AttnDownBlock2D", ), up_block_types=( "AttnUpBlock2D", "AttnUpBlock2D", # a ResNet upsampling block with spatial self-attention "UpBlock2D", # a regular ResNet upsampling block ), ) # Our forward method now takes the class labels as an additional argument def forward(self, x, t, class_labels): # Shape of x: bs, ch, w, h = x.shape # class conditioning in right shape to add as additional input channels class_cond = self.class_emb(class_labels) # Map to embedding dimension class_cond = class_cond.view(bs, class_cond.shape[1], 1, 1).expand(bs, class_cond.shape[1], w, h) # x is shape (bs, 1, 28, 28) and class_cond is now (bs, 4, 28, 28) # Net input is now x and class cond concatenated together along dimension 1 net_input = torch.cat((x, class_cond), 1) # (bs, 5, 28, 28) # Feed this to the unet alongside the timestep and return the prediction return self.model(net_input, t).sample # (bs, 1, 28, 28) ``` If any of the shapes or transforms are confusing, add in print statements to show the relevant shapes and check that they match your expectations. I've also annotated the shapes of some intermediate variables in the hopes of making things clearer. Training and Sampling Where previously we'd do something like prediction = unet(x, t) we'll now add the correct labels as a third argument (prediction = unet(x, t, y)) during training, and at inference we can pass whatever labels we want and if all goes well the model should generate images that match. y in this case is the labels of the MNIST digits, with values from 0 to 9. The training loop is very similar to the example from Unit 1. We're now predicting the noise (rather than the denoised image as in Unit 1) to match the objective expected by the default DDPMScheduler which we're using to add noise during training and to generate samples at inference time. Training takes a while - speeding this up could be a fun mini-project, but most of you can probably just skim the code (and indeed this whole notebook) without running it since we're just illustrating an idea. ``` # Create a scheduler noise_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2') ``` ``` # Redefining the dataloader to set the batch size higher than the demo of 8 train_dataloader = DataLoader(dataset, batch_size=128, shuffle=True) # How many runs through the data should we do?
n_epochs = 10 # Our network net = ClassConditionedUnet().to(device) # Our loss function loss_fn = nn.MSELoss() # The optimizer opt = torch.optim.Adam(net.parameters(), lr=1e-3) # Keeping a record of the losses for later viewing losses = [] # The training loop for epoch in range(n_epochs): for x, y in tqdm(train_dataloader): # Get some data and prepare the corrupted version x = x.to(device) * 2 - 1 # Data on the GPU (mapped to (-1, 1)) y = y.to(device) noise = torch.randn_like(x) timesteps = torch.randint(0, 999, (x.shape[0],)).long().to(device) noisy_x = noise_scheduler.add_noise(x, noise, timesteps) # Get the model prediction pred = net(noisy_x, timesteps, y) # Note that we pass in the labels y # Calculate the loss loss = loss_fn(pred, noise) # How close is the output to the noise # Backprop and update the params: opt.zero_grad() loss.backward() opt.step() # Store the loss for later losses.append(loss.item()) # Print out the average of the last 100 loss values to get an idea of progress: avg_loss = sum(losses[-100:])/100 print(f'Finished epoch {epoch}. Average of the last 100 loss values: {avg_loss:05f}') # View the loss curve plt.plot(losses) ``` Once training finishes, we can sample some images feeding in different labels as our conditioning: ``` # Prepare random x to start from, plus some desired labels y x = torch.randn(80, 1, 28, 28).to(device) y = torch.tensor([[i]*8 for i in range(10)]).flatten().to(device) # Sampling loop for i, t in tqdm(enumerate(noise_scheduler.timesteps)): # Get model pred with torch.no_grad(): residual = net(x, t, y) # Again, note that we pass in our labels y # Update sample with step x = noise_scheduler.step(residual, t, x).prev_sample # Show the results fig, ax = plt.subplots(1, 1, figsize=(12, 12)) ax.imshow(torchvision.utils.make_grid(x.detach().cpu().clip(-1, 1), nrow=8)[0], cmap='Greys') ``` There we go! We can now have some control over what images are produced. I hope you've enjoyed this example. As always, feel free to ask questions in the Discord. Exercise (optional): Try this with FashionMNIST. Tweak the learning rate, batch size and number of epochs. Can you get some decent-looking fashion images with less training time than the example above?
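As a possible starting point for that optional exercise (a hedged sketch, not part of the original notebook; everything else above stays the same):

```
# Swap MNIST for FashionMNIST and reuse the same model, scheduler and training loop
dataset = torchvision.datasets.FashionMNIST(root="fashion_mnist/", train=True, download=True,
                                             transform=torchvision.transforms.ToTensor())
train_dataloader = DataLoader(dataset, batch_size=128, shuffle=True)
```

FashionMNIST images are also 1x28x28 with 10 classes, so the class-conditioned UNet and the sampling code work unchanged; only hyperparameters like the learning rate, batch size and number of epochs need revisiting.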
diffusion-models-class/units/en/unit2/3.mdx/0
{ "file_path": "diffusion-models-class/units/en/unit2/3.mdx", "repo_id": "diffusion-models-class", "token_count": 2985 }
154
<jupyter_start><jupyter_text>Introduction à 🤗 Diffusers Dans ce *notebook*, vous allez entraîner votre premier modèle de diffusion pour générer des images de mignons papillons 🦋. En cours de route, vous apprendrez les composants de base de la bibliothèque 🤗 *Diffusers*, qui fournira une bonne assise pour les applications plus avancées que nous couvrirons plus tard dans le cours. Ce que vous allez apprendreDans ce *notebook*, vous allez :- Voir un puissant pipeline de modèles de diffusion personnalisé en action (avec des informations sur la façon de créer votre propre version).- Créer votre propre mini-pipeline en : - Récapitulant les idées principales derrière les modèles de diffusion - Chargement de données à partir du Hub pour l'entraînement - Explorer comment ajouter du bruit à ces données à l'aide d'un planificateur - Créer et entraîner le modèle UNet - Rassembler les pièces du puzzle pour en faire un pipeline fonctionnel- Éditer et exécuter un script pour initialiser des séries d'entraînement plus longues, qui gèrera - Entraînement multi-GPU via 🤗 *Accelerate* - Journalisation de l'expérience pour suivre les statistiques critiques - Téléchargement du modèle final sur le *Hub* d'*Hugging Face* ❓ Si vous avez des questions, merci de les poster sur le canal `diffusion-models-class` du [serveur Discord d'Hugging Face](https://huggingface.co/join/discord). PrérequisAvant de vous plonger dans ce *notebook*, vous devez :* 📖 Lire le matériel de l'Unité 1* 🤗 Créer un compte Hugging Face. Si vous ne l'avez pas encore fait, vous pouvez le faire ici : https://huggingface.co/join Étape 1 : Configuration Exécutez la cellule suivante pour installer la bibliothèque 🤗 *Diffusers* ainsi que quelques autres prérequis :<jupyter_code>%pip install -qq -U diffusers datasets transformers accelerate ftfy pyarrow==9.0.0<jupyter_output><empty_output><jupyter_text>Ensuite, rendez-vous sur https://huggingface.co/settings/tokens et créez un *tokens* d'accès avec autorisation d'écriture si vous n'en avez pas déjà un : Vous pouvez vous connecter avec ce token en utilisant la ligne de commande (`huggingface-cli login`) ou en exécutant la cellule suivante :<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Vous devez ensuite installer Git-LFS pour télécharger les *checkpoints* de votre modèle :<jupyter_code>%%capture !sudo apt -qq install git-lfs !git config --global credential.helper store<jupyter_output><empty_output><jupyter_text>Enfin, importons les bibliothèques que nous utiliserons et définissons quelques fonctions de confort que nous utiliserons plus tard dans le *notebook* :<jupyter_code>import numpy as np import torch import torch.nn.functional as F from matplotlib import pyplot as plt from PIL import Image def show_images(x): """Étant donné un lot d'images x, faire une grille et convertir en PIL""" x = x * 0.5 + 0.5 # On va de (-1, 1) et revenons (0, 1) grid = torchvision.utils.make_grid(x) grid_im = grid.detach().cpu().permute(1, 2, 0).clip(0, 1) * 255 grid_im = Image.fromarray(np.array(grid_im).astype(np.uint8)) return grid_im def make_grid(images, size=64): """Étant donné une liste d'images PIL, les empiler en une ligne pour faciliter la visualisation.""" output_im = Image.new("RGB", (size * len(images), size)) for i, im in enumerate(images): output_im.paste(im.resize((size, size)), (i * size, 0)) return output_im # Les utilisateurs de Mac peuvent avoir besoin de device = 'mps' (non testé) device = torch.device("cuda" if 
torch.cuda.is_available() else "cpu")<jupyter_output><empty_output><jupyter_text>OK, nous sommes prêts ! Dreambooth : un avant-goût de ce qui nous attend Si vous avez un tant soit peu consulté les médias sociaux au cours des derniers mois, vous avez certainement entendu parler de *Stable Diffusion*. Il s'agit d'un puissant modèle de diffusion latent conditionné par le texte (ne vous inquiétez pas, nous allons apprendre ce que cela signifie). Mais il a un défaut : il ne sait pas à quoi vous ou moi ressemblons, à moins que nous soyons suffisamment célèbres pour que nos images soient répandues sur internet.Dreambooth nous permet de créer notre propre variante de modèle avec une connaissance supplémentaire d'un visage, d'un objet ou d'un style spécifique. Le Corridor Crew a réalisé une excellente vidéo (en anglais) en utilisant cette technique pour raconter des histoires avec des personnages cohérents, ce qui est un excellent exemple de ce que cette technique peut faire :<jupyter_code>from IPython.display import YouTubeVideo YouTubeVideo("W4Mcuh38wyM")<jupyter_output><empty_output><jupyter_text>Voici un exemple d'une sortie d'un [modèle](https://huggingface.co/sd-dreambooth-library/mr-potato-head) entraîné sur 5 photos du jouet Monsieur Patate.Tout d'abord, nous chargeons le pipeline. Ceci télécharge les poids du modèle depuis le Hub. Étant donné que plusieurs gigaoctets de données sont téléchargés pour une démonstration d'une ligne, vous pouvez sauter cette cellule et simplement admirer la sortie de l'exemple !<jupyter_code>from diffusers import StableDiffusionPipeline # Consultez https://huggingface.co/sd-dreambooth-library pour découvrir de nombreux modèles provenant de la communauté model_id = "sd-dreambooth-library/mr-potato-head" # Chargement du pipeline pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to( device )<jupyter_output><empty_output><jupyter_text>Une fois le chargement du pipeline terminé, nous pouvons générer des images avec :<jupyter_code>prompt = "an abstract oil painting of sks mr potato head by picasso" image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0] image<jupyter_output><empty_output><jupyter_text>**Exercice** : essayez vous-même avec des prompts différents. Le *token* `sks` représente un identifiant unique pour le nouveau concept, que se passe-t-il si vous l'omettez ? Vous pouvez aussi expérimenter en changeant le nombre de pas d'échantillonnage (jusqu'où pouvez-vous descendre ?) et le paramètre `guidance_scale`, qui détermine jusqu'à quel point le modèle va essayer de correspondre au prompt. Il se passe beaucoup de choses dans ce pipeline ! À la fin du cours, vous saurez comment tout cela fonctionne. Pour l'instant, voyons comment nous pouvons entraîner un modèle de diffusion à partir de zéro. 
MVP (Minimum Viable Pipeline)L'API de base de 🤗 Diffusers est divisée en trois composants principaux :- **Pipelines** : classes de haut niveau conçues pour générer rapidement des échantillons à partir de modèles de diffusion populaires entraînés de manière conviviale.- **Models** : architectures populaires pour entraîner de nouveaux modèles de diffusion, par exemple [UNet](https://arxiv.org/abs/1505.04597).- **Schedulers** : diverses techniques pour générer des images à partir du bruit pendant l'*inférence* ainsi que pour générer des images bruitées pour l'*entraînement*.Les pipelines sont parfaits pour les utilisateurs finaux, mais si vous êtes ici pour ce cours, nous supposons que vous voulez savoir ce qui se passe sous le capot ! Dans le reste de ce *notebook*, nous allons donc construire notre propre pipeline capable de générer de petites images de papillons. Voici le résultat final en action :<jupyter_code>from diffusers import DDPMPipeline # Chargement du pipeline de papillons butterfly_pipeline = DDPMPipeline.from_pretrained( "johnowhitaker/ddpm-butterflies-32px" ).to(device) # Création de 8 images images = butterfly_pipeline(batch_size=8).images # Visualisation du résultat make_grid(images)<jupyter_output><empty_output><jupyter_text>Ce n'est peut-être pas aussi impressionnant que l'exemple de DreamBooth, mais nous entraînons notre modèle à partir de zéro avec ~0,0001% des données utilisées pour entraîner Stable Diffusion. En parlant d'entraînement, rappelez-vous que l'entraînement d'un modèle de diffusion ressemble à ceci :- Chargement de quelques images à partir des données entraînées.- Ajout de bruit, en différentes quantités.- Introduction des versions bruitées des données d'entrée dans le modèle.- Évaluation de la capacité du modèle à débruiter ces données d'entrée- Utilisation de ces informations pour mettre à jour les poids du modèle, et répétition.Nous allons explorer ces étapes une par une dans les prochaines parties jusqu'à ce que nous ayons une boucle d'entraînement complète, puis nous verrons comment échantillonner à partir du modèle entraîné et comment regrouper le tout dans un pipeline pour faciliter le partage. Commençons par les données. Etape 2 : Télécharger le jeu de données d'entraînementPour cet exemple, nous utilisons un jeu de données d'images provenant du *Hub* d'*Hugging Face*. Plus précisément, cette collection de [1000 images de papillons](https://huggingface.co/datasets/huggan/smithsonian_butterflies_subset). Il s'agit d'un très petit jeu de données, c'est pourquoi nous avons aussi inclus des lignes en commentaires pour quelques options plus importantes. 
Si vous préférez utiliser votre propre collection d'images, vous pouvez également utiliser l'exemple de code commenté pour charger des images à partir d'un dossier.<jupyter_code>import torchvision from datasets import load_dataset from torchvision import transforms dataset = load_dataset("huggan/smithsonian_butterflies_subset", split="train") # Ou charger des images à partir d'un dossier local # dataset = load_dataset("imagefolder", data_dir="path/to/folder") # Nous entraînerons sur des images carrées de 32 pixels, mais vous pouvez aussi essayer des tailles plus grandes image_size = 32 # Vous pouvez réduire la taille de votre batch si vous manquez de mémoire GPU batch_size = 64 # Définition les augmentations de données preprocess = transforms.Compose( [ transforms.Resize((image_size, image_size)), # Redimensionner transforms.RandomHorizontalFlip(), # Retournement aléatoire transforms.ToTensor(), # Convertir en tenseur (0, 1) transforms.Normalize([0.5], [0.5]), # Passage en (-1, 1) ] ) def transform(examples): images = [preprocess(image.convert("RGB")) for image in examples["image"]] return {"images": images} dataset.set_transform(transform) # Créer un chargeur de données à partir du jeu de données pour servir les images transformées en batchs train_dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=True )<jupyter_output><empty_output><jupyter_text>Nous pouvons saisir un batch d'images et en visualiser quelques-unes comme suit :<jupyter_code>xb = next(iter(train_dataloader))["images"].to(device)[:8] print("X shape:", xb.shape) show_images(xb).resize((8 * 64, 64), resample=Image.NEAREST)<jupyter_output><empty_output><jupyter_text>Nous nous en tenons à un petit jeu de données avec des images de 32 pixels pour que les temps d'entraînement restent raisonnables dans ce *notebook*. Etape 3: Définir le planificateurNotre plan d'entraînement consiste à prendre ces images d'entrée et à leur ajouter du bruit, puis à transmettre les images bruitées au modèle. Lors de l'inférence, nous utiliserons les prédictions du modèle pour supprimer le bruit de manière itérative. Dans *Diffusers*, ces deux processus sont gérés par le *scheduler* (planificateur).Le planificateur de bruit détermine la quantité de bruit ajoutée à différents moments. Voici comment nous pourrions créer un planificateur en utilisant les paramètres par défaut pour l'entraînement et l'échantillonnage "DDPM" (d'après l'article [*Denoising Diffusion Probabalistic Models*](https://arxiv.org/abs/2006.11239)) :<jupyter_code>from diffusers import DDPMScheduler noise_scheduler = DDPMScheduler(num_train_timesteps=1000)<jupyter_output><empty_output><jupyter_text>Le papier DDPM décrit un processus de corruption qui ajoute une petite quantité de bruit à chaque pas de temps. Étant donné $x_{t-1}$ pour un certain pas de temps, nous pouvons obtenir la version suivante (légèrement plus bruyante) $x_t$ avec : $q(\mathbf{x}_t \vert \mathbf{x}_{t-1}) = \mathcal{N}(\mathbf{x}_t; \sqrt{1 - \beta_t} \mathbf{x}_{t-1}, \beta_t\mathbf{I}) \quadq(\mathbf{x}_{1:T} \vert \mathbf{x}_0) = \prod^T_{t=1} q(\mathbf{x}_t \vert \mathbf{x}_{t-1})$Nous prenons $x_{t-1}$, l'échelonnons de $\sqrt{1 - \beta_t}$ et ajoutons du bruit échelonné par $\beta_t$. Ce $\beta$ est défini pour chaque $t$ selon un certain planificateur et détermine la quantité de bruit ajoutée par pas de temps. 
Maintenant, nous ne voulons pas nécessairement faire cette opération 500 fois pour obtenir $x_{500}$, nous avons donc une autre formule pour obtenir $x_t$ pour n'importe quel $t$ étant donné $x_0$ :$\begin{aligned}q(\mathbf{x}_t \vert \mathbf{x}_0) &= \mathcal{N}(\mathbf{x}_t; \sqrt{\bar{\alpha}_t} \mathbf{x}_0, {(1 - \bar{\alpha}_t)} \mathbf{I})\end{aligned}$ où $\bar{\alpha}_t = \prod_{i=1}^t \alpha_i$ et $\alpha_i = 1-\beta_i$La notation mathématique fait toujours peur ! Heureusement, le planificateur s'en charge pour nous. Nous pouvons tracer $\sqrt{\bar{\alpha}_t}$ (appelé `sqrt_alpha_prod`) et $\sqrt{(1 - \bar{\alpha}_t)}$ (appelé `sqrt_one_minus_alpha_prod`) pour voir comment l'entrée ($x$) et le bruit sont mis à l'échelle et mélangés à travers différents pas de temps :<jupyter_code>plt.plot(noise_scheduler.alphas_cumprod.cpu() ** 0.5, label=r"${\sqrt{\bar{\alpha}_t}}$") plt.plot((1 - noise_scheduler.alphas_cumprod.cpu()) ** 0.5, label=r"$\sqrt{(1 - \bar{\alpha}_t)}$") plt.legend(fontsize="x-large");<jupyter_output><empty_output><jupyter_text>**Exercice** : Vous pouvez explorer comment ce graphique change avec différents paramètres pour `beta_start`, `beta_end` et `beta_schedule` en remplaçant l'une des options commentées ci-dessous :<jupyter_code>## Exemple avec beaucoup de bruit ajouté : # noise_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_start=0.001, beta_end=0.004) ## Le planificateur cosinus pouvant s'avérer meilleur pour les images de petite taille : # noise_scheduler = DDPMScheduler(num_train_timesteps=1000, beta_schedule='squaredcos_cap_v2')<jupyter_output><empty_output><jupyter_text>Quel que soit le planificateur que vous avez choisi, nous pouvons maintenant l'utiliser pour ajouter du bruit en différentes quantités en utilisant la fonction `noise_scheduler.add_noise` comme suit :<jupyter_code>timesteps = torch.linspace(0, 999, 8).long().to(device) noise = torch.randn_like(xb) noisy_xb = noise_scheduler.add_noise(xb, noise, timesteps) print("Noisy X shape", noisy_xb.shape) show_images(noisy_xb).resize((8 * 64, 64), resample=Image.NEAREST)<jupyter_output><empty_output><jupyter_text>Là encore, étudiez l'effet de l'utilisation de différents planificateurs et paramètres de bruit. Cette [vidéo](https://www.youtube.com/watch?v=fbLgFrlTnGU) (en anglais) explique en détail certains des calculs ci-dessus et constitue une excellente introduction à certains de ces concepts. Etape 4 : Définir le modèle Nous en arrivons maintenant à l'élément central : le modèle lui-même.La plupart des modèles de diffusion utilisent des architectures qui sont des variantes d'un [U-net](https://arxiv.org/abs/1505.04597) et c'est ce que nous utiliserons ici.En bref :- l'image en entrée du modèle passe par plusieurs blocs de couches ResNet, chacun divisant la taille de l'image par 2 ;- puis elle passe à travers le même nombre de blocs qui la suréchantillonnent ;- il y a des *skip connections* qui relient les caractéristiques sur le chemin du sous-échantillonnage aux couches correspondantes dans le chemin du suréchantillonnage.L'une des principales caractéristiques de ce modèle est qu'il prédit des images de la même taille que l'entrée, ce qui est exactement ce dont nous avons besoin ici.*Diffusers* nous fournit une classe `UNet2DModel` pratique qui crée l'architecture désirée dans PyTorch.Créons un U-net pour la taille d'image désirée. 
Notez que les `down_block_types` correspondent aux blocs de sous-échantillonnage (en vert sur le diagramme ci-dessus), et que les `up_block_types` sont les blocs de suréchantillonnage (en rouge sur le diagramme) :<jupyter_code>from diffusers import UNet2DModel # Création d'un modèle model = UNet2DModel( sample_size=image_size, # la résolution de l'image cible in_channels=3, # le nombre de canaux d'entrée, 3 pour les images RVB out_channels=3, # le nombre de canaux de sortie layers_per_block=2, # le nombre de couches ResNet à utiliser par bloc UNet block_out_channels=(64, 128, 128, 256), # Plus de canaux -> plus de paramètres down_block_types=( "DownBlock2D", # un bloc de sous-échantillonnage ResNet standard "DownBlock2D", "AttnDownBlock2D", # un bloc de sous-échantillonnage ResNet avec auto-attention spatiale "AttnDownBlock2D", ), up_block_types=( "AttnUpBlock2D", "AttnUpBlock2D", # un bloc de suréchantillonnage ResNet avec auto-attention spatiale "UpBlock2D", "UpBlock2D", # un bloc de suréchantillonnage ResNet standard ), ) model.to(device)<jupyter_output><empty_output><jupyter_text>Lorsque vous traitez des données d'entrée en haute résolution, vous pouvez utiliser davantage de blocs descendants et ascendants, et ne conserver les couches d'attention que pour les couches de résolution les plus basses (inférieures) afin de réduire l'utilisation de la mémoire. Nous verrons plus tard comment vous pouvez expérimenter pour trouver les meilleurs paramètres pour votre cas d'utilisation. Nous pouvons vérifier que le passage d'un batch de données et de pas de temps aléatoires produit une sortie de même forme que les données d'entrée :<jupyter_code>with torch.no_grad(): model_prediction = model(noisy_xb, timesteps).sample model_prediction.shape<jupyter_output><empty_output><jupyter_text>Dans la section suivante, nous verrons comment entraîner ce modèle. Etape 5 : Créer une boucle d'entraînement Il est temps d'entraîner ! Voici une boucle d'optimisation typique dans PyTorch, où nous parcourons les données batch par batch et mettons à jour les paramètres de notre modèle à chaque étape à l'aide d'un optimiseur, ici, l'optimiseur AdamW avec un taux d'apprentissage de 0,0004.Pour chaque batch de données, nous- échantillonnons des pas de temps aléatoires- bruitons les données en conséquence- transmettons les données bruitées au modèle- comparons les prédictions du modèle avec la cible (c'est-à-dire le bruit dans ce cas) en utilisant l'erreur quadratique moyenne comme fonction de perte- mettons à jour les paramètres du modèle via `loss.backward()` et `optimizer.step()`.Au cours de ce processus, nous enregistrons aussi les pertes au fil du temps pour un tracé ultérieur.NB : ce code prend près de 10 minutes à exécuter. N'hésitez pas à sauter ces deux cellules et à utiliser le modèle pré-entraîné si vous êtes pressé. Vous pouvez également étudier comment la réduction du nombre de canaux dans chaque couche via la définition du modèle ci-dessus peut accélérer les choses. 
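<jupyter_text>À titre indicatif, une façon simple de comparer différentes configurations (par exemple avec moins de canaux par bloc) est de compter le nombre de paramètres du modèle. Petite cellule facultative, en supposant que `model` désigne bien le UNet défini ci-dessus :<jupyter_code># Nombre total de paramètres du UNet défini plus haut
print(f"{sum(p.numel() for p in model.parameters()):,} paramètres")<jupyter_output><empty_output>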
L'[exemple officiel d'entraînement](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) de *Diffusers* entraîne un modèle plus grand sur ce jeu de données à une résolution plus élevée, et constitue une bonne référence pour ce à quoi ressemble une boucle d'entraînement moins minimale :<jupyter_code># Définir le planificateur de bruit noise_scheduler = DDPMScheduler( num_train_timesteps=1000, beta_schedule="squaredcos_cap_v2" ) # Boucle d'entraînement optimizer = torch.optim.AdamW(model.parameters(), lr=4e-4) losses = [] for epoch in range(30): for step, batch in enumerate(train_dataloader): clean_images = batch["images"].to(device) # Échantillonner du bruit à ajouter aux images noise = torch.randn(clean_images.shape).to(clean_images.device) bs = clean_images.shape[0] # Échantillonner un pas de temps aléatoire pour chaque image timesteps = torch.randint( 0, noise_scheduler.num_train_timesteps, (bs,), device=clean_images.device ).long() # Ajouter du bruit aux images propres en fonction de l'ampleur du bruit à chaque étape noisy_images = noise_scheduler.add_noise(clean_images, noise, timesteps) # Obtenir la prédiction du modèle noise_pred = model(noisy_images, timesteps, return_dict=False)[0] # Calculer la perte loss = F.mse_loss(noise_pred, noise) loss.backward() losses.append(loss.item()) # Mise à jour des paramètres du modèle à l'aide de l'optimiseur optimizer.step() optimizer.zero_grad() if (epoch + 1) % 5 == 0: loss_last_epoch = sum(losses[-len(train_dataloader) :]) / len(train_dataloader) print(f"Epoch:{epoch+1}, loss: {loss_last_epoch}")<jupyter_output><empty_output><jupyter_text>En traçant la perte, nous constatons que le modèle s'améliore rapidement dans un premier temps, puis continue à s'améliorer à un rythme plus lent (ce qui est plus évident si nous utilisons une échelle logarithmique, comme indiqué à droite) :<jupyter_code>fig, axs = plt.subplots(1, 2, figsize=(12, 4)) axs[0].plot(losses) axs[1].plot(np.log(losses)) plt.show()<jupyter_output><empty_output><jupyter_text>Au lieu d'exécuter le code d'entraînement ci-dessus, vous pouvez utiliser le modèle du pipeline comme suit :<jupyter_code>## Décommenter pour charger le modèle que j'ai entraîné plus tôt à la place : # model = butterfly_pipeline.unet<jupyter_output><empty_output><jupyter_text>Etape 6 : Générer des imagesComment obtenir des images avec ce modèle ? Option 1 : Création d'un pipeline<jupyter_code>from diffusers import DDPMPipeline image_pipe = DDPMPipeline(unet=model, scheduler=noise_scheduler) pipeline_output = image_pipe() pipeline_output.images[0]<jupyter_output><empty_output><jupyter_text>Nous pouvons enregistrer un pipeline dans un dossier local comme suit :<jupyter_code>image_pipe.save_pretrained("my_pipeline")<jupyter_output><empty_output><jupyter_text>Inspection du contenu du dossier :<jupyter_code>!ls my_pipeline/<jupyter_output><empty_output><jupyter_text>Les sous-dossiers `scheduler` et `unet` contiennent tout ce qui est nécessaire pour recréer ces composants. Par exemple, dans le dossier `unet` vous trouverez les poids du modèle (`diffusion_pytorch_model.bin`) ainsi qu'un fichier de configuration qui spécifie l'architecture UNet.<jupyter_code>!ls my_pipeline/unet/<jupyter_output><empty_output><jupyter_text>Ensemble, ces fichiers contiennent tout ce qui est nécessaire pour recréer le pipeline. 
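<jupyter_text>En guise de vérification facultative (simple esquisse reprenant les objets définis ci-dessus), nous pouvons recharger le pipeline depuis ce dossier local avec `from_pretrained()` et générer une image pour confirmer que tout a bien été sauvegardé :<jupyter_code># Recharger le pipeline sauvegardé localement et générer une image de test
reloaded_pipe = DDPMPipeline.from_pretrained("my_pipeline").to(device)
reloaded_pipe(batch_size=1).images[0]<jupyter_output><empty_output>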
Vous pouvez les télécharger manuellement sur le *Hub* pour partager le pipeline avec d'autres personnes, ou consulter le code pour le faire via l'API dans la section suivante. Option 2 : Écrire une boucle d'échantillonnageSi vous inspectez la méthode `forward` du pipeline, vous pourrez voir ce qui se passe lorsque nous lançons `image_pipe()` :<jupyter_code># ??image_pipe.forward<jupyter_output><empty_output><jupyter_text>Nous commençons par un bruit aléatoire et parcourons les pas de temps du planificateur du plus bruité au moins bruité, en supprimant une petite quantité de bruit à chaque étape sur la base de la prédiction du modèle :<jupyter_code># Point de départ aléatoire (8 images aléatoires) : sample = torch.randn(8, 3, 32, 32).to(device) for i, t in enumerate(noise_scheduler.timesteps): # Obtenir la prédiction du modèle with torch.no_grad(): residual = model(sample, t).sample # Mise à jour de l'échantillon avec le pas sample = noise_scheduler.step(residual, t, sample).prev_sample show_images(sample)<jupyter_output><empty_output><jupyter_text>La fonction `noise_scheduler.step()` effectue les calculs nécessaires pour mettre à jour `sample` de manière appropriée. Il existe un certain nombre de méthodes d'échantillonnage. Dans l'unité suivante, nous verrons comment nous pouvons échanger un échantillonneur différent pour accélérer la génération d'images avec des modèles existants, et nous parlerons plus en détail de la théorie derrière l'échantillonnage des modèles de diffusion. Etape 7 : Pousser votre modèle vers le *Hub*Dans l'exemple ci-dessus, nous avons enregistré notre pipeline dans un dossier local. Pour pousser notre modèle vers le *Hub*, nous aurons besoin d'un dépôt de modèles dans lequel nous pourrons pousser nos fichiers. Nous déterminerons le nom du dépôt à partir de l'ID du modèle que nous voulons donner à notre modèle (n'hésitez pas à remplacer le nom du modèle par votre propre choix ; il doit juste contenir votre nom d'utilisateur, ce que fait la fonction `get_full_repo_name()`) :<jupyter_code>from huggingface_hub import get_full_repo_name model_name = "sd-class-butterflies-32" hub_model_id = get_full_repo_name(model_name) hub_model_id<jupyter_output><empty_output><jupyter_text>Ensuite, créons un dépôt de modèle sur le 🤗 *Hub* et poussons notre modèle :<jupyter_code>from huggingface_hub import HfApi, create_repo create_repo(hub_model_id) api = HfApi() api.upload_folder( folder_path="my_pipeline/scheduler", path_in_repo="", repo_id=hub_model_id ) api.upload_folder(folder_path="my_pipeline/unet", path_in_repo="", repo_id=hub_model_id) api.upload_file( path_or_fileobj="my_pipeline/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, )<jupyter_output><empty_output><jupyter_text>La dernière chose à faire est de créer une belle carte modèle afin que notre générateur de papillons puisse être facilement trouvé sur le 🤗 *Hub* (n'hésitez pas à développer et à modifier la description !) :<jupyter_code>from huggingface_hub import ModelCard content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. 
## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('{hub_model_id}') image = pipeline().images[0] image ``` """ card = ModelCard(content) card.push_to_hub(hub_model_id)<jupyter_output><empty_output><jupyter_text>Maintenant que le modèle est sur le *Hub*, vous pouvez le télécharger de n'importe où en utilisant la méthode `from_pretrained()` de `DDPMPipeline` comme suit :<jupyter_code>from diffusers import DDPMPipeline image_pipe = DDPMPipeline.from_pretrained(hub_model_id) pipeline_output = image_pipe() pipeline_output.images[0]<jupyter_output><empty_output><jupyter_text>Bien, ça marche ! Passer à l'échelle supérieure avec 🤗 *Accelerate*Ce *notebook* a été conçu à des fins d'apprentissage, et en tant que tel, nous avons essayé de garder le code aussi minimal et propre que possible. Pour cette raison, nous avons omis certaines choses que vous pourriez souhaiter si vous deviez entraîner un modèle plus grand sur beaucoup plus de données, comme le support multi-GPU, la trace de la progression et des images d'exemple, la sauvegarde du gradient pour supporter des tailles de batch plus importantes, le téléchargement automatique des modèles et ainsi de suite. Heureusement, la plupart de ces fonctionnalités sont disponibles dans l'exemple de script d'entraînement [ici](https://github.com/huggingface/diffusers/raw/main/examples/unconditional_image_generation/train_unconditional.py).Vous pouvez télécharger le fichier comme suit :<jupyter_code>!wget https://github.com/huggingface/diffusers/raw/main/examples/unconditional_image_generation/train_unconditional.py<jupyter_output><empty_output><jupyter_text>Ouvrez le fichier et vous verrez où le modèle est défini et quels sont les paramètres disponibles. Nous exécutons le script à l'aide de la commande suivante :<jupyter_code># Donnons un nom à notre nouveau modèle pour le Hub model_name = "sd-class-butterflies-64" hub_model_id = get_full_repo_name(model_name) hub_model_id !accelerate launch train_unconditional.py \ --dataset_name="huggan/smithsonian_butterflies_subset" \ --resolution=64 \ --output_dir={model_name} \ --train_batch_size=32 \ --num_epochs=50 \ --gradient_accumulation_steps=1 \ --learning_rate=1e-4 \ --lr_warmup_steps=500 \ --mixed_precision="no"<jupyter_output><empty_output><jupyter_text>Comme précédemment, poussons le modèle vers le *Hub* et créons une belle carte de modèle (et n'hésitez pas à l'éditer comme vous le souhaitez !):<jupyter_code>create_repo(hub_model_id) api = HfApi() api.upload_folder( folder_path=f"{model_name}/scheduler", path_in_repo="", repo_id=hub_model_id ) api.upload_folder( folder_path=f"{model_name}/unet", path_in_repo="", repo_id=hub_model_id ) api.upload_file( path_or_fileobj=f"{model_name}/model_index.json", path_in_repo="model_index.json", repo_id=hub_model_id, ) content = f""" --- license: mit tags: - pytorch - diffusers - unconditional-image-generation - diffusion-models-class --- # Model Card for Unit 1 of the [Diffusion Models Class 🧨](https://github.com/huggingface/diffusion-models-class) This model is a diffusion model for unconditional image generation of cute 🦋. 
## Usage ```python from diffusers import DDPMPipeline pipeline = DDPMPipeline.from_pretrained('{hub_model_id}') image = pipeline().images[0] image ``` """ card = ModelCard(content) card.push_to_hub(hub_model_id)<jupyter_output><empty_output><jupyter_text>Environ 45 minutes plus tard, voici le résultat :<jupyter_code>pipeline = DDPMPipeline.from_pretrained(hub_model_id).to(device) images = pipeline(batch_size=8).images make_grid(images)<jupyter_output><empty_output>
diffusion-models-class/units/fr/unit1/introduction_to_diffusers.ipynb/0
{ "file_path": "diffusion-models-class/units/fr/unit1/introduction_to_diffusers.ipynb", "repo_id": "diffusion-models-class", "token_count": 11144 }
155
<jupyter_start><jupyter_text>Introduction à Stable DiffusionCe *notebook* va couvrir les bases de l'utilisation de Stable Diffusion pour créer et modifier des images en utilisant les pipelines existants. Nous allons également jeter un bref coup d'œil aux composants clés au sein du pipeline, tout en laissant une exploration plus approfondie de ces derniers au *notebook* de plongée profonde. Plus précisément, nous aborderons les points suivants :- Générer des images à partir d'un texte en utilisant le `StableDiffusionPipeline` et en expérimentant avec les arguments disponibles- Voir certains des composants clés du pipeline en action - Le VAE qui en fait un "modèle de diffusion latent". - Le *tokenizer* et l'encodeur qui traitent le prompt textuel - L'UNet lui-même - Le planificateur et l'exploration de différents planificateurs- Reproduction de la boucle d'échantillonnage avec les composants du pipeline- Édition d'images existantes avec le pipeline Img2Img- Utilisation des pipelines inpainting et Depth2Img❓ Si vous avez des questions, merci de les poster sur le canal `diffusion-models-class` du [serveur Discord d'Hugging Face](https://huggingface.co/join/discord). Configuration<jupyter_code>!pip install -Uq diffusers ftfy accelerate # Installer transformers à partir de la source car nous avons besoin de la dernière version pour Depth2Img !pip install -Uq git+https://github.com/huggingface/transformers import torch import requests from PIL import Image from io import BytesIO from matplotlib import pyplot as plt # Nous allons explorer un certain nombre de pipelines aujourd'hui ! from diffusers import ( StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionDepth2ImgPipeline ) # Nous utiliserons quelques images de démonstration plus loin dans le notebook def download_image(url): response = requests.get(url) return Image.open(BytesIO(response.content)).convert("RGB") # Télécharger des images pour l'exemple d'inpainting img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png" mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png" init_image = download_image(img_url).resize((512, 512)) mask_image = download_image(mask_url).resize((512, 512)) # Définir l'appareil device = ( "mps" if torch.backends.mps.is_available() else "cuda" if torch.cuda.is_available() else "cpu" )<jupyter_output><empty_output><jupyter_text>Générer des images à partir d'un texte Chargeons un pipeline Stable Diffusion et voyons ce qu'il peut faire. Il existe plusieurs versions de Stable Diffusion, la plus récente étant la version 2.1. Si vous souhaitez explorer une version plus ancienne, remplacez simplement l'ID du modèle par le modèle approprié (par exemple, vous pouvez essayer "CompVis/stable-diffusion-v1-4" ou choisir un modèle à partir de la [bibliothèque de concepts dreambooth](https://huggingface.co/sd-dreambooth-library)).<jupyter_code># Charger le pipeline model_id = "stabilityai/stable-diffusion-2-1-base" pipe = StableDiffusionPipeline.from_pretrained(model_id).to(device)<jupyter_output><empty_output><jupyter_text>Si vous manquez de mémoire GPU, vous pouvez faire certaines choses pour réduire l'utilisation de la RAM :- Chargez la version FP16 (non supportée par tous les systèmes). 
Avec cette version, vous devrez peut-être convertir les tenseurs en torch.float16 lorsque vous expérimenterez avec les composants individuels du pipeline :```pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(device)```- Activer le découpage de l'attention. Cela permet de réduire l'utilisation de la mémoire du GPU au prix d'une légère réduction de la vitesse :```pipe.enable_attention_slicing()```- Réduire la taille des images généréesUne fois le pipeline chargé, nous pouvons générer une image sur la base d'un prompt avec le code suivant :<jupyter_code># Mise en place d'un générateur pour la reproductibilité generator = torch.Generator(device=device).manual_seed(42) # Exécuter le pipeline, en montrant certains des arguments disponibles pipe_output = pipe( prompt="Palette knife painting of an autumn cityscape", # Ce qu'il faut générer negative_prompt="Oversaturated, blurry, low quality", # Ce qu'il ne faut PAS générer height=480, width=640, # Spécifier la taille de l'image guidance_scale=8, # Comment suivre avec fermeté le prompt num_inference_steps=35, # Nombre d'étapes generator=generator # Graine aléatoire fixe ) # Visualiser l'image obtenue : pipe_output.images[0]<jupyter_output><empty_output><jupyter_text>**Exercice** : Passez un peu de temps à jouer avec la cellule ci-dessus en utilisant vos propres prompts et en modifiant les paramètres pour voir comment ils affectent la sortie. Utilisez une graine aléatoire différente ou supprimez l'argument du `generator` pour obtenir des résultats différents à chaque fois.Arguments clés à modifier :- La largeur et la hauteur spécifient la taille de l'image générée. Elles doivent être divisibles par 8 pour que le VAE fonctionne (ce que nous verrons dans une prochaine section).- Le nombre de pas influence la qualité de la génération. La valeur par défaut (50) fonctionne bien, mais dans certains cas, on peut se contenter de 20 pas, ce qui est pratique pour l'expérimentation.- Le prompt négatif est utilisé pendant le processus d'orientation sans classifieur et peut être un moyen utile d'ajouter un contrôle supplémentaire. Vous pouvez ne pas l'utiliser, mais de nombreux utilisateurs trouvent utile de lister certaines descriptions indésirables dans le prompt négatif, comme illustré ci-dessus.- L'argument guidance_scale détermine l'intensité du guidage sans classifieur (CFG pour *classifier-free guidance*). Des échelles plus élevées poussent les images générées à mieux correspondre au prompt, mais si l'échelle est trop élevée, les résultats peuvent devenir sursaturés et désagréables.Si vous souhaitez vous inspirer d'un prompt, le [Stable Diffusion Prompt Book] (https://app.usp.ai/static/Stable%20Diffusion%202.1%20Prompt%20Book%20by%20USP.ai.pdf) est un bon point de départ.<jupyter_code>cfg_scales = [1.1, 8, 12] #@param prompt = "A collie with a pink hat" #@param fig, axs = plt.subplots(1, len(cfg_scales), figsize=(16, 5)) for i, ax in enumerate(axs): im = pipe(prompt, height=480, width=480, guidance_scale=cfg_scales[i], num_inference_steps=35, generator=torch.Generator(device=device).manual_seed(42)).images[0] ax.imshow(im); ax.set_title(f'CFG Scale {cfg_scales[i]}');<jupyter_output><empty_output><jupyter_text>Modifiez les valeurs ci-dessus pour essayer différentes échelles et différents prompts. L'interprétation est bien sûr subjective, mais par expérience, toute valeur comprise entre 8 et 12 donne de meilleurs résultats que les valeurs inférieures ou supérieures à cette fourchette. 
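<jupyter_text>Dans le même esprit, voici une petite cellule d'exploration facultative (elle réutilise le `prompt` et le `pipe` définis ci-dessus) pour comparer plusieurs graines aléatoires avec la même échelle de guidage et visualiser la variabilité des résultats :<jupyter_code># Comparer quelques graines aléatoires pour un même prompt
seeds = [0, 1, 2, 3]
fig, axs = plt.subplots(1, len(seeds), figsize=(16, 5))
for ax, seed in zip(axs, seeds):
    im = pipe(prompt, height=480, width=480, guidance_scale=8, num_inference_steps=20,
              generator=torch.Generator(device=device).manual_seed(seed)).images[0]
    ax.imshow(im)
    ax.set_title(f"Seed {seed}")<jupyter_output><empty_output>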
Composants du pipeline Le `StableDiffusionPipeline` que nous utilisons est un peu plus complexe que le `DDPMPipeline` que nous avons exploré dans les unités précédentes. En plus du UNet et du planificateur, il y a un certain nombre d'autres composants inclus dans le pipeline :<jupyter_code>print(list(pipe.components.keys())) # Liste des composants<jupyter_output>['vae', 'text_encoder', 'tokenizer', 'unet', 'scheduler', 'safety_checker', 'feature_extractor']<jupyter_text>Pour mieux comprendre le fonctionnement du pipeline, voyons brièvement chaque composant en action individuellement, puis assemblons-les pour reproduire la fonctionnalité du pipeline. Le VAE Le VAE (auto-encodeur variationnel) est un type de modèle capable d'encoder son entrée dans une représentation comprimée, puis de décoder cette représentation latente pour la rendre proche de l'entrée d'origine. Lors de la génération d'images avec diffusion stable, nous générons d'abord les latents en appliquant le processus de diffusion dans l'espace latent du VAE, puis nous les décodons à la fin pour visualiser l'image résultante.Voici un code qui prend une image d'entrée, l'encode dans une représentation latente et la décode à nouveau à l'aide de la VAE :<jupyter_code># Créez de fausses données (une image aléatoire, une plage (-1, 1)) images = torch.rand(1, 3, 512, 512).to(device) * 2 - 1 print("Input images shape:", images.shape) # Encoder dans l'espace latent with torch.no_grad(): latents = 0.18215 * pipe.vae.encode(images).latent_dist.mean print("Encoded latents shape:", latents.shape) # Décoder à nouveau with torch.no_grad(): decoded_images = pipe.vae.decode(latents / 0.18215).sample print("Decoded images shape:", decoded_images.shape)<jupyter_output>Input images shape: torch.Size([1, 3, 512, 512]) Encoded latents shape: torch.Size([1, 4, 64, 64]) Decoded images shape: torch.Size([1, 3, 512, 512])<jupyter_text>Comme vous pouvez le constater, l'image 512x512 est compressée en une représentation latente 64x64 (avec quatre canaux). Cette réduction par 8 de chaque dimension spatiale est la raison pour laquelle la largeur et la hauteur spécifiées doivent être des multiples de 8.Travailler avec ces latents 4x64x64 riches en informations est plus efficace que de travailler avec des images massives de 512 px, ce qui permet d'obtenir des modèles de diffusion plus rapides dont l'entraînement et l'utilisation nécessitent moins de ressources. Le processus de décodage du VAE n'est pas parfait, mais il est suffisamment bon pour que le petit compromis de qualité en vaille généralement la peine. NB : L'exemple de code ci-dessus inclut un facteur d'échelle de 0,18215 nécessaire pour correspondre au traitement utilisé lors de l'entraînement de SD. Le *tokenizer* et l'encodeur L'objectif de l'encodeur est de transformer une chaîne d'entrée (le prompt) en une représentation numérique qui peut être transmise à l'UNet en tant que conditionnement. Le texte est d'abord transformé en une série de *tokens* à l'aide du *tokenizer* du pipeline. L'encodeur dispose d'un vocabulaire d'environ 50K *tokens* et tout mot ne figurant pas dans ce vocabulaire est divisé en sous-mots plus petits. Les *tokens* sont ensuite transmis à l'encodeur lui-même : un transformer qui a été entraîné à l'origine comme encodeur pour CLIP. 
Nous espérons que ce transformer pré-entraîné a appris des représentations riches du texte qui seront également utiles pour la tâche de diffusion.Testons ce processus en encodant un prompt d'exemple, d'abord en le tokenizant manuellement et en le faisant passer par l'encodeur puis en utilisant la méthode `_encode_prompt` pour montrer le processus complet, y compris le rembourrage/la troncature de la longueur jusqu'à la longueur maximale de 77 *tokens* :<jupyter_code># Tokenizer et encoder un prompt d'exemple manuellement : # Tokenizer input_ids = pipe.tokenizer(["A painting of a flooble"])['input_ids'] print("Input ID -> decoded token") for input_id in input_ids[0]: print(f"{input_id} -> {pipe.tokenizer.decode(input_id)}") # Passage par l'encodeur de texte CLIP input_ids = torch.tensor(input_ids).to(device) with torch.no_grad(): text_embeddings = pipe.text_encoder(input_ids)['last_hidden_state'] print("Text embeddings shape:", text_embeddings.shape) # Obtenir les enchâssements finaux à l'aide de la fonction _encode_prompt du pipeline : text_embeddings = pipe._encode_prompt("A painting of a flooble", device, 1, False, '') text_embeddings.shape<jupyter_output><empty_output><jupyter_text>Ces enchâssements (les "états cachés" du dernier bloc de transformation dans le modèle de l'encodeur) seront transmis à l'UNet en tant qu'argument supplémentaire de la méthode `forward`, que nous verrons dans la section suivante. L'UNet L'UNet prend une entrée bruitée et prédit le bruit, tout comme les UNets que nous avons vus dans les unités précédentes. Contrairement aux exemples précédents, l'entrée n'est pas une image mais une représentation latente d'une image. En plus du conditionnement du pas de temps, ce UNet prend également en compte l'enchâssement du prompt en tant qu'entrée supplémentaire. Ici, il fait des prédictions sur des données fictives :<jupyter_code># Entrées fictives timestep = pipe.scheduler.timesteps[0] latents = torch.randn(1, 4, 64, 64).to(device) text_embeddings = torch.randn(1, 77, 1024).to(device) # Prédiction du modèle with torch.no_grad(): unet_output = pipe.unet(latents, timestep, text_embeddings).sample print('UNet output shape:', unet_output.shape) # Same shape as the input latents<jupyter_output>UNet output shape: torch.Size([1, 4, 64, 64])<jupyter_text>Le planificateurLe planificateur stocke le plan de bruit et gère la mise à jour de l'échantillon bruité sur la base des prédictions du modèle. 
Le planificateur par défaut est un `PNDMScheduler`, mais vous pouvez en utiliser d'autres (comme `LMSDiscreteScheduler`) tant qu'ils sont initialisés avec la même configuration.Nous pouvons tracer le plan de bruit pour voir le niveau de bruit (basé sur $\bar{\alpha}$) au fil du temps :<jupyter_code>plt.plot(pipe.scheduler.alphas_cumprod, label=r'$\bar{\alpha}$') plt.xlabel('Timestep (high noise to low noise ->)'); plt.title('Noise schedule');plt.legend();<jupyter_output><empty_output><jupyter_text>Si vous souhaitez essayer un autre planificateur, vous pouvez le remplacer comme suit :<jupyter_code>from diffusers import LMSDiscreteScheduler # Remplacer le planificateur pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config) # Afficher la configuration print('Scheduler config:', pipe.scheduler) # Générer une image avec ce nouveau planificateur pipe(prompt="Palette knife painting of a winter cityscape", height=480, width=480, generator=torch.Generator(device=device).manual_seed(42)).images[0]<jupyter_output>Scheduler config: LMSDiscreteScheduler { "_class_name": "LMSDiscreteScheduler", "_diffusers_version": "0.11.1", "beta_end": 0.012, "beta_schedule": "scaled_linear", "beta_start": 0.00085, "clip_sample": false, "num_train_timesteps": 1000, "prediction_type": "epsilon", "set_alpha_to_one": false, "skip_prk_steps": true, "steps_offset": 1, "trained_betas": null }<jupyter_text>Vous pouvez lire plus de détails sur l'utilisation de différents planificateurs [ici](https://huggingface.co/docs/diffusers/v0.17.1/en/api/pipelines/stable_diffusion/overview#how-to-load-and-use-different-schedulers). Une boucle d'échantillonnage par vous-mêmeMaintenant que nous avons vu tous ces composants en action, nous pouvons les assembler pour reproduire la fonctionnalité du pipeline :<jupyter_code>guidance_scale = 8 #@param num_inference_steps=30 #@param prompt = "Beautiful picture of a wave breaking" #@param negative_prompt = "zoomed in, blurry, oversaturated, warped" #@param # Encoder le prompt text_embeddings = pipe._encode_prompt(prompt, device, 1, True, negative_prompt) # Créer notre point de départ aléatoire latents = torch.randn((1, 4, 64, 64), device=device, generator=generator) latents *= pipe.scheduler.init_noise_sigma # Préparer le planificateur pipe.scheduler.set_timesteps(num_inference_steps, device=device) # Boucle sur les pas de temps d'échantillonnage for i, t in enumerate(pipe.scheduler.timesteps): # développer les latents si l'on procède à un guidage sans classifieur latent_model_input = torch.cat([latents] * 2) # Appliquer tout échelonnement requis par le planificateur latent_model_input = pipe.scheduler.scale_model_input(latent_model_input, t) # prédire le bruit résiduel avec l'UNet with torch.no_grad(): noise_pred = pipe.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample # réaliser un guidage noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # calculer l'échantillon bruité précédent x_t -> x_t-1 latents = pipe.scheduler.step(noise_pred, t, latents).prev_sample # Décoder les latents résultants en une image with torch.no_grad(): image = pipe.decode_latents(latents.detach()) # Visualiser pipe.numpy_to_pil(image)[0]<jupyter_output><empty_output><jupyter_text>Dans la plupart des cas, il sera plus facile d'utiliser les pipelines existants, mais le fait de disposer de cette boucle d'échantillonnage bidouillable peut s'avérer utile pour comprendre et modifier le 
fonctionnement de chaque composant. Si vous souhaitez voir ce code et tous les différents composants explorés et modifiés en profondeur, consultez le [*notebook*](https://github.com/fastai/diffusion-nbs/blob/master/Stable%20Diffusion%20Deep%20Dive.ipynb) et la [video](https://m.youtube.com/watch?v=0_BBRNYInx8) 'Stable Diffusion Deep Dive' pour une exploration plus approfondie. Pipelines supplémentairesQue pouvons-nous faire d'autre que de générer des images à partir d'un prompt ? Beaucoup de choses ! Dans cette section, nous allons démontrer quelques pipelines sympas pour vous donner un avant-goût des autres tâches pour lesquelles Stable Diffusion peut être utilisé. Plusieurs d'entre eux nécessitent le téléchargement de nouveaux modèles, donc si vous êtes pressé, vous pouvez parcourir cette section en vous contentant de regarder les résultats existants plutôt que de télécharger et d'exécuter tous les modèles vous-même. Img2Img Dans les exemples présentés jusqu'à présent, nous avons généré des images en partant de latents aléatoires et en appliquant la boucle complète d'échantillonnage par diffusion. Mais il n'est pas nécessaire de partir de zéro. Le pipeline Img2Img encode d'abord une image existante dans un ensemble de latents, puis ajoute du bruit aux latents et utilise cette image comme point de départ. La quantité de bruit ajoutée et le nombre d'étapes de débruitage appliquées déterminent la "force" du processus Img2Img. L'ajout d'une petite quantité de bruit (force faible) n'entraînera que très peu de changements, tandis que l'ajout d'une quantité maximale de bruit et l'exécution du processus de débruitage complet donneront une image qui ne ressemblera guère à l'image d'entrée, hormis quelques similitudes au niveau de la structure générale.Ce pipeline ne nécessite pas de modèles particuliers, et tant que l'ID du modèle est le même que celui de notre exemple texte-image ci-dessus, aucun nouveau fichier ne devra être téléchargé.<jupyter_code># Chargement d'un pipeline Img2Img model_id = "stabilityai/stable-diffusion-2-1-base" img2img_pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id).to(device)<jupyter_output><empty_output><jupyter_text>Dans la section Configuration nous avons chargé un exemple `init_image` à utiliser pour cette démo, mais vous pouvez le remplacer par votre propre image si vous préférez. Voici le pipeline en action :<jupyter_code># Appliquer Img2Img result_image = img2img_pipe( prompt="An oil painting of a man on a bench", image = init_image, # L'image de départ strength = 0.6, # 0 pour aucun changement, 1.0 pour une force maximale ).images[0] # Voir le résultat fig, axs = plt.subplots(1, 2, figsize=(12, 5)) axs[0].imshow(init_image);axs[0].set_title('Input Image') axs[1].imshow(result_image);axs[1].set_title('Result');<jupyter_output><empty_output><jupyter_text>**Exercice** : Expérimentez avec ce pipeline. Essayez vos propres images, ou jouez avec différentes forces et différents prompts. Vous pouvez utiliser les mêmes arguments que pour le pipeline texte-image, alors n'hésitez pas à essayer différentes tailles, différents nombres d'étapes, etc. Inpainting Que se passerait-il si nous voulions conserver une partie de l'image d'entrée inchangée mais générer quelque chose de nouveau dans d'autres parties ? C'est ce qu'on appelle l'*inpainting*. 
Bien qu'il soit possible de le faire avec le même modèle que les démonstrations précédentes (via `StableDiffusionInpaintPipelineLegacy`), nous pouvons obtenir de meilleurs résultats en utilisant une version *finetunée* personnalisée de Stable Diffusion qui prend un masque comme condition supplémentaire. L'image du masque doit avoir la même forme que l'image d'entrée, avec du blanc dans les zones à remplacer et du noir dans les zones à garder inchangées. Voici comment charger un tel pipeline et l'appliquer à l'image d'exemple et au masque chargés dans la section Configuration :<jupyter_code># Charger le pipeline d'inpainting (nécessite un modèle d'inpainting approprié) pipe = StableDiffusionInpaintPipeline.from_pretrained("runwayml/stable-diffusion-inpainting") pipe = pipe.to(device) # Inpaint avec un prompt pour avoir le résultat souhaité prompt = "A small robot, high resolution, sitting on a park bench" image = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0] # Voir le résultat fig, axs = plt.subplots(1, 3, figsize=(16, 5)) axs[0].imshow(init_image);axs[0].set_title('Input Image') axs[1].imshow(mask_image);axs[1].set_title('Mask') axs[2].imshow(image);axs[2].set_title('Result');<jupyter_output><empty_output><jupyter_text>Ce modèle peut être particulièrement puissant lorsqu'il est combiné à un autre modèle pour générer automatiquement des masques. Par exemple, ce [Space](https://huggingface.co/spaces/nielsr/text-based-inpainting) utilise un modèle appelé CLIPSeg pour masquer un objet à remplacer sur la base d'une description textuelle. En marge : gestion du cache de votre modèleL'exploration de différents pipelines et variantes de modèles peut remplir votre espace disque. Vous pouvez voir quels modèles sont actuellement téléchargés avec :<jupyter_code>!ls ~/.cache/huggingface/diffusers/ # Liste du contenu du répertoire cache<jupyter_output>models--CompVis--stable-diffusion-v1-4 models--ddpm-bedroom-256 models--google--ddpm-bedroom-256 models--google--ddpm-celebahq-256 models--runwayml--stable-diffusion-inpainting models--stabilityai--stable-diffusion-2-1-base<jupyter_text>Consultez la [documentation](https://huggingface.co/docs/huggingface_hub/main/guides/manage-cache) sur la mise en cache pour savoir comment visualiser et gérer efficacement votre cache. Depth2Image Image d'entrée, image de profondeur et exemples générés (source de l'image : StabilityAI) Img2Img est très bien, mais parfois nous voulons créer une nouvelle image avec la composition de l'original mais avec des couleurs ou des textures complètement différentes. Il peut être difficile de trouver une force d'Img2Img qui préserve ce que nous voulons de la mise en page sans conserver les couleurs d'entrée.Il est temps d'adopter un autre modèle *finetuné* ! Celui-ci prend en compte les informations de profondeur comme condition supplémentaire lors de la génération. 
Le pipeline utilise un modèle d'estimation de la profondeur pour créer une carte de profondeur, qui est ensuite transmise au UNet *finetuné* lors de la génération d'images afin de préserver (si possible) la profondeur et la structure de l'image initiale tout en remplissant un contenu complètement nouveau.<jupyter_code># Charger le pipeline Depth2Img (nécessite un modèle approprié) pipe = StableDiffusionDepth2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-depth") pipe = pipe.to(device) # Inpaint avec un prompt pour avoir le résultat souhaité prompt = "An oil painting of a man on a bench" image = pipe(prompt=prompt, image=init_image).images[0] # Voir le résultat fig, axs = plt.subplots(1, 2, figsize=(16, 5)) axs[0].imshow(init_image);axs[0].set_title('Input Image') axs[1].imshow(image);axs[1].set_title('Result');<jupyter_output><empty_output>
diffusion-models-class/units/fr/unit3/stable_diffusion_introduction.ipynb/0
{ "file_path": "diffusion-models-class/units/fr/unit3/stable_diffusion_introduction.ipynb", "repo_id": "diffusion-models-class", "token_count": 8445 }
156
<jupyter_start><jupyter_text>Derrière le pipeline (TensorFlow) Installez la bibliothèque 🤗 *Transformers* pour exécuter ce *notebook*.<jupyter_code>!pip install transformers[sentencepiece] from transformers import pipeline classifier = pipeline("sentiment-analysis", model="tblard/tf-allocine") classifier( ["J'ai attendu un cours d'HuggingFace toute ma vie.", "Je déteste tellement ça !"] ) from transformers import AutoTokenizer checkpoint = "tblard/tf-allocine" tokenizer = AutoTokenizer.from_pretrained(checkpoint) raw_inputs = [ "J'ai attendu un cours d'HuggingFace toute ma vie.", "Je déteste tellement ça !", ] inputs = tokenizer(raw_inputs, padding=True, truncation=True, return_tensors="tf") print(inputs) from transformers import TFAutoModel checkpoint = "tblard/tf-allocine" model = TFAutoModel.from_pretrained(checkpoint) outputs = model(**inputs) print(outputs.last_hidden_state.shape) from transformers import TFAutoModelForSequenceClassification checkpoint = "tblard/tf-allocine" model = TFAutoModelForSequenceClassification.from_pretrained(checkpoint) outputs = model(**inputs) print(outputs.logits.shape) print(outputs.logits) import tensorflow as tf predictions = tf.math.softmax(outputs.logits, axis=-1) print(predictions) model.config.id2label<jupyter_output><empty_output>
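<jupyter_text>Pour conclure, voici une petite cellule donnée à titre d'illustration (elle réutilise simplement les objets `predictions` et `model` définis ci-dessus) qui relie les probabilités calculées aux étiquettes lisibles fournies par `model.config.id2label` :<jupyter_code># Associer chaque phrase à l'étiquette la plus probable
for i, probs in enumerate(predictions.numpy()):
    label_id = int(probs.argmax())
    print(f"Phrase {i} : {model.config.id2label[label_id]} ({probs[label_id]:.4f})")<jupyter_output><empty_output>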
notebooks/course/fr/chapter2/section2_tf.ipynb/0
{ "file_path": "notebooks/course/fr/chapter2/section2_tf.ipynb", "repo_id": "notebooks", "token_count": 473 }
157
<jupyter_start><jupyter_text>Utilisation de modèles pré-entraînés (PyTorch) Installez la bibliothèque 🤗 Transformers pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] from transformers import pipeline camembert_fill_mask = pipeline("fill-mask", model="camembert-base") results = camembert_fill_mask("Le camembert est <mask> :)") from transformers import CamembertTokenizer, CamembertForMaskedLM tokenizer = CamembertTokenizer.from_pretrained("camembert-base") model = CamembertForMaskedLM.from_pretrained("camembert-base") from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("camembert-base") model = AutoModelForMaskedLM.from_pretrained("camembert-base")<jupyter_output><empty_output>
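<jupyter_text>Pour relier ces classes au pipeline `fill-mask` utilisé plus haut, voici une esquisse facultative montrant comment obtenir manuellement les prédictions les plus probables pour le token masqué, en réutilisant le `tokenizer` et le `model` chargés ci-dessus :<jupyter_code>import torch

# Tokeniser la phrase contenant le token <mask>
inputs = tokenizer("Le camembert est <mask> :)", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Repérer la position du token <mask> puis afficher les 5 tokens les plus probables
mask_index = (inputs["input_ids"][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
top_tokens = logits[0, mask_index].topk(5).indices[0]
print([tokenizer.decode(token_id) for token_id in top_tokens])<jupyter_output><empty_output>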
notebooks/course/fr/chapter4/section2_pt.ipynb/0
{ "file_path": "notebooks/course/fr/chapter4/section2_pt.ipynb", "repo_id": "notebooks", "token_count": 259 }
158
<jupyter_start><jupyter_text>Normalisation et prétokenization. Installez les bibliothèques 🤗 *Transformers* et 🤗 *Datasets* pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("camembert-base") print(type(tokenizer.backend_tokenizer)) print(tokenizer.backend_tokenizer.normalizer.normalize_str("Héllò hôw are ü?")) # Ne semble pas marcher sur le français tokenizer_fr = AutoTokenizer.from_pretrained("camembert-base") tokenizer_fr.backend_tokenizer.normalizer.normalize_str("Bönjoùr commènt vas tü ?") tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?") tokenizer = AutoTokenizer.from_pretrained("gpt2") tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?") tokenizer = AutoTokenizer.from_pretrained("t5-small") tokenizer.backend_tokenizer.pre_tokenizer.pre_tokenize_str("Hello, how are you?")<jupyter_output><empty_output>
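<jupyter_text>Puisque le normalizer de CamemBERT ne retire pas les accents, voici, à titre d'illustration (esquisse indicative, non tirée du cours), comment on pourrait composer son propre normalizer avec la bibliothèque 🤗 *Tokenizers* :<jupyter_code>from tokenizers import normalizers
from tokenizers.normalizers import NFD, Lowercase, StripAccents

# Décomposition Unicode, passage en minuscules, puis suppression des accents
custom_normalizer = normalizers.Sequence([NFD(), Lowercase(), StripAccents()])
print(custom_normalizer.normalize_str("Bönjoùr commènt vas tü ?"))<jupyter_output><empty_output>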
notebooks/course/fr/chapter6/section4.ipynb/0
{ "file_path": "notebooks/course/fr/chapter6/section4.ipynb", "repo_id": "notebooks", "token_count": 362 }
159
<jupyter_start><jupyter_text>Réponses aux questions (TensorFlow) Installez les bibliothèques Transformers et Datasets pour exécuter ce *notebook*.<jupyter_code>!pip install datasets transformers[sentencepiece] !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Vous aurez besoin de configurer git, adaptez votre email et votre nom dans la cellule suivante.<jupyter_code>!git config --global user.email "[email protected]" !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Vous devrez également être connecté au Hub d'Hugging Face. Exécutez ce qui suit et entrez vos informations d'identification.<jupyter_code>from huggingface_hub import notebook_login notebook_login() from datasets import load_dataset raw_datasets = load_dataset("piaf") # piaf n'ayant pas de jeu de données de validation, nous en créons un raw_datasets = raw_datasets['train'] raw_datasets = raw_datasets.train_test_split(test_size=0.2, shuffle=True) raw_datasets print("Context: ", raw_datasets["train"][0]["context"]) print("Question: ", raw_datasets["train"][0]["question"]) print("Answer: ", raw_datasets["train"][0]["answers"]) raw_datasets["train"].filter(lambda x: len(x["answers"]["text"]) != 1) print(raw_datasets["test"][0]["answers"]) print(raw_datasets["test"][2]["answers"]) print(raw_datasets["test"][2]["context"]) print(raw_datasets["test"][2]["question"]) from transformers import AutoTokenizer model_checkpoint = "camembert-base" tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) tokenizer.is_fast context = raw_datasets["train"][0]["context"] question = raw_datasets["train"][0]["question"] inputs = tokenizer(question, context) tokenizer.decode(inputs["input_ids"]) inputs = tokenizer( question, context, max_length=100, truncation="only_second", stride=50, return_overflowing_tokens=True, ) for ids in inputs["input_ids"]: print(tokenizer.decode(ids)) inputs = tokenizer( question, context, max_length=100, truncation="only_second", stride=50, return_overflowing_tokens=True, return_offsets_mapping=True, ) inputs.keys() inputs["overflow_to_sample_mapping"] inputs = tokenizer( raw_datasets["train"][2:6]["question"], raw_datasets["train"][2:6]["context"], max_length=100, truncation="only_second", stride=50, return_overflowing_tokens=True, return_offsets_mapping=True, ) print(f"The 4 examples gave {len(inputs['input_ids'])} features.") print(f"Here is where each comes from: {inputs['overflow_to_sample_mapping']}.") answers = raw_datasets["train"][2:6]["answers"] start_positions = [] end_positions = [] for i, offset in enumerate(inputs["offset_mapping"]): sample_idx = inputs["overflow_to_sample_mapping"][i] answer = answers[sample_idx] start_char = answer["answer_start"][0] end_char = answer["answer_start"][0] + len(answer["text"][0]) sequence_ids = inputs.sequence_ids(i) # Trouver le début et la fin du contexte idx = 0 while sequence_ids[idx] != 1: idx += 1 context_start = idx while sequence_ids[idx] == 1: idx += 1 context_end = idx - 1 # Si la réponse n'est pas entièrement dans le contexte, l'étiquette est (0, 0) if offset[context_start][0] > start_char or offset[context_end][1] < end_char: start_positions.append(0) end_positions.append(0) else: # Sinon, ce sont les positions de début et de fin du token idx = context_start while idx <= context_end and offset[idx][0] <= start_char: idx += 1 start_positions.append(idx - 1) idx = context_end while idx >= context_start and offset[idx][1] >= end_char: idx -= 1 end_positions.append(idx + 1) start_positions, end_positions idx = 0 
sample_idx = inputs["overflow_to_sample_mapping"][idx] answer = answers[sample_idx]["text"][0] start = start_positions[idx] end = end_positions[idx] labeled_answer = tokenizer.decode(inputs["input_ids"][idx][start : end + 1]) print(f"Theoretical answer: {answer}, labels give: {labeled_answer}") idx = 4 sample_idx = inputs["overflow_to_sample_mapping"][idx] answer = answers[sample_idx]["text"][0] decoded_example = tokenizer.decode(inputs["input_ids"][idx]) print(f"Theoretical answer: {answer}, decoded example: {decoded_example}") max_length = 384 stride = 128 def preprocess_training_examples(examples): questions = [q.strip() for q in examples["question"]] inputs = tokenizer( questions, examples["context"], max_length=max_length, truncation="only_second", stride=stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) offset_mapping = inputs.pop("offset_mapping") sample_map = inputs.pop("overflow_to_sample_mapping") answers = examples["answers"] start_positions = [] end_positions = [] for i, offset in enumerate(offset_mapping): sample_idx = sample_map[i] answer = answers[sample_idx] start_char = answer["answer_start"][0] end_char = answer["answer_start"][0] + len(answer["text"][0]) sequence_ids = inputs.sequence_ids(i) # Trouver le début et la fin du contexte idx = 0 while sequence_ids[idx] != 1: idx += 1 context_start = idx while sequence_ids[idx] == 1: idx += 1 context_end = idx - 1 # Si la réponse n'est pas entièrement dans le contexte, l'étiquette est (0, 0) if offset[context_start][0] > start_char or offset[context_end][1] < end_char: start_positions.append(0) end_positions.append(0) else: # Sinon, ce sont les positions de début et de fin du token idx = context_start while idx <= context_end and offset[idx][0] <= start_char: idx += 1 start_positions.append(idx - 1) idx = context_end while idx >= context_start and offset[idx][1] >= end_char: idx -= 1 end_positions.append(idx + 1) inputs["start_positions"] = start_positions inputs["end_positions"] = end_positions return inputs train_dataset = raw_datasets["train"].map( preprocess_training_examples, batched=True, remove_columns=raw_datasets["train"].column_names, ) len(raw_datasets["train"]), len(train_dataset) def preprocess_validation_examples(examples): questions = [q.strip() for q in examples["question"]] inputs = tokenizer( questions, examples["context"], max_length=max_length, truncation="only_second", stride=stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) sample_map = inputs.pop("overflow_to_sample_mapping") example_ids = [] for i in range(len(inputs["input_ids"])): sample_idx = sample_map[i] example_ids.append(examples["id"][sample_idx]) sequence_ids = inputs.sequence_ids(i) offset = inputs["offset_mapping"][i] inputs["offset_mapping"][i] = [ o if sequence_ids[k] == 1 else None for k, o in enumerate(offset) ] inputs["example_id"] = example_ids return inputs validation_dataset = raw_datasets["test"].map( preprocess_validation_examples, batched=True, remove_columns=raw_datasets["test"].column_names, ) len(raw_datasets["test"]), len(validation_dataset) small_eval_set = raw_datasets["test"].select(range(100)) trained_checkpoint = "etalab-ia/camembert-base-squadFR-fquad-piaf" tokenizer = AutoTokenizer.from_pretrained(trained_checkpoint) eval_set = small_eval_set.map( preprocess_validation_examples, batched=True, remove_columns=raw_datasets["test"].column_names, ) import tensorflow as tf from transformers import TFAutoModelForQuestionAnswering 
eval_set_for_model = eval_set.remove_columns(["example_id", "offset_mapping"]) eval_set_for_model.set_format("numpy") batch = {k: eval_set_for_model[k] for k in eval_set_for_model.column_names} trained_model = TFAutoModelForQuestionAnswering.from_pretrained(trained_checkpoint) outputs = trained_model(**batch) start_logits = outputs.start_logits.numpy() end_logits = outputs.end_logits.numpy() import collections example_to_features = collections.defaultdict(list) for idx, feature in enumerate(eval_set): example_to_features[feature["example_id"]].append(idx) import numpy as np n_best = 20 max_answer_length = 30 predicted_answers = [] for example in small_eval_set: example_id = example["id"] context = example["context"] answers = [] for feature_index in example_to_features[example_id]: start_logit = start_logits[feature_index] end_logit = end_logits[feature_index] offsets = eval_set["offset_mapping"][feature_index] start_indexes = np.argsort(start_logit)[-1 : -n_best - 1 : -1].tolist() end_indexes = np.argsort(end_logit)[-1 : -n_best - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Ignorez les réponses qui ne sont pas entièrement dans le contexte if offsets[start_index] is None or offsets[end_index] is None: continue # Ignorer les réponses dont la longueur est soit < 0 soit > max_answer_length if ( end_index < start_index or end_index - start_index + 1 > max_answer_length ): continue answers.append( { "text": context[offsets[start_index][0] : offsets[end_index][1]], "logit_score": start_logit[start_index] + end_logit[end_index], } ) best_answer = max(answers, key=lambda x: x["logit_score"]) predicted_answers.append({"id": example_id, "prediction_text": best_answer["text"]}) from datasets import load_metric metric = load_metric("squad") theoretical_answers = [ {"id": ex["id"], "answers": ex["answers"]} for ex in small_eval_set ] print(predicted_answers[0]) print(theoretical_answers[0]) metric.compute(predictions=predicted_answers, references=theoretical_answers) from tqdm.auto import tqdm def compute_metrics(start_logits, end_logits, features, examples): example_to_features = collections.defaultdict(list) for idx, feature in enumerate(features): example_to_features[feature["example_id"]].append(idx) predicted_answers = [] for example in tqdm(examples): example_id = example["id"] context = example["context"] answers = [] # Parcourir en boucle toutes les fonctionnalités associées à cet exemple for feature_index in example_to_features[example_id]: start_logit = start_logits[feature_index] end_logit = end_logits[feature_index] offsets = features[feature_index]["offset_mapping"] start_indexes = np.argsort(start_logit)[-1 : -n_best - 1 : -1].tolist() end_indexes = np.argsort(end_logit)[-1 : -n_best - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Ignorez les réponses qui ne sont pas entièrement dans le contexte if offsets[start_index] is None or offsets[end_index] is None: continue # Sauter les réponses dont la longueur est soit < 0, soit > max_answer_length if ( end_index < start_index or end_index - start_index + 1 > max_answer_length ): continue answer = { "text": context[offsets[start_index][0] : offsets[end_index][1]], "logit_score": start_logit[start_index] + end_logit[end_index], } answers.append(answer) # Sélectionnez la réponse avec le meilleur score if len(answers) > 0: best_answer = max(answers, key=lambda x: x["logit_score"]) predicted_answers.append( {"id": example_id, "prediction_text": best_answer["text"]} ) else: 
predicted_answers.append({"id": example_id, "prediction_text": ""}) theoretical_answers = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples] return metric.compute(predictions=predicted_answers, references=theoretical_answers) compute_metrics(start_logits, end_logits, eval_set, small_eval_set) model = TFAutoModelForQuestionAnswering.from_pretrained(model_checkpoint) from transformers import DefaultDataCollator data_collator = DefaultDataCollator(return_tensors="tf") tf_train_dataset = model.prepare_tf_dataset( train_dataset, collate_fn=data_collator, shuffle=True, batch_size=16) tf_eval_dataset = model.prepare_tf_dataset( validation_dataset, collate_fn=data_collator, shuffle=False, batch_size=16) from transformers import create_optimizer from transformers.keras_callbacks import PushToHubCallback import tensorflow as tf # Le nombre d'étapes d'entraînement est le nombre d'échantillons dans le jeu de données, divisé par la taille du batch puis multiplié # par le nombre total d'époques. Notez que le jeu de données tf_train_dataset est ici un batch de données tf.data.Dataset, # pas le jeu de données original Hugging Face, donc son len() est déjà num_samples // batch_size. num_train_epochs = 3 num_train_steps = len(tf_train_dataset) * num_train_epochs optimizer, schedule = create_optimizer( init_lr=2e-5, num_warmup_steps=0, num_train_steps=num_train_steps, weight_decay_rate=0.01, ) model.compile(optimizer=optimizer) # Entraîner en mixed-precision float16 tf.keras.mixed_precision.set_global_policy("mixed_float16") from transformers.keras_callbacks import PushToHubCallback callback = PushToHubCallback(output_dir="camembert-base-finetuned-piaf", tokenizer=tokenizer) # Nous allons faire la validation après, donc pas de validation au milieu de l'entraînement model.fit(tf_train_dataset, callbacks=[callback], epochs=num_train_epochs) predictions = model.predict(tf_eval_dataset) compute_metrics( predictions["start_logits"], predictions["end_logits"], validation_dataset, raw_datasets["test"], ) from transformers import pipeline # Remplacez par votre propre checkpoint model_checkpoint = "huggingface-course/camembert-finetuned-piaf" question_answerer = pipeline("question-answering", model=model_checkpoint) context = """ 🤗 Transformers est soutenu par les trois bibliothèques d'apprentissage profond les plus populaires - Jax, PyTorch et TensorFlow - avec une intégration transparente entre elles. Il est simple d'entraîner vos modèles avec l'une avant de les charger pour l'inférence avec l'autre. """ question = "Quelles sont les bibliothèques d'apprentissage profond derrière 🤗 Transformers ?" question_answerer(question=question, context=context)<jupyter_output><empty_output>
notebooks/course/fr/chapter7/section7_tf.ipynb/0
{ "file_path": "notebooks/course/fr/chapter7/section7_tf.ipynb", "repo_id": "notebooks", "token_count": 6501 }
160
<jupyter_start><jupyter_text>Ever since Stable Diffusion took the world by storm, people have been looking for ways to have more control over the results of the generation process. ControlNet provides a minimal interface allowing users to customize the generation process to a great extent. With [ControlNet](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet), users can easily condition the generation with different spatial contexts such as a depth map, a segmentation map, a scribble, keypoints, and so on! We can turn a cartoon drawing into a realistic photo with incredible coherence. Realistic Lofi Girl Or even use it as your interior designer. Before After You can turn your sketch scribble into an artistic drawing. Before After Also, make some famous logos come to life. Before After With ControlNet, the sky is the limit 🌠 In this notebook, we first introduce the [`StableDiffusionControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet) and then show how it can be applied for various control conditionings. Let’s get controlling! ControlNet: TL;DR ControlNet was introduced in [Adding Conditional Control to Text-to-Image Diffusion Models](https://arxiv.org/abs/2302.05543) by Lvmin Zhang and Maneesh Agrawala. It introduces a framework that allows for supporting various spatial contexts that can serve as additional conditionings to diffusion models such as Stable Diffusion. Training a ControlNet consists of the following steps: 1. Cloning the pre-trained parameters of a diffusion model, such as Stable Diffusion's latent UNet (referred to as the “trainable copy”), while also maintaining the pre-trained parameters separately (the “locked copy”). This is done so that the locked parameter copy can preserve the vast knowledge learned from a large dataset, whereas the trainable copy is employed to learn task-specific aspects. 2. The trainable and locked copies of the parameters are connected via “zero convolution” layers (see [here](https://github.com/lllyasviel/ControlNet#controlnet) for more information), which are optimized as a part of the ControlNet framework. This is a training trick to preserve the semantics already learned by the frozen model as the new conditions are trained. Pictorially, training a ControlNet looks like so: The diagram is taken from here. A sample from the training set for ControlNet-like training looks like this (additional conditioning is via edge maps): Prompt Original Image Conditioning "bird" Similarly, if we were to condition ControlNet with semantic segmentation maps, a training sample would be like so: Prompt Original Image Conditioning "big house" Every new type of conditioning requires training a new copy of ControlNet weights. The paper proposed 8 different conditioning models that are all [supported](https://huggingface.co/lllyasviel?search=controlnet) in Diffusers! For inference, both the pre-trained diffusion model weights and the trained ControlNet weights are needed. For example, using [Stable Diffusion v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) with a ControlNet checkpoint requires roughly 700 million more parameters compared to just using the original Stable Diffusion model, which makes ControlNet a bit more memory-expensive for inference. Because the pre-trained diffusion models are locked during training, one only needs to switch out the ControlNet parameters when using a different conditioning. 
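To make the “zero convolution” idea above concrete, here is a minimal, illustrative PyTorch sketch (not the actual ControlNet implementation; the channel count is an arbitrary assumption): a 1x1 convolution whose weights and bias start at zero, so that at initialization the trainable copy contributes nothing and the locked copy's behaviour is left untouched.<jupyter_code>import torch
import torch.nn as nn

def zero_conv(channels):
    # 1x1 convolution initialized to exactly zero, as in the "zero convolution" trick
    conv = nn.Conv2d(channels, channels, kernel_size=1)
    nn.init.zeros_(conv.weight)
    nn.init.zeros_(conv.bias)
    return conv

locked_features = torch.randn(1, 320, 64, 64)   # stand-in for a frozen (locked) UNet block output
control_features = torch.randn(1, 320, 64, 64)  # stand-in for the trainable copy's output

bridge = zero_conv(320)
combined = locked_features + bridge(control_features)
# Before any training step, the bridge outputs zeros, so the locked behaviour is preserved
print(torch.allclose(combined, locked_features))<jupyter_output><empty_output>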
This makes it fairly simple to deploy multiple ControlNet weights in one application as we will see below. The `StableDiffusionControlNetPipeline` Before we begin, we want to give a huge shout-out to the community contributor [Takuma Mori](https://github.com/takuma104) for having led the integration of ControlNet into Diffusers ❤️. To experiment with ControlNet, Diffusers exposes the [`StableDiffusionControlNetPipeline`](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet), similar to the [other Diffusers pipelines](https://huggingface.co/docs/diffusers/api/pipelines/overview). Central to the [`StableDiffusionControlNetPipeline`] is the `controlnet` argument, which lets us provide a particular trained [`ControlNetModel`](https://huggingface.co/docs/diffusers/main/en/api/models#diffusers.ControlNetModel) instance while keeping the pre-trained diffusion model weights the same. We will explore different use cases with the `StableDiffusionControlNetPipeline` in this blog post. The first ControlNet model we are going to walk through is the [Canny model](https://huggingface.co/lllyasviel/sd-controlnet-canny) - this is one of the most popular models that generated some of the amazing images you are likely seeing on the internet. We welcome you to run the code snippets shown in the sections below with [this Colab Notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb). Before we begin, let's make sure we have all the necessary libraries installed:<jupyter_code>!pip install -q diffusers==0.14.0 transformers xformers git+https://github.com/huggingface/accelerate.git<jupyter_output><empty_output><jupyter_text>To process different conditionings depending on the chosen ControlNet, we also need to install some additional dependencies: - [OpenCV](https://opencv.org/) - [controlnet-aux](https://github.com/patrickvonplaten/controlnet_aux#controlnet-auxiliary-models) - a simple collection of pre-processing models for ControlNet<jupyter_code>!pip install -q opencv-contrib-python !pip install -q controlnet_aux<jupyter_output><empty_output><jupyter_text>We will use the famous painting ["Girl with a Pearl Earring"](https://en.wikipedia.org/wiki/Girl_with_a_Pearl_Earring) for this example. So, let's download the image and take a look:<jupyter_code>from diffusers import StableDiffusionControlNetPipeline from diffusers.utils import load_image image = load_image( "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png" ) image<jupyter_output>WARNING:xformers:A matching Triton is not available, some optimizations will not be enabled. Error caught was: No module named 'triton'<jupyter_text>Next, we will put the image through the canny pre-processor:<jupyter_code>import cv2 from PIL import Image import numpy as np image = np.array(image) low_threshold = 100 high_threshold = 200 image = cv2.Canny(image, low_threshold, high_threshold) image = image[:, :, None] image = np.concatenate([image, image, image], axis=2) canny_image = Image.fromarray(image) canny_image<jupyter_output><empty_output><jupyter_text>As we can see, it is essentially edge detection. Now, we load [runwayml/stable-diffusion-v1-5](https://huggingface.co/runwayml/stable-diffusion-v1-5) as well as the [ControlNet model for canny edges](https://huggingface.co/lllyasviel/sd-controlnet-canny). 
The models are loaded in half-precision (`torch.float16`) to allow for fast and memory-efficient inference.<jupyter_code>from diffusers import StableDiffusionControlNetPipeline, ControlNetModel import torch controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16) pipe = StableDiffusionControlNetPipeline.from_pretrained( "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16 )<jupyter_output><empty_output><jupyter_text>Instead of using Stable Diffusion's default [PNDMScheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/pndm), we use one of the currently fastest diffusion model schedulers, called [UniPCMultistepScheduler](https://huggingface.co/docs/diffusers/main/en/api/schedulers/unipc). Choosing an improved scheduler can drastically reduce inference time - in our case we are able to reduce the number of inference steps from 50 to 20 while more or less keeping the same image generation quality. More information regarding schedulers can be found [here](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers).<jupyter_code>from diffusers import UniPCMultistepScheduler pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)<jupyter_output><empty_output><jupyter_text>Instead of loading our pipeline directly to the GPU, we enable smart CPU offloading, which can be achieved with the [`enable_model_cpu_offload` function](https://huggingface.co/docs/diffusers/main/en/api/pipelines/stable_diffusion/controlnet#diffusers.StableDiffusionControlNetPipeline.enable_model_cpu_offload). Remember that during inference, diffusion models such as Stable Diffusion require not just one but multiple model components that are run sequentially. In the case of Stable Diffusion with ControlNet, we first use the CLIP text encoder, then the diffusion model UNet and the ControlNet, then the VAE decoder, and finally run a safety checker. Most components are only run once during the diffusion process and are thus not required to occupy GPU memory all the time. By enabling smart model offloading, we make sure that each component is only loaded onto the GPU when it's needed, so that we can significantly reduce memory consumption without significantly slowing down inference. **Note**: When running `enable_model_cpu_offload`, do not manually move the pipeline to GPU with `.to("cuda")` - once CPU offloading is enabled, the pipeline automatically takes care of GPU memory management.<jupyter_code>pipe.enable_model_cpu_offload()<jupyter_output><empty_output><jupyter_text>Finally, we want to take full advantage of the amazing [FlashAttention/xformers](https://github.com/facebookresearch/xformers) attention layer acceleration, so let's enable this! If this command does not work for you, you might not have `xformers` correctly installed. In this case, you can just skip the following line of code.<jupyter_code>pipe.enable_xformers_memory_efficient_attention()<jupyter_output><empty_output><jupyter_text>Now we are ready to run the ControlNet pipeline! We still provide a prompt to guide the image generation process, just like what we would normally do with a Stable Diffusion image-to-image pipeline. However, ControlNet will allow a lot more control over the generated image, because we will be able to control the exact composition of the generated image with the canny edge image we just created. It will be fun to see some images of contemporary celebrities posing for this exact same painting from the 17th century. 
And it's really easy to do that with ControlNet - all we have to do is include the names of these celebrities in the prompt!<jupyter_code>def image_grid(imgs, rows, cols): assert len(imgs) == rows * cols w, h = imgs[0].size grid = Image.new("RGB", size=(cols * w, rows * h)) grid_w, grid_h = grid.size for i, img in enumerate(imgs): grid.paste(img, box=(i % cols * w, i // cols * h)) return grid prompt = ", best quality, extremely detailed" prompt = [t + prompt for t in ["Sandra Oh", "Kim Kardashian", "rihanna", "taylor swift"]] generator = [torch.Generator(device="cpu").manual_seed(2) for i in range(len(prompt))] output = pipe( prompt, canny_image, negative_prompt=["monochrome, lowres, bad anatomy, worst quality, low quality"] * len(prompt), generator=generator, num_inference_steps=20, ) image_grid(output.images, 2, 2)<jupyter_output><empty_output><jupyter_text>We can effortlessly combine ControlNet with fine-tuning too! For example, we can fine-tune a model with [DreamBooth](https://huggingface.co/docs/diffusers/main/en/training/dreambooth), and use it to render ourselves into different scenes. In this post, we are going to use our beloved Mr Potato Head as an example to show how to use ControlNet with DreamBooth. We can use the same ControlNet; however, instead of using Stable Diffusion 1.5, we are going to load the [Mr Potato Head model](https://huggingface.co/sd-dreambooth-library/mr-potato-head) into our pipeline - Mr Potato Head is a Stable Diffusion model fine-tuned on the Mr Potato Head concept using DreamBooth 🥔 Let's run the above commands again, keeping the same `controlnet` though!<jupyter_code>model_id = "sd-dreambooth-library/mr-potato-head" pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16, ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.enable_xformers_memory_efficient_attention()<jupyter_output><empty_output><jupyter_text>Now let's make Mr Potato Head pose for [Johannes Vermeer](https://en.wikipedia.org/wiki/Johannes_Vermeer)!<jupyter_code>generator = torch.manual_seed(2) prompt = "a photo of sks mr potato head, best quality, extremely detailed" output = pipe( prompt, canny_image, negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality", generator=generator, num_inference_steps=20, )<jupyter_output><empty_output><jupyter_text>It is noticeable that Mr Potato Head is not the best candidate, but he tried his best and did a pretty good job in capturing some of the essence 🍟<jupyter_code>output.images[0]<jupyter_output><empty_output><jupyter_text>Another exclusive application of ControlNet is that we can take a pose from one image and reuse it to generate a different image with the exact same pose. 
So in this next example, we are going to teach superheroes how to do yoga using [Open Pose ControlNet](https://huggingface.co/lllyasviel/sd-controlnet-openpose)! First, we will need to get some images of people doing yoga:<jupyter_code>urls = "yoga1.jpeg", "yoga2.jpeg", "yoga3.jpeg", "yoga4.jpeg" imgs = [ load_image("https://hf.co/datasets/YiYiXu/controlnet-testing/resolve/main/" + url) for url in urls ] image_grid(imgs, 2, 2)<jupyter_output><empty_output><jupyter_text>Now let's extract yoga poses using the OpenPose pre-processor that is handily available via `controlnet_aux`.<jupyter_code>from controlnet_aux import OpenposeDetector model = OpenposeDetector.from_pretrained("lllyasviel/ControlNet") poses = [model(img) for img in imgs] image_grid(poses, 2, 2)<jupyter_output><empty_output><jupyter_text>To use these yoga poses to generate new images, let's create an [Open Pose ControlNet](https://huggingface.co/lllyasviel/sd-controlnet-openpose). We will generate some super-hero images, conditioned on the yoga poses shown above. Let's go 🚀<jupyter_code>controlnet = ControlNetModel.from_pretrained( "fusing/stable-diffusion-v1-5-controlnet-openpose", torch_dtype=torch.float16 ) model_id = "runwayml/stable-diffusion-v1-5" pipe = StableDiffusionControlNetPipeline.from_pretrained( model_id, controlnet=controlnet, torch_dtype=torch.float16, ) pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config) pipe.enable_model_cpu_offload() pipe.enable_xformers_memory_efficient_attention()<jupyter_output><empty_output><jupyter_text>Now it's yoga time!<jupyter_code>generator = [torch.Generator(device="cpu").manual_seed(2) for i in range(4)] prompt = "super-hero character, best quality, extremely detailed" output = pipe( [prompt] * 4, poses, negative_prompt=["monochrome, lowres, bad anatomy, worst quality, low quality"] * 4, generator=generator, num_inference_steps=20, ) image_grid(output.images, 2, 2)<jupyter_output><empty_output>
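Since several pipelines are instantiated in a row here and ControlNet inference is, as noted earlier, a bit memory-expensive, one possible housekeeping step between pipelines is sketched below; it assumes a CUDA device and simply drops the previous pipeline before building the next one.<jupyter_code>import gc
import torch

# Drop the reference to the previous pipeline, then release cached GPU memory.
del pipe
gc.collect()
if torch.cuda.is_available():
    torch.cuda.empty_cache()<jupyter_output><empty_output>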
notebooks/diffusers/controlnet.ipynb/0
{ "file_path": "notebooks/diffusers/controlnet.ipynb", "repo_id": "notebooks", "token_count": 4606 }
161
<jupyter_start><jupyter_text>🧨 Fast Stable Diffusion in free Colab with JAX / Flax on TPU!🤗 Hugging Face [Diffusers](https://github.com/huggingface/diffusers) supports Flax since version `0.5.1`! This allows for snappy inference on Google TPUs, such as those available in Colab, Kaggle or through Google Cloud Platform.If you want more details about how Stable Diffusion works using JAX please refer to [our blog](https://huggingface.co/blog/stable_diffusion_jax) or [this Colab notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/stable_diffusion.ipynb). Initial Steps<jupyter_code>#@title Install required libraries !pip install huggingface_hub==0.10.0 gradio #@title Login to the Hugging Face Hub #@markdown Make sure you also have read and accept the LICENSE of the [Stable Diffusion model](https://huggingface.co/CompVis/stable-diffusion-v1-4), otherwise you may find an error from huggingface_hub import notebook_login !git config --global credential.helper store notebook_login()<jupyter_output><empty_output><jupyter_text>SetupRun all cells for setting up JAX and the model<jupyter_code>#@title Set up JAX #@markdown If you see an error, make sure you are using a TPU backend. Select `Runtime` in the menu above, then select the option "Change runtime type" and then select `TPU` under the `Hardware accelerator` setting. !pip install --upgrade jax jaxlib import jax.tools.colab_tpu jax.tools.colab_tpu.setup_tpu('tpu_driver_20221011') !pip install flax diffusers transformers ftfy jax.devices() #@title Import required libraries import numpy as np import jax import jax.numpy as jnp from pathlib import Path from jax import pmap from flax.jax_utils import replicate from flax.training.common_utils import shard from PIL import Image from huggingface_hub import notebook_login from diffusers import FlaxStableDiffusionPipeline import torch def image_grid(imgs, rows, cols): w,h = imgs[0].size grid = Image.new('RGB', size=(cols*w, rows*h)) for i, img in enumerate(imgs): grid.paste(img, box=(i%cols*w, i//cols*h)) return grid #@title Load the model #@markdown It's safe to ignore the warning messages, everything is okay pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", revision="bf16", dtype=jnp.bfloat16) p_params = replicate(params)<jupyter_output><empty_output><jupyter_text>Run!<jupyter_code>#@title Set and go! #@markdown First run takes ~50s as it compiles stuff. Then, it should take around ~8s per prompt! prompt = "the spirit of a tamagotchi wandering in the city of Vienna" #@param {type:"string"} num_inference_steps = 50 #@param {type:"integer"} seed = -1 #@param {type:"integer"} #@markdown `-1` will set a random seed. 
You can replace that to any integer for reproducible results if(seed == -1): import random random_int = random.randint(0, 2147483647) real_seed = random_int else: real_seed = seed prng_seed = jax.random.PRNGKey(real_seed) prng_seed = jax.random.split(prng_seed, jax.device_count()) num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, p_params, prng_seed, num_inference_steps, jit=True).images images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) if(seed == -1): print(f"Seed used {real_seed}") image_grid(images_pil, 2, 4) #@title Easy to use and shareble UI with Gradio #@markdown Run your demo using a Gradio UI like on this screenshot #@markdown <img src="https://i.imgur.com/H6MtbI5.png" width="900" /> import gradio as gr def inference(prompt, seed): all_images = [] print(seed) prng_seed = jax.random.PRNGKey(int(seed)) prng_seed = jax.random.split(prng_seed, jax.device_count()) num_samples = jax.device_count() prompt = num_samples * [prompt] prompt_ids = pipeline.prepare_inputs(prompt) prompt_ids = shard(prompt_ids) images = pipeline(prompt_ids, p_params, prng_seed, num_inference_steps, jit=True).images images_pil = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) return images_pil import random random_int = random.randint(0, 2147483647) with gr.Blocks() as demo: with gr.Row(): with gr.Column(): prompt = gr.Textbox(label="prompt") seed = gr.Number(label="seed", value=random_int) run = gr.Button(value="Run") with gr.Column(): gallery = gr.Gallery(show_label=False).style(grid=[2]) run.click(inference, inputs=[prompt, seed], outputs=gallery) gr.Examples([["the spirit of a tamagotchi wandering in the city of Vienna", 1,1]], [prompt], gallery, inference, cache_examples=False) demo.launch(debug=True)<jupyter_output>Colab notebook detected. This cell will run indefinitely so that you can see errors and logs. To turn off, set debug=False in launch(). Running on public URL: https://28004.gradio.app This share link expires in 72 hours. For free permanent hosting, check out Spaces: https://huggingface.co/spaces
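Because JAX dispatches work asynchronously, timing the pipeline naively can be misleading; a minimal sketch like the following (reusing the objects defined above, with an illustrative prompt) is one way to verify the "slow first run, fast afterwards" behaviour by blocking until the TPU work has actually finished:<jupyter_code>import time

def timed_generation(text):
    ids = pipeline.prepare_inputs(num_samples * [text])
    ids = shard(ids)
    start = time.time()
    imgs = pipeline(ids, p_params, prng_seed, num_inference_steps, jit=True).images
    imgs.block_until_ready()  # wait for the device computation to complete
    print(f"{time.time() - start:.1f}s")
    return imgs

timed_generation("a watercolor painting of Vienna")  # first call pays the compilation cost
timed_generation("a watercolor painting of Vienna")  # later calls reuse the compiled program<jupyter_output><empty_output>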
notebooks/diffusers/stable_diffusion_fast_jax.ipynb/0
{ "file_path": "notebooks/diffusers/stable_diffusion_fast_jax.ipynb", "repo_id": "notebooks", "token_count": 1808 }
162
<jupyter_start><jupyter_text>Before we can browse the rest of the notebook, we need to install the dependencies: this example uses `datasets` and `transformers`. To use TPUs on colab, we need to install `torch_xla`, and the last line installs `accelerate` from source, since the features we are using are very recent and not released yet.<jupyter_code>! pip install datasets transformers evaluate ! pip install cloud-tpu-client==0.10 torch==2.0.0 ! pip install https://storage.googleapis.com/tpu-pytorch/wheels/colab/torch_xla-2.0-cp310-cp310-linux_x86_64.whl ! pip install git+https://github.com/huggingface/accelerate<jupyter_output><empty_output><jupyter_text>Here are all the imports we will need for this notebook.<jupyter_code>import torch from torch.utils.data import DataLoader from accelerate import Accelerator, DistributedType from datasets import load_dataset, load_metric from transformers import ( AdamW, AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed, ) from tqdm.auto import tqdm import datasets import transformers<jupyter_output>WARNING:root:TPU has started up successfully with version pytorch-1.8<jupyter_text>This notebook can run with any model checkpoint on the [model hub](https://huggingface.co/models) that has a version with a classification head. Here we select [`bert-base-cased`](https://huggingface.co/bert-base-cased).<jupyter_code>model_checkpoint = "bert-base-cased"<jupyter_output><empty_output><jupyter_text>The next two sections explain how we load and prepare our data for our model. If you are only interested in seeing how 🤗 Accelerate works, feel free to skip them (but make sure to execute all cells!) Load the data To load the dataset, we use the `load_dataset` function from 🤗 Datasets. It will download and cache it (so the download won't happen if we restart the notebook).<jupyter_code>raw_datasets = load_dataset("glue", "mrpc")<jupyter_output>WARNING:datasets.builder:Reusing dataset glue (/root/.cache/huggingface/datasets/glue/mrpc/1.0.0/dacbe3125aa31d7f70367a07a8a9e72a5a0bfeb5fc42e75c9db75b96da6053ad)<jupyter_text>The `raw_datasets` object itself is a [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasetdict), which contains one key for the training, validation and test set (with more keys for the mismatched validation and test set in the special case of `mnli`).<jupyter_code>raw_datasets<jupyter_output><empty_output><jupyter_text>To access an actual element, you need to select a split first, then give an index:<jupyter_code>raw_datasets["train"][0]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>import datasets import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." 
picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, datasets.ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html())) show_random_elements(raw_datasets["train"])<jupyter_output><empty_output><jupyter_text>Preprocess the data Before we can feed those texts to our model, we need to preprocess them. This is done by a 🤗 Transformers `Tokenizer`, which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put them in a format the model expects, as well as generate the other inputs that the model requires. To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure: we get a tokenizer that corresponds to the model architecture we want to use, and we download the vocabulary used when pretraining this specific checkpoint. That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>By default (unless you pass `use_fast=False` to the call above) it will use one of the fast tokenizers (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, but if you got an error with the previous call, remove that argument. You can directly call this tokenizer on one sentence or a pair of sentences:<jupyter_code>tokenizer("Hello, this one sentence!", "And this sentence goes with it.")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later); you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested. We can then write the function that will preprocess our samples. We just feed them to the `tokenizer` with the argument `truncation=True`. We also need all of our samples to have the same length (we will train on TPU and they need fixed shapes, so we won't pad to the maximum length of a batch), which is done with `padding="max_length"`. The `max_length` argument is used both for the truncation and padding (short inputs are padded to that length and long inputs are truncated to it).<jupyter_code>def tokenize_function(examples): outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, padding="max_length", max_length=128) return outputs<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>tokenize_function(raw_datasets['train'][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of the `dataset` object we created earlier. 
This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command.<jupyter_code>tokenized_datasets = raw_datasets.map(tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"])<jupyter_output><empty_output><jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently.Lastly, we remove the columns that our model will not use. We also need to rename the `label` column to `labels` as this is what our model will expect.<jupyter_code>tokenized_datasets = tokenized_datasets.rename_column("label", "labels")<jupyter_output><empty_output><jupyter_text>To double-check we only have columns that are accepted as arguments for the model we will instantiate, we can look at them here.<jupyter_code>tokenized_datasets["train"].features<jupyter_output><empty_output><jupyter_text>The model we will be using is a `BertModelForSequenceClassification`. We can check its signature in the [Transformers documentation](https://huggingface.co/transformers/model_doc/bert.htmltransformers.BertForSequenceClassification) and all seems to be right! The last step is to set our datasets in the `"torch"` format, so that each item in it is now a dictionary with tensor values.<jupyter_code>tokenized_datasets.set_format("torch")<jupyter_output><empty_output><jupyter_text>A first look at the model Now that our data is ready, we can download the pretrained model and fine-tune it. Since all our tasks are about sentence classification, we use the `AutoModelForSequenceClassification` class. Like with the tokenizer, the from_pretrained method will download and cache the model for us. The only thing we have to specify is the number of labels for our problem (which is 2 here):<jupyter_code>from transformers import AutoModelForSequenceClassification model = AutoModelForSequenceClassification.from_pretrained(model_checkpoint, num_labels=2)<jupyter_output>Some weights of the model checkpoint at bert-base-cased were not used when initializing BertForSequenceClassification: ['cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias'] - This IS expected if you are initializing BertForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). 
- This IS NOT expected if you are initializing BertForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of BertForSequenceClassification were not initialized from the model checkpoint at b[...]<jupyter_text>The warning is telling us we are throwing away some weights (the weights of the `cls.predictions` and `cls.seq_relationship` heads) and randomly initializing some others (the `classifier` layer). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. Note that we are only creating the model here to look at it and debug problems. We will create the model we will train inside our training function: to train on TPU in colab, we have to create a big training function that will be executed on each core of the TPU. It's fine to use the datasets defined before (they will be copied to each TPU core) but the model itself will need to be re-instantiated and placed on each device for it to work. Now, to get the data, we need to define our training and evaluation dataloaders. Again, we only create them here for debugging purposes; they will be re-instantiated in our training function, which is why we define a function that builds them.<jupyter_code>def create_dataloaders(train_batch_size=8, eval_batch_size=32): train_dataloader = DataLoader( tokenized_datasets["train"], shuffle=True, batch_size=train_batch_size ) eval_dataloader = DataLoader( tokenized_datasets["validation"], shuffle=False, batch_size=eval_batch_size ) return train_dataloader, eval_dataloader<jupyter_output><empty_output><jupyter_text>Let's have a look at our train and evaluation dataloaders to check that a batch can go through the model.<jupyter_code>train_dataloader, eval_dataloader = create_dataloaders()<jupyter_output><empty_output><jupyter_text>We just loop through one batch. Since our dataset elements are dictionaries of tensors, it's the same for our batch and we can have a quick look at all the shapes. Note that this cell takes a bit of time to execute since we run a batch of our data through the model on the CPU (if you changed the checkpoint to a bigger model, it might take too much time so comment it out). ⚠ **WARNING: Running this cell will cause training_function to malfunction, as model will be used before notebook_launcher**<jupyter_code>for batch in train_dataloader: print({k: v.shape for k, v in batch.items()}) outputs = model(**batch) break<jupyter_output>{'attention_mask': torch.Size([8, 128]), 'input_ids': torch.Size([8, 128]), 'labels': torch.Size([8]), 'token_type_ids': torch.Size([8, 128])}<jupyter_text>The output of our model is a `SequenceClassifierOutput`, with the `loss` (since we provided labels) and `logits` (of shape 8, our batch size, by 2, the number of labels).<jupyter_code>outputs<jupyter_output><empty_output><jupyter_text>The last piece we will need for the model evaluation is the metric. 
The `datasets` library provides a function `load_metric` that allows us to easily create a `datasets.Metric` object we can use.<jupyter_code>metric = load_metric("glue", "mrpc")<jupyter_output><empty_output><jupyter_text>To use this object on some predictions, we call the `compute` method to get our metric results:<jupyter_code>predictions = outputs.logits.detach().argmax(dim=-1) metric.compute(predictions=predictions, references=batch["labels"])<jupyter_output><empty_output><jupyter_text>Unsurprisingly, our model with its random head does not perform well, which is why we need to fine-tune it! Fine-tuning the model We are now ready to fine-tune this model on our dataset. As mentioned before, everything related to training needs to be in one big training function that will be executed on each TPU core, thanks to our `notebook_launcher`. It will use this dictionary of hyperparameters, so tweak anything you like in here!<jupyter_code>hyperparameters = { "learning_rate": 2e-5, "num_epochs": 3, "train_batch_size": 8, # Actual batch size will be this x 8 "eval_batch_size": 32, # Actual batch size will be this x 8 "seed": 42, }<jupyter_output><empty_output><jupyter_text>The two most important things to remember for training on TPUs are that your accelerator object has to be defined inside your training function, and your model should be created outside the training function. If you define your Accelerator in another cell that gets executed before the final launch (for debugging), you will need to restart your notebook, as the line `accelerator = Accelerator()` needs to be executed for the first time inside the training function spawned on each TPU core. This is because that line will look for a TPU device, and if you set it outside of the distributed training launched by `notebook_launcher`, it will perform setup that cannot be undone in your runtime and you will only have access to one TPU core until you restart the notebook. The reason we declare the model outside the loop is because on a TPU, when launched from a notebook, the same singular model object is used and passed back and forth between all the cores automatically. Since we can't explore each piece in separate cells, comments have been left in the code. This is all pretty standard and you will notice how little the code changes from a regular training loop! The main lines added are: - `accelerator = Accelerator()` to initialize the distributed setup, - sending all objects to `accelerator.prepare`, - replacing `loss.backward()` with `accelerator.backward(loss)`, - using `accelerator.gather` to gather all predictions and labels before storing them in our list of predictions/labels, - truncating predictions and labels, as the prepared evaluation dataloader has a few more samples to make batches of the same size on each process. The first three are for distributed training, the last two for distributed evaluation. 
If you don't care about distributed evaluation, you can also just replace that part by your standard evaluation loop launched on the main process only.Other changes (which are purely cosmetic to make the output of the training readable) are:- some logging behavior behind a `if accelerator.is_main_process:`,- disable the progress bar if `accelerator.is_main_process` is `False`,- use `accelerator.print` instead of `print`.<jupyter_code>def training_function(model): # Initialize accelerator accelerator = Accelerator() # To have only one message (and not 8) per logs of Transformers or Datasets, we set the logging verbosity # to INFO for the main process only. if accelerator.is_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() train_dataloader, eval_dataloader = create_dataloaders( train_batch_size=hyperparameters["train_batch_size"], eval_batch_size=hyperparameters["eval_batch_size"] ) # The seed need to be set before we instantiate the model, as it will determine the random head. set_seed(hyperparameters["seed"]) # Instantiate optimizer optimizer = AdamW(params=model.parameters(), lr=hyperparameters["learning_rate"]) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. model, optimizer, train_dataloader, eval_dataloader = accelerator.prepare( model, optimizer, train_dataloader, eval_dataloader ) num_epochs = hyperparameters["num_epochs"] # Instantiate learning rate scheduler after preparing the training dataloader as the prepare method # may change its length. lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=100, num_training_steps=len(train_dataloader) * num_epochs, ) # Instantiate a progress bar to keep track of training. Note that we only enable it on the main # process to avoid having 8 progress bars. progress_bar = tqdm(range(num_epochs * len(train_dataloader)), disable=not accelerator.is_main_process) # Now we train the model for epoch in range(num_epochs): model.train() for step, batch in enumerate(train_dataloader): outputs = model(**batch) loss = outputs.loss accelerator.backward(loss) optimizer.step() lr_scheduler.step() optimizer.zero_grad() progress_bar.update(1) model.eval() all_predictions = [] all_labels = [] for step, batch in enumerate(eval_dataloader): with torch.no_grad(): outputs = model(**batch) predictions = outputs.logits.argmax(dim=-1) # We gather predictions and labels from the 8 TPUs to have them all. all_predictions.append(accelerator.gather(predictions)) all_labels.append(accelerator.gather(batch["labels"])) # Concatenate all predictions and labels. # The last thing we need to do is to truncate the predictions and labels we concatenated # together as the prepared evaluation dataloader has a little bit more elements to make # batches of the same size on each process. all_predictions = torch.cat(all_predictions)[:len(tokenized_datasets["validation"])] all_labels = torch.cat(all_labels)[:len(tokenized_datasets["validation"])] eval_metric = metric.compute(predictions=all_predictions, references=all_labels) # Use accelerator.print to print only on the main process. accelerator.print(f"epoch {epoch}:", eval_metric)<jupyter_output><empty_output><jupyter_text>And we're ready for launch! 
It's super easy with the `notebook_launcher` from the Accelerate library.<jupyter_code>from accelerate import notebook_launcher notebook_launcher(training_function, (model,))<jupyter_output>loading configuration file https://huggingface.co/bert-base-cased/resolve/main/config.json from cache at /root/.cache/huggingface/transformers/a803e0468a8fe090683bdc453f4fac622804f49de86d7cecaee92365d4a0f829.a64a22196690e0e82ead56f388a3ef3a50de93335926ccfa20610217db589307 Model config BertConfig { "architectures": [ "BertForMaskedLM" ], "attention_probs_dropout_prob": 0.1, "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "position_embedding_type": "absolute", "transformers_version": "4.5.1", "type_vocab_size": 2, "use_cache": true, "vocab_size": 28996 } loading weights file https://huggingface.co/bert-base-cased/resolve/main/pytorch_model.bin from cache at /root/.cache/huggingface/transfo[...]
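The same `training_function` is not tied to TPUs; as a minimal sketch (the process count is an illustrative assumption), on a machine with several GPUs one could ask `notebook_launcher` to spawn a specific number of processes instead of relying on the TPU default of 8 cores:<jupyter_code>from accelerate import notebook_launcher

# Illustrative: launch the same training function on 2 GPU processes.
# On a single-GPU or CPU-only machine, num_processes=1 also works.
notebook_launcher(training_function, (model,), num_processes=2)<jupyter_output><empty_output>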
notebooks/examples/accelerate_examples/simple_nlp_example.ipynb/0
{ "file_path": "notebooks/examples/accelerate_examples/simple_nlp_example.ipynb", "repo_id": "notebooks", "token_count": 6454 }
163
<jupyter_start><jupyter_text>Fine-tune Pix2Struct using Hugging Face `transformers` and `datasets` 🤗This tutorial is largely based from the [GiT tutorial](https://colab.research.google.com/drive/1HLxgrG7xZJ9FvXckNG61J72FkyrbqKAA?usp=sharing) on how to fine-tune GiT on a custom image captioning dataset. Here we will use a dummy dataset of [football players](https://huggingface.co/datasets/ybelkada/football-dataset) ⚽ that is uploaded on the Hub. The images have been manually selected together with the captions. Check the 🤗 [documentation](https://huggingface.co/docs/datasets/image_dataset) on how to create and upload your own image-text dataset. Model overviewIn this tutorial, we will load an architecture called Pix2Struct recently released by Google and made them available on 🤗 Hub! This architecture differs from other models from its pretraining procedure and the way the model extract patches from the image by using the aspect-ratio preserving patch extraction method.The release came with no more than 20 checkpoints! As each checkpoint has been finetuned on specific domain, let's finetune our own Pix2Struct to our target domain: Football players! For that we will use the [`google/pix2struct-base`](https://huggingface.co/ybelkada/pix2struct-base) which corresponds to a general usecase model that you can load to fine-tune your model. Set-up environment Run the cells below to setup the environment<jupyter_code>!pip install -q git+https://github.com/huggingface/transformers.git !pip install -q datasets<jupyter_output> ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 469.0/469.0 KB 20.1 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.0/1.0 MB 63.8 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 132.9/132.9 KB 17.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 212.2/212.2 KB 23.2 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 110.5/110.5 KB 12.9 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 264.6/264.6 KB 24.1 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 199.2/199.2 KB 18.9 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 114.2/114.2 KB 14.9 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━[...]<jupyter_text>Load the image captioning datasetLet's load the image captioning dataset, you just need few lines of code for that. The dataset only consists of 6 images that we have manually labeled for the sake of the tutorial.<jupyter_code>from datasets import load_dataset dataset = load_dataset("ybelkada/football-dataset", split="train")<jupyter_output><empty_output><jupyter_text>Let's retrieve the caption of the first example:<jupyter_code>dataset[0]["text"]<jupyter_output><empty_output><jupyter_text>And the corresponding image<jupyter_code>dataset[0]["image"]<jupyter_output><empty_output><jupyter_text>Create PyTorch Dataset Understanding `max_patches` argumentThe paper introduces a new paradigm for processing the input image. It takes the image and create `n_patches` aspect-ratio preserving patches, and concatenates the remaining sequence with padding tokens to finally get `max_patches` patches. 
It appears that this argument is quite crucial for training and evaluation, as the model becomes very sensitive to this parameter. For the sake of our example, we will fine-tune a model with `max_patches=1024`. Note that most of the `-base` models have been fine-tuned with `max_patches=2048`, and `4096` for `-large` models.<jupyter_code>from torch.utils.data import Dataset, DataLoader MAX_PATCHES = 1024 class ImageCaptioningDataset(Dataset): def __init__(self, dataset, processor): self.dataset = dataset self.processor = processor def __len__(self): return len(self.dataset) def __getitem__(self, idx): item = self.dataset[idx] encoding = self.processor(images=item["image"], return_tensors="pt", add_special_tokens=True, max_patches=MAX_PATCHES) encoding = {k:v.squeeze() for k,v in encoding.items()} encoding["text"] = item["text"] return encoding<jupyter_output><empty_output><jupyter_text>Load model and processor<jupyter_code>from transformers import AutoProcessor, Pix2StructForConditionalGeneration processor = AutoProcessor.from_pretrained("ybelkada/pix2struct-base") model = Pix2StructForConditionalGeneration.from_pretrained("ybelkada/pix2struct-base")<jupyter_output><empty_output><jupyter_text>Now that we have loaded the processor, let's load the dataset and the dataloader:<jupyter_code>import torch def collator(batch): new_batch = {"flattened_patches":[], "attention_mask":[]} texts = [item["text"] for item in batch] text_inputs = processor(text=texts, padding="max_length", return_tensors="pt", add_special_tokens=True, max_length=20) new_batch["labels"] = text_inputs.input_ids for item in batch: new_batch["flattened_patches"].append(item["flattened_patches"]) new_batch["attention_mask"].append(item["attention_mask"]) new_batch["flattened_patches"] = torch.stack(new_batch["flattened_patches"]) new_batch["attention_mask"] = torch.stack(new_batch["attention_mask"]) return new_batch train_dataset = ImageCaptioningDataset(dataset, processor) train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=2, collate_fn=collator)<jupyter_output><empty_output><jupyter_text>Train the model Let's train the model! Simply run the cell below to train the model. We have observed that finding the best hyper-parameters was quite challenging and required a lot of trial and error, as the model can easily enter a "collapse mode" (always predicting the same output, no matter the input) if the hyper-parameters are not chosen correctly. 
In this example, we found out that using `AdamW` optimizer with `lr=1e-5` seemed to be the best approach.Let's also print the generation output of the model each 20 epochs!Bear in mind that the model took some time to converge, for instance to get decent results we had to let the script run for ~1hour.<jupyter_code>import torch EPOCHS = 5000 optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5) device = "cuda" if torch.cuda.is_available() else "cpu" model.to(device) model.train() for epoch in range(EPOCHS): print("Epoch:", epoch) for idx, batch in enumerate(train_dataloader): labels = batch.pop("labels").to(device) flattened_patches = batch.pop("flattened_patches").to(device) attention_mask = batch.pop("attention_mask").to(device) outputs = model(flattened_patches=flattened_patches, attention_mask=attention_mask, labels=labels) loss = outputs.loss print("Loss:", loss.item()) loss.backward() optimizer.step() optimizer.zero_grad() if (epoch + 1) % 20 == 0: model.eval() predictions = model.generate(flattened_patches=flattened_patches, attention_mask=attention_mask) print("Predictions:", processor.batch_decode(predictions, skip_special_tokens=True)) model.train()<jupyter_output><empty_output><jupyter_text>Inference Let's check the results on our train dataset<jupyter_code># load image example = dataset[0] image = example["image"] image # prepare image for the model model.eval() inputs = processor(images=image, return_tensors="pt", max_patches=512).to(device) flattened_patches = inputs.flattened_patches attention_mask = inputs.attention_mask generated_ids = model.generate(flattened_patches=flattened_patches, attention_mask=attention_mask, max_length=50) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] print(generated_caption)<jupyter_output><empty_output><jupyter_text>Load from the Hub Once trained you can push the model and processor on the Hub to use them later. Meanwhile you can play with the model that we have fine-tuned!<jupyter_code>import torch from transformers import Pix2StructForConditionalGeneration, AutoProcessor device = "cuda" if torch.cuda.is_available() else "cpu" model = Pix2StructForConditionalGeneration.from_pretrained("ybelkada/pix2struct-base-football").to(device) processor = AutoProcessor.from_pretrained("ybelkada/pix2struct-base-football")<jupyter_output><empty_output><jupyter_text>Let's check the results on our train dataset!<jupyter_code>from matplotlib import pyplot as plt fig = plt.figure(figsize=(18, 14)) # prepare image for the model for i, example in enumerate(dataset): image = example["image"] inputs = processor(images=image, return_tensors="pt", max_patches=1024).to(device) flattened_patches = inputs.flattened_patches attention_mask = inputs.attention_mask generated_ids = model.generate(flattened_patches=flattened_patches, attention_mask=attention_mask, max_length=50) generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] fig.add_subplot(2, 3, i+1) plt.imshow(image) plt.axis("off") plt.title(f"Generated caption: {generated_caption}")<jupyter_output>A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer. A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer. A decoder-only architecture is being used, but right-padding was detected! 
For correct generation results, please set `padding_side='left'` when initializing the tokenizer. A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer. A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='left'` when initializing the tokenizer. A decoder-only architecture is being used, but right-padding was detected! For correct generation results, please set `padding_side='le[...]
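The closing section above mentions pushing the trained model and processor to the Hub so they can be reloaded later; a minimal sketch of that step (the repository id is a placeholder to replace with your own) could look like this:<jupyter_code># Both the model and the processor expose a push_to_hub helper.
# Replace the repository id below with <your-username>/<repo-name>.
model.push_to_hub("your-username/pix2struct-base-football")
processor.push_to_hub("your-username/pix2struct-base-football")<jupyter_output><empty_output>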
notebooks/examples/image_captioning_pix2struct.ipynb/0
{ "file_path": "notebooks/examples/image_captioning_pix2struct.ipynb", "repo_id": "notebooks", "token_count": 3848 }
164
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.<jupyter_code>#! pip install transformers datasets huggingface_hub<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has an install from the last version of those libraries.To be able to share your model with the community and generate results like the one shown in the picture below via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then uncomment the following cell and input your token:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS and setup Git if you haven't already. On Linux, uncomment the following instructions and adapt with your name and email. On Windows, please download git-lfs at https://git-lfs.github.com/<jupyter_code># !apt install git-lfs # !git config --global user.email "[email protected]" # !git config --global user.name "Your Name"<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.16.0 since some of the functionality we use was introduced in that version:<jupyter_code>import transformers print(transformers.__version__)<jupyter_output>4.16.0.dev0<jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("language_modeling_from_scratch_notebook", framework="tensorflow")<jupyter_output><empty_output><jupyter_text>Train a language model In this notebook, we'll see how to train a [🤗 Transformers](https://github.com/huggingface/transformers) model on a language modeling task. We will cover two types of language modeling tasks which are:- Causal language modeling: the model has to predict the next token in the sentence (so the labels are the same as the inputs shifted to the right). To make sure the model does not cheat, its attention computations are masked so that tokens cannot attend to tokens to their right, as this would result in label leakage.- Masked language modeling: the model has to predict some tokens that are masked in the input. 
It still has access to the whole sentence, so it can use the tokens before and after the masked tokens to predict their value. We will see how to easily load and preprocess the dataset for each one of those tasks, and how to use Keras to fine-tune a model on it. This notebook assumes you have trained a tokenizer on the corpus you are using; see the [How to train a tokenizer](https://github.com/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb) notebook ([open in colab](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/tokenizer_training.ipynb)). A script version of this notebook, which you can run directly in a distributed environment or on a TPU, is available in our [examples folder](https://github.com/huggingface/transformers/tree/master/examples). Preparing the dataset For each of those tasks, we will use the [Wikitext 2](https://huggingface.co/datasets/wikitext) dataset as an example. You can load it very easily with the 🤗 Datasets library.<jupyter_code>from datasets import load_dataset datasets = load_dataset("wikitext", "wikitext-2-raw-v1")<jupyter_output>Reusing dataset wikitext (/home/matt/.cache/huggingface/datasets/wikitext/wikitext-2-raw-v1/1.0.0/a241db52902eaf2c6aa732210bead40c090019a499ceb13bcbfa3f8ab646a126)<jupyter_text>You can replace the dataset above with any dataset hosted on [the hub](https://huggingface.co/datasets) or use your own files. Just uncomment the following cell and replace the paths with your own input files:<jupyter_code># datasets = load_dataset("text", data_files={"train": path_to_train.txt, "validation": path_to_validation.txt})<jupyter_output><empty_output><jupyter_text>You can also load datasets from a CSV or a JSON file; see the [full documentation](https://huggingface.co/docs/datasets/loading_datasets.html#from-local-files) for more information. To access an actual element, you need to select a split first, then give an index:<jupyter_code>datasets["train"][10]<jupyter_output><empty_output><jupyter_text>To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset.<jupyter_code>from datasets import ClassLabel import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len( dataset ), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset) - 1) while pick in picks: pick = random.randint(0, len(dataset) - 1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) display(HTML(df.to_html())) show_random_elements(datasets["train"])<jupyter_output><empty_output><jupyter_text>As we can see, some of the texts are a full paragraph of a Wikipedia article while others are just titles or empty lines. Causal Language modeling For causal language modeling (CLM) we are going to take all the texts in our dataset, tokenize them and concatenate them. Then we will split them into examples of a fixed sequence length. This way the model will receive chunks of contiguous text that may look like:```part of text 1```or ```end of text 1 [BOS_TOKEN] beginning of text 2```depending on whether they span multiple original texts or not. The labels will be the same as the inputs, shifted to the right. We will use the [`gpt2`](https://huggingface.co/gpt2) architecture for this example. 
You can pick any of the checkpoints listed [here](https://huggingface.co/models?filter=causal-lm) instead. For the tokenizer, you can optionally replace the checkpoint with one that you trained yourself.<jupyter_code>model_checkpoint = "gpt2" tokenizer_checkpoint = "sgugger/gpt2-like-tokenizer" from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(tokenizer_checkpoint)<jupyter_output><empty_output><jupyter_text>We can now call the tokenizer on all our texts. This is very simple, using the [`map`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasets.Dataset.map) method from the Datasets library. First we define a function that calls the tokenizer on our texts:<jupyter_code>def tokenize_function(examples): return tokenizer(examples["text"])<jupyter_output><empty_output><jupyter_text>Then we apply it to all the splits in our `datasets` object, using `batched=True` and 4 processes to speed up the preprocessing. We won't need the `text` column afterward, so we discard it.<jupyter_code>tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=4, remove_columns=["text"] )<jupyter_output><empty_output><jupyter_text>If we now look at an element of our datasets, we will see the text have been replaced by the `input_ids` the model will need:<jupyter_code>tokenized_datasets["train"][1]<jupyter_output><empty_output><jupyter_text>Now for the harder part: We need to concatenate all our texts together, and then split the result into chunks of a fixed size, which we will call `block_size`. To do this, we will use the `map` method again, with the option `batched=True`. When we use `batched=True`, the function we pass to `map()` will be passed multiple inputs at once, allowing us to group them into more or fewer examples than we had in the input. This allows us to create our new fixed-length samples.We can use any `block_size`, but high values might be too big to fit in your GPU RAM, so let's use something a bit smaller: 128.<jupyter_code># block_size = tokenizer.model_max_length block_size = 128<jupyter_output><empty_output><jupyter_text>Then we write the preprocessing function that will group our texts:<jupyter_code>def group_texts(examples): # Concatenate all texts. concatenated_examples = {k: sum(examples[k], []) for k in examples.keys()} total_length = len(concatenated_examples[list(examples.keys())[0]]) # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can # customize this part to your needs. total_length = (total_length // block_size) * block_size # Split by chunks of max_len. result = { k: [t[i : i + block_size] for i in range(0, total_length, block_size)] for k, t in concatenated_examples.items() } result["labels"] = result["input_ids"].copy() return result<jupyter_output><empty_output><jupyter_text>Note that we duplicate the inputs for our labels, without shifting them, even though we told you the labels need to be shifted! This is because CausalLM models in the 🤗 Transformers library automatically apply right-shifting to the inputs, so we don't need to do it manually.Also note that by default, the `map` method will send a batch of 1,000 examples to be treated by the preprocessing function. So here, we will drop the remainder to make the concatenated tokenized texts a multiple of `block_size` every 1,000 examples. You can adjust this behavior by passing a higher batch size (which will also be processed slower). 
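To make the remainder-dropping behaviour described above concrete, here is a tiny, self-contained sketch of the same grouping logic, using made-up token IDs and a toy block size of 4 instead of 128. The helper below is only for illustration and is not used anywhere else in this notebook.

```python
# Toy version of the grouping logic above, with hypothetical token IDs and block_size=4.
def toy_group_texts(examples, block_size=4):
    concatenated = {k: sum(examples[k], []) for k in examples.keys()}
    # Drop the remainder so every chunk has exactly block_size tokens.
    total_length = (len(concatenated[list(examples.keys())[0]]) // block_size) * block_size
    return {
        k: [t[i : i + block_size] for i in range(0, total_length, block_size)]
        for k, t in concatenated.items()
    }

print(toy_group_texts({"input_ids": [[1, 2, 3], [4, 5, 6, 7], [8, 9, 10]]}))
# {'input_ids': [[1, 2, 3, 4], [5, 6, 7, 8]]} -- the remainder [9, 10] is dropped
```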
You can also speed-up the preprocessing by using multiprocessing:<jupyter_code>lm_datasets = tokenized_datasets.map( group_texts, batched=True, batch_size=1000, num_proc=4, )<jupyter_output><empty_output><jupyter_text>And we can check our datasets have changed: now the samples contain chunks of `block_size` contiguous tokens, potentially spanning several of our original texts.<jupyter_code>tokenizer.decode(lm_datasets["train"][1]["input_ids"])<jupyter_output><empty_output><jupyter_text>Now that the data has been cleaned, we're ready to initialize our `Model`. First we create the model using the same config as our checkpoint, but initialized with random weights:<jupyter_code>from transformers import AutoConfig, TFAutoModelForCausalLM config = AutoConfig.from_pretrained(model_checkpoint) model = TFAutoModelForCausalLM.from_config(config)<jupyter_output>2022-01-28 14:15:22.987842: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero 2022-01-28 14:15:22.992329: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero 2022-01-28 14:15:22.993015: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:936] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero 2022-01-28 14:15:22.994216: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags[...]<jupyter_text>Now let's set some hyperparameters like the learning rate and weight decay, as well as the model ID, if we want to upload our model to the Hub afterwards.<jupyter_code>learning_rate = 2e-5 weight_decay = 0.01 push_to_hub_model_id = f"{model_checkpoint}-wikitext2"<jupyter_output><empty_output><jupyter_text>Now we initialize our optimizer.<jupyter_code>from transformers import AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)<jupyter_output><empty_output><jupyter_text>Next, we compile our model. Note that most Transformers models compute loss internally, so we actually don't have to specify anything for that argument! You can of course set your own loss function if you want, but by default our models will choose the 'obvious' loss that matches their task, such as cross-entropy in the case of language modelling. The built-in loss will also correctly handle things like masking the loss on padding tokens, or unlabelled tokens in the case of masked language modelling, so we recommend using it unless you're an advanced user!We also use the `jit_compile` argument to compile the model with [XLA](https://www.tensorflow.org/xla). XLA compilation adds a delay at the start of training, but this is quickly repaid by faster training iterations after that. It has one downside, though - if the shape of your input changes at all, then it will need to rerun the compilation again! This isn't a problem for us in this notebook, because all of our examples are exactly the same length. 
Be careful with it when that isn't true, though - if you have a variable sequence length in your batches, then you might spend more time compiling your model than actually training, especially for small datasets!If you encounter difficulties when training with XLA, it's a good idea to remove the `jit_compile` argument and see if that fixes things. In fact, when debugging, it can be helpful to skip graph compilation entirely with the `run_eagerly=True` argument to [`compile()`](https://www.tensorflow.org/api_docs/python/tf/keras/Modelcompile). This will let you identify the exact line of code where problems arise, but it will significantly reduce your performance, so make sure to remove it again when you've fixed the problem!<jupyter_code>import tensorflow as tf model.compile(optimizer=optimizer, jit_compile=True)<jupyter_output><empty_output><jupyter_text>Next, we convert our datasets to `tf.data.Dataset`, which Keras understands natively. There are two ways to do this - we can use the slightly more low-level [`Dataset.to_tf_dataset()`](https://huggingface.co/docs/datasets/package_reference/main_classesdatasets.Dataset.to_tf_dataset) method, or we can use [`Model.prepare_tf_dataset()`](https://huggingface.co/docs/transformers/main_classes/modeltransformers.TFPreTrainedModel.prepare_tf_dataset). The main difference between these two is that the `Model` method can inspect the model to determine which column names it can use as input, which means you don't need to specify them yourself. It also supplies a data collator by default which is appropriate for most tasks.<jupyter_code>train_set = model.prepare_tf_dataset( lm_datasets["train"], shuffle=True, batch_size=16, ) validation_set = model.prepare_tf_dataset( lm_datasets["validation"], shuffle=False, batch_size=16, )<jupyter_output><empty_output><jupyter_text>Now we can train our model. We can also add a callback to sync up our model with the Hub - this allows us to resume training from other machines and even test the model's inference quality midway through training! Make sure to change the `username` if you do. If you don't want to do this, simply remove the callbacks argument in the call to `fit()`.<jupyter_code>from transformers.keras_callbacks import PushToHubCallback model_name = model_checkpoint.split("/")[-1] push_to_hub_model_id = f"{model_name}-finetuned-wikitext2" callback = PushToHubCallback( output_dir="./clm_from_scratch_model_save", tokenizer=tokenizer, hub_model_id=push_to_hub_model_id, ) model.fit(train_set, validation_data=validation_set, epochs=2, callbacks=[callback])<jupyter_output>/home/matt/PycharmProjects/notebooks/examples/clm_from_scratch_model_save is already a clone of https://huggingface.co/Rocketknight1/gpt2-finetuned-wikitext2. Make sure you pull the latest changes with `repo.git_pull()`.<jupyter_text>Once the training is completed, we can evaluate our model and get its loss on the validation set like this:<jupyter_code>eval_loss = model.evaluate(validation_set)<jupyter_output>121/121 [==============================] - 6s 51ms/step - loss: 6.3490<jupyter_text>The quality of language models is often measured in 'perplexity' rather than cross-entropy. To convert to perplexity, we simply raise e to the power of the cross-entropy loss.<jupyter_code>import math print(f"Perplexity: {math.exp(eval_loss):.2f}")<jupyter_output>Perplexity: 571.92<jupyter_text>The perplexity is still quite high since for this demo we trained on a small dataset for a small number of epochs. 
For a real LM training, you would need a larger dataset and more epochs. If you used the callback above, you can now share this model with all your friends, family or favorite pets: they can all load it with the identifier `"your-username/the-name-you-picked"`, so for instance:```python from transformers import TFAutoModelForCausalLM model = TFAutoModelForCausalLM.from_pretrained("your-username/my-awesome-model")``` Inference Models trained from scratch on small amounts of data will generally not output useful text - you'll need a much bigger dataset and a much longer training time before it starts writing text that you'd want to read! If you want to see an example of inference with causal language models, see the `language_modeling-tf` notebook, where we start with a pre-trained model and get higher-quality output much sooner as a result. Masked language modeling For masked language modeling (MLM) we are going to use the same preprocessing as before for our dataset with one additional step: we will randomly mask some tokens (by replacing them by `[MASK]`) and the labels will be adjusted to only include the masked tokens (we don't have to predict the non-masked tokens). If you use a tokenizer you trained yourself, make sure the `[MASK]` token is among the special tokens you passed during training!We will use the [`bert-base-cased`](https://huggingface.co/bert-base-cased) model for this example. You can pick any of the checkpoints listed [here](https://huggingface.co/models?filter=masked-lm) instead. For the tokenizer, replace the checkpoint with the one you trained.<jupyter_code>model_checkpoint = "bert-base-cased"<jupyter_output><empty_output><jupyter_text>We can apply the same tokenization function as before, we just need to update our tokenizer to use the checkpoint we just picked:<jupyter_code>tokenizer = AutoTokenizer.from_pretrained(model_checkpoint) tokenized_datasets = datasets.map( tokenize_function, batched=True, num_proc=4, remove_columns=["text"] )<jupyter_output>Token indices sequence length is longer than the specified maximum sequence length for this model (571 > 512). Running this sequence through the model will result in indexing errors Token indices sequence length is longer than the specified maximum sequence length for this model (554 > 512). Running this sequence through the model will result in indexing errors Token indices sequence length is longer than the specified maximum sequence length for this model (522 > 512). Running this sequence through the model will result in indexing errors Token indices sequence length is longer than the specified maximum sequence length for this model (657 > 512). Running this sequence through the model will result in indexing errors Token indices sequence length is longer than the specified maximum sequence length for this model (514 > 512). Running this sequence through the model will result in indexing errors<jupyter_text>And like before, we group texts together and chunk them in samples of length `block_size`. You can skip that step if your dataset is composed of individual sentences.<jupyter_code>lm_datasets = tokenized_datasets.map( group_texts, batched=True, batch_size=1000, num_proc=4, )<jupyter_output><empty_output><jupyter_text>The rest is very similar to what we used before, with two exceptions. 
First we use a model suitable for masked LM:<jupyter_code>from transformers import AutoConfig, TFAutoModelForMaskedLM config = AutoConfig.from_pretrained(model_checkpoint) model = TFAutoModelForMaskedLM.from_config(config)<jupyter_output><empty_output><jupyter_text>We redefine our hyperparameters and choose a new name:<jupyter_code>learning_rate = 2e-5 weight_decay = 0.01 push_to_hub_model_id = f"{model_checkpoint}-wikitext2"<jupyter_output><empty_output><jupyter_text>Now we initialize our optimizer.<jupyter_code>from transformers import AdamWeightDecay optimizer = AdamWeightDecay(learning_rate=learning_rate, weight_decay_rate=weight_decay)<jupyter_output><empty_output><jupyter_text>And as before, we leave the `loss` argument blank to use the internal loss, and use `jit_compile` to enable XLA.<jupyter_code>import tensorflow as tf model.compile(optimizer=optimizer, jit_compile=True)<jupyter_output>No loss specified in compile() - the model's internal loss computation will be used as the loss. Don't panic - this is a common way to train TensorFlow models in Transformers! Please ensure your labels are passed as keys in the input dict so that they are accessible to the model during the forward pass. To disable this behaviour, please pass a loss argument, or explicitly pass loss=None if you do not want your model to compute a loss.<jupyter_text>Finally, we use a special `data_collator`. The `data_collator` is a function that is responsible for taking the samples and batching them in tensors. In the previous example, we had nothing special to do, so we just used the default for this argument. Here we want to randomly mask tokens. We could do it as a pre-processing step (like the tokenization) but then the tokens would always be masked the same way at each epoch. By doing this step inside the `data_collator`, we ensure this random masking is done in a new way each time we go over the data.To do this masking for us, the library provides a `DataCollatorForLanguageModeling`. We can adjust the probability of the masking. Note that our data collators are designed to work for multiple frameworks, so ensure you set the `return_tensors='np'` argument to get NumPy arrays out - you don't want to accidentally get a load of `torch.Tensor` objects in the middle of your nice TF code! You could also use `return_tensors='tf'` to get TensorFlow tensors, but our TF dataset pipeline actually uses a NumPy loader internally, which is wrapped at the end with a `tf.data.Dataset`. 
As a result, `np` is usually more reliable and performant when you're using it!<jupyter_code>from transformers import DataCollatorForLanguageModeling data_collator = DataCollatorForLanguageModeling( tokenizer=tokenizer, mlm_probability=0.15, return_tensors="np" )<jupyter_output><empty_output><jupyter_text>Now we pass our data collator to the `prepare_tf_dataset()` argument.<jupyter_code>train_set = model.prepare_tf_dataset( lm_datasets["train"], shuffle=True, batch_size=16, collate_fn=data_collator, ) validation_set = model.prepare_tf_dataset( lm_datasets["validation"], shuffle=False, batch_size=16, collate_fn=data_collator, )<jupyter_output><empty_output><jupyter_text>And now we can train our model:<jupyter_code>from transformers.keras_callbacks import PushToHubCallback from tensorflow.keras.callbacks import TensorBoard model_name = model_checkpoint.split("/")[-1] push_to_hub_model_id = f"{model_name}-finetuned-wikitext2" tensorboard_callback = TensorBoard(log_dir="./mlm_from_scratch_model_save/logs") push_to_hub_callback = PushToHubCallback( output_dir="./mlm_from_scratch_model_save", tokenizer=tokenizer, hub_model_id=push_to_hub_model_id, ) callbacks = [tensorboard_callback, push_to_hub_callback] model.fit(train_set, validation_data=validation_set, epochs=2, callbacks=callbacks)<jupyter_output>/home/matt/PycharmProjects/notebooks/examples/mlm_from_scratch_model_save is already a clone of https://huggingface.co/Rocketknight1/bert-base-cased-finetuned-wikitext2. Make sure you pull the latest changes with `repo.git_pull()`.<jupyter_text>Like before, we can evaluate our model on the validation set. As training progresses, the perplexity will be much lower for MLM than for the CLM objective because for the MLM objective, we only have to make predictions for the masked tokens (which represent 15% of the total here) while having access to the rest of the tokens. It's thus an easier task for the model.<jupyter_code>eval_loss = model.evaluate(validation_set) print(f"Perplexity: {math.exp(eval_loss):.2f}")<jupyter_output>126/126 [==============================] - 6s 44ms/step - loss: 6.2834 Perplexity: 535.62
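If the push to the Hub above succeeded, you could query the resulting masked-LM with the `fill-mask` pipeline. The snippet below is only a sketch: the repository name is a placeholder for your own namespace, and with such a short training run the predictions will mostly be noise, but the plumbing is identical for a fully trained model.

```python
from transformers import pipeline

# "your-username" is a placeholder for the namespace you pushed to above.
mask_filler = pipeline("fill-mask", model="your-username/bert-base-cased-finetuned-wikitext2")
print(mask_filler("The capital of France is [MASK]."))
```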
notebooks/examples/language_modeling_from_scratch-tf.ipynb/0
{ "file_path": "notebooks/examples/language_modeling_from_scratch-tf.ipynb", "repo_id": "notebooks", "token_count": 7396 }
165
<jupyter_start><jupyter_text>If you're opening this Notebook on colab, you will probably need to install 🤗 Transformers and 🤗 Datasets. Uncomment the following cell and run it.<jupyter_code>#! pip install datasets transformers<jupyter_output><empty_output><jupyter_text>If you're opening this notebook locally, make sure your environment has the latest version of those libraries installed.To be able to share your model with the community and generate results via the inference API, there are a few more steps to follow.First you have to store your authentication token from the Hugging Face website (sign up [here](https://huggingface.co/join) if you haven't already!) then execute the following cell and input your token:<jupyter_code>from huggingface_hub import notebook_login notebook_login()<jupyter_output><empty_output><jupyter_text>Then you need to install Git-LFS. Uncomment the following instructions:<jupyter_code># !apt install git-lfs<jupyter_output><empty_output><jupyter_text>Make sure your version of Transformers is at least 4.11.0 since the functionality was introduced in that version:<jupyter_code>import transformers print(transformers.__version__)<jupyter_output><empty_output><jupyter_text>You can find a script version of this notebook to fine-tune your model in a distributed fashion using multiple GPUs or TPUs [here](https://github.com/huggingface/transformers/tree/master/examples/question-answering). We also quickly upload some telemetry - this tells us which examples and software versions are getting used so we know where to prioritize our maintenance efforts. We don't collect (or care about) any personally identifiable information, but if you'd prefer not to be counted, feel free to skip this step or delete this cell entirely.<jupyter_code>from transformers.utils import send_example_telemetry send_example_telemetry("question_answering_notebook", framework="pytorch")<jupyter_output><empty_output><jupyter_text>Fine-tuning a model on a question-answering task In this notebook, we will see how to fine-tune one of the [🤗 Transformers](https://github.com/huggingface/transformers) models on a question answering task, which is the task of extracting the answer to a question from a given context. We will see how to easily load a dataset for these kinds of tasks and use the `Trainer` API to fine-tune a model on it.**Note:** This notebook fine-tunes models that answer questions by extracting a substring of the context, not by generating new text. This notebook is built to run on any question answering task with the same format as SQUAD (version 1 or 2), with any model checkpoint from the [Model Hub](https://huggingface.co/models) as long as that model has a version with a question answering head and a fast tokenizer (check on [this table](https://huggingface.co/transformers/index.html#bigtable) if this is the case). It might just need some small adjustments if you decide to use a different dataset than the one used here. Depending on your model and the GPU you are using, you might need to adjust the batch size to avoid out-of-memory errors. Set those three parameters, then the rest of the notebook should run smoothly:<jupyter_code># This flag is the difference between SQUAD v1 or 2 (if you're using another dataset, it indicates if impossible # answers are allowed or not). 
squad_v2 = False model_checkpoint = "distilbert-base-uncased" batch_size = 16<jupyter_output><empty_output><jupyter_text>Loading the dataset We will use the [🤗 Datasets](https://github.com/huggingface/datasets) library to download the data and get the metric we need to use for evaluation (to compare our model to the benchmark). This can be easily done with the functions `load_dataset` and `load_metric`.<jupyter_code>from datasets import load_dataset, load_metric<jupyter_output><empty_output><jupyter_text>For our example here, we'll use the [SQUAD dataset](https://rajpurkar.github.io/SQuAD-explorer/). The notebook should work with any question answering dataset provided by the 🤗 Datasets library. If you're using your own dataset defined from a JSON or csv file (see the [Datasets documentation](https://huggingface.co/docs/datasets/loading_datasets.htmlfrom-local-files) on how to load them), it might need some adjustments in the names of the columns used.<jupyter_code>datasets = load_dataset("squad_v2" if squad_v2 else "squad")<jupyter_output>Reusing dataset squad (/home/sgugger/.cache/huggingface/datasets/squad/plain_text/1.0.0/4c81550d83a2ac7c7ce23783bd8ff36642800e6633c1f18417fb58c3ff50cdd7)<jupyter_text>The `datasets` object itself is [`DatasetDict`](https://huggingface.co/docs/datasets/package_reference/main_classes.htmldatasetdict), which contains one key for the training, validation and test set.<jupyter_code>datasets<jupyter_output><empty_output><jupyter_text>We can see the training, validation and test sets all have a column for the context, the question and the answers to those questions. To access an actual element, you need to select a split first, then give an index:<jupyter_code>datasets["train"][0]<jupyter_output><empty_output><jupyter_text>We can see the answers are indicated by their start position in the text (here at character 515) and their full text, which is a substring of the context as we mentioned above. To get a sense of what the data looks like, the following function will show some examples picked randomly in the dataset (automatically decoding the labels in passing).<jupyter_code>from datasets import ClassLabel, Sequence import random import pandas as pd from IPython.display import display, HTML def show_random_elements(dataset, num_examples=10): assert num_examples <= len(dataset), "Can't pick more elements than there are in the dataset." picks = [] for _ in range(num_examples): pick = random.randint(0, len(dataset)-1) while pick in picks: pick = random.randint(0, len(dataset)-1) picks.append(pick) df = pd.DataFrame(dataset[picks]) for column, typ in dataset.features.items(): if isinstance(typ, ClassLabel): df[column] = df[column].transform(lambda i: typ.names[i]) elif isinstance(typ, Sequence) and isinstance(typ.feature, ClassLabel): df[column] = df[column].transform(lambda x: [typ.feature.names[i] for i in x]) display(HTML(df.to_html())) show_random_elements(datasets["train"])<jupyter_output><empty_output><jupyter_text>Preprocessing the training data Before we can feed those texts to our model, we need to preprocess them. 
This is done by a 🤗 Transformers `Tokenizer` which will (as the name indicates) tokenize the inputs (including converting the tokens to their corresponding IDs in the pretrained vocabulary) and put them in a format the model expects, as well as generate the other inputs that the model requires.To do all of this, we instantiate our tokenizer with the `AutoTokenizer.from_pretrained` method, which will ensure:- we get a tokenizer that corresponds to the model architecture we want to use,- we download the vocabulary used when pretraining this specific checkpoint.That vocabulary will be cached, so it's not downloaded again the next time we run the cell.<jupyter_code>from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)<jupyter_output><empty_output><jupyter_text>The following assertion ensures that our tokenizer is a fast tokenizer (backed by Rust) from the 🤗 Tokenizers library. Those fast tokenizers are available for almost all models, and we will need some of the special features they have for our preprocessing.<jupyter_code>import transformers assert isinstance(tokenizer, transformers.PreTrainedTokenizerFast)<jupyter_output><empty_output><jupyter_text>You can check which types of models have a fast tokenizer available and which don't on the [big table of models](https://huggingface.co/transformers/index.html#bigtable). You can directly call this tokenizer on two sentences (one for the question, one for the context):<jupyter_code>tokenizer("What is your name?", "My name is Sylvain.")<jupyter_output><empty_output><jupyter_text>Depending on the model you selected, you will see different keys in the dictionary returned by the cell above. They don't matter much for what we're doing here (just know they are required by the model we will instantiate later); you can learn more about them in [this tutorial](https://huggingface.co/transformers/preprocessing.html) if you're interested.Now one specific thing for the preprocessing in question answering is how to deal with very long documents. We usually truncate them in other tasks, when they are longer than the model maximum sentence length, but here, removing part of the context might result in losing the answer we are looking for. To deal with this, we will allow one (long) example in our dataset to give several input features, each of length shorter than the maximum length of the model (or the one we set as a hyper-parameter). 
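As a small aside, you can check that maximum length directly on the tokenizer we just loaded; the value below assumes the `distilbert-base-uncased` checkpoint chosen at the top of the notebook.

```python
# Maximum input length of the model; 512 for distilbert-base-uncased.
print(tokenizer.model_max_length)
```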
Also, just in case the answer lies at the point we split a long context, we allow some overlap between the features we generate, controlled by the hyper-parameter `doc_stride`:<jupyter_code>max_length = 384 # The maximum length of a feature (question and context) doc_stride = 128 # The authorized overlap between two parts of the context when splitting is needed.<jupyter_output><empty_output><jupyter_text>Let's find one long example in our dataset:<jupyter_code>for i, example in enumerate(datasets["train"]): if len(tokenizer(example["question"], example["context"])["input_ids"]) > 384: break example = datasets["train"][i]<jupyter_output><empty_output><jupyter_text>Without any truncation, we get the following length for the input IDs:<jupyter_code>len(tokenizer(example["question"], example["context"])["input_ids"])<jupyter_output><empty_output><jupyter_text>Now, if we just truncate, we will lose information (and possibly the answer to our question):<jupyter_code>len(tokenizer(example["question"], example["context"], max_length=max_length, truncation="only_second")["input_ids"])<jupyter_output><empty_output><jupyter_text>Note that we never want to truncate the question, only the context, which is why we picked the `only_second` truncation strategy. Now, our tokenizer can automatically return us a list of features capped by a certain maximum length, with the overlap we talked about above; we just have to tell it so with `return_overflowing_tokens=True` and by passing the stride:<jupyter_code>tokenized_example = tokenizer( example["question"], example["context"], max_length=max_length, truncation="only_second", return_overflowing_tokens=True, stride=doc_stride )<jupyter_output><empty_output><jupyter_text>Now we don't have one list of `input_ids`, but several:<jupyter_code>[len(x) for x in tokenized_example["input_ids"]]<jupyter_output><empty_output><jupyter_text>And if we decode them, we can see the overlap:<jupyter_code>for x in tokenized_example["input_ids"][:2]: print(tokenizer.decode(x))<jupyter_output>[CLS] how many wins does the notre dame men's basketball team have? [SEP] the men's basketball team has over 1, 600 wins, one of only 12 schools who have reached that mark, and have appeared in 28 ncaa tournaments. former player austin carr holds the record for most points scored in a single game of the tournament with 61. although the team has never won the ncaa tournament, they were named by the helms athletic foundation as national champions twice. the team has orchestrated a number of upsets of number one ranked teams, the most notable of which was ending ucla's record 88 - game winning streak in 1974. the team has beaten an additional eight number - one teams, and those nine wins rank second, to ucla's 10, all - time in wins against the top team. the team plays in newly renovated purcell pavilion ( within the edmund p. joyce center ), which reopened for the beginning of the 2009 – 2010 season. the team is coached by mike brey, who, as of the 2014 – 15 season, his fifteenth at notr[...]<jupyter_text>Now this will give us some work to properly treat the answers: we need to find in which of those features the answer actually is, and where exactly in that feature. The models we will use require the start and end positions of these answers in the tokens, so we will also need to map parts of the original context to some tokens. 
Thankfully, the tokenizer we're using can help us with that by returning an `offset_mapping`:<jupyter_code>tokenized_example = tokenizer( example["question"], example["context"], max_length=max_length, truncation="only_second", return_overflowing_tokens=True, return_offsets_mapping=True, stride=doc_stride ) print(tokenized_example["offset_mapping"][0][:100])<jupyter_output>[(0, 0), (0, 3), (4, 8), (9, 13), (14, 18), (19, 22), (23, 28), (29, 33), (34, 37), (37, 38), (38, 39), (40, 50), (51, 55), (56, 60), (60, 61), (0, 0), (0, 3), (4, 7), (7, 8), (8, 9), (10, 20), (21, 25), (26, 29), (30, 34), (35, 36), (36, 37), (37, 40), (41, 45), (45, 46), (47, 50), (51, 53), (54, 58), (59, 61), (62, 69), (70, 73), (74, 78), (79, 86), (87, 91), (92, 96), (96, 97), (98, 101), (102, 106), (107, 115), (116, 118), (119, 121), (122, 126), (127, 138), (138, 139), (140, 146), (147, 153), (154, 160), (161, 165), (166, 171), (172, 175), (176, 182), (183, 186), (187, 191), (192, 198), (199, 205), (206, 208), (209, 210), (211, 217), (218, 222), (223, 225), (226, 229), (230, 240), (241, 245), (246, 248), (248, 249), (250, 258), (259, 262), (263, 267), (268, 271), (272, 277), (278, 281), (282, 285), (286, 290), (291, 301), (301, 302), (303, 307), (308, 312), (313, 318), (319, 321), (322, 325), (326, 330), (330, 331), (332, 340), (341, 351), (352, 354), (355, 363), (364, 373), (374,[...]<jupyter_text>This gives, for each index of our input IDS, the corresponding start and end character in the original text that gave our token. The very first token (`[CLS]`) has (0, 0) because it doesn't correspond to any part of the question/answer, then the second token is the same as the characters 0 to 3 of the question:<jupyter_code>first_token_id = tokenized_example["input_ids"][0][1] offsets = tokenized_example["offset_mapping"][0][1] print(tokenizer.convert_ids_to_tokens([first_token_id])[0], example["question"][offsets[0]:offsets[1]])<jupyter_output>how How<jupyter_text>So we can use this mapping to find the position of the start and end tokens of our answer in a given feature. 
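As another quick illustration, reusing the objects defined above with an arbitrarily chosen token index from the context part (we will see right below how to tell question tokens from context tokens systematically), the same offsets let us recover the exact characters behind a context token:

```python
# Token 22 of the first feature falls in the context, so its offsets index into example["context"].
token_index = 22  # arbitrary choice, purely for illustration
start, end = tokenized_example["offset_mapping"][0][token_index]
token = tokenizer.convert_ids_to_tokens([tokenized_example["input_ids"][0][token_index]])[0]
print(token, "->", example["context"][start:end])
```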
We just have to distinguish which parts of the offsets correspond to the question and which part correspond to the context, this is where the `sequence_ids` method of our `tokenized_example` can be useful:<jupyter_code>sequence_ids = tokenized_example.sequence_ids() print(sequence_ids)<jupyter_output>[None, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, None, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, [...]<jupyter_text>It returns `None` for the special tokens, then 0 or 1 depending on whether the corresponding token comes from the first sentence past (the question) or the second (the context). Now with all of this, we can find the first and last token of the answer in one of our input feature (or if the answer is not in this feature):<jupyter_code>answers = example["answers"] start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != 1: token_start_index += 1 # End token index of the current span in the text. token_end_index = len(tokenized_example["input_ids"][0]) - 1 while sequence_ids[token_end_index] != 1: token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). offsets = tokenized_example["offset_mapping"][0] if (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): # Move the token_start_index and token_end_index to the two ends of the answer. # Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 start_position = token_start_index - 1 while offsets[token_end_index][1] >= end_char: token_end_index -= 1 end_position = token_end_index + 1 print(start_position, end_position) else: print("The answer is not in this feature.")<jupyter_output>23 26<jupyter_text>And we can double check that it is indeed the theoretical answer:<jupyter_code>print(tokenizer.decode(tokenized_example["input_ids"][0][start_position: end_position+1])) print(answers["text"][0])<jupyter_output>over 1, 600 over 1,600<jupyter_text>For this notebook to work with any kind of models, we need to account for the special case where the model expects padding on the left (in which case we switch the order of the question and the context):<jupyter_code>pad_on_right = tokenizer.padding_side == "right"<jupyter_output><empty_output><jupyter_text>Now let's put everything together in one function we will apply to our training set. 
In the case of impossible answers (the answer is in another feature given by an example with a long context), we set the cls index for both the start and end position. We could also simply discard those examples from the training set if the flag `allow_impossible_answers` is `False`. Since the preprocessing is already complex enough as it is, we've kept is simple for this part.<jupyter_code>def prepare_train_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples["question"] = [q.lstrip() for q in examples["question"]] # Tokenize our examples with truncation and padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples["question" if pad_on_right else "context"], examples["context" if pad_on_right else "question"], truncation="only_second" if pad_on_right else "only_first", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # The offset mappings will give us a map from token to character position in the original context. This will # help us compute the start_positions and end_positions. offset_mapping = tokenized_examples.pop("offset_mapping") # Let's label those examples! tokenized_examples["start_positions"] = [] tokenized_examples["end_positions"] = [] for i, offsets in enumerate(offset_mapping): # We will label impossible answers with the index of the CLS token. input_ids = tokenized_examples["input_ids"][i] cls_index = input_ids.index(tokenizer.cls_token_id) # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] answers = examples["answers"][sample_index] # If no answers are given, set the cls_index as answer. if len(answers["answer_start"]) == 0: tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Start/end character index of the answer in the text. start_char = answers["answer_start"][0] end_char = start_char + len(answers["text"][0]) # Start token index of the current span in the text. token_start_index = 0 while sequence_ids[token_start_index] != (1 if pad_on_right else 0): token_start_index += 1 # End token index of the current span in the text. token_end_index = len(input_ids) - 1 while sequence_ids[token_end_index] != (1 if pad_on_right else 0): token_end_index -= 1 # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index). if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char): tokenized_examples["start_positions"].append(cls_index) tokenized_examples["end_positions"].append(cls_index) else: # Otherwise move the token_start_index and token_end_index to the two ends of the answer. 
# Note: we could go after the last offset if the answer is the last word (edge case). while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char: token_start_index += 1 tokenized_examples["start_positions"].append(token_start_index - 1) while offsets[token_end_index][1] >= end_char: token_end_index -= 1 tokenized_examples["end_positions"].append(token_end_index + 1) return tokenized_examples<jupyter_output><empty_output><jupyter_text>This function works with one or several examples. In the case of several examples, the tokenizer will return a list of lists for each key:<jupyter_code>features = prepare_train_features(datasets['train'][:5])<jupyter_output><empty_output><jupyter_text>To apply this function on all the sentences (or pairs of sentences) in our dataset, we just use the `map` method of our `dataset` object we created earlier. This will apply the function on all the elements of all the splits in `dataset`, so our training, validation and testing data will be preprocessed in one single command. Since our preprocessing changes the number of samples, we need to remove the old columns when applying it.<jupyter_code>tokenized_datasets = datasets.map(prepare_train_features, batched=True, remove_columns=datasets["train"].column_names)<jupyter_output>Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/squad/plain_text/1.0.0/4c81550d83a2ac7c7ce23783bd8ff36642800e6633c1f18417fb58c3ff50cdd7/cache-a5c71e98733887b0.arrow Loading cached processed dataset at /home/sgugger/.cache/huggingface/datasets/squad/plain_text/1.0.0/4c81550d83a2ac7c7ce23783bd8ff36642800e6633c1f18417fb58c3ff50cdd7/cache-14932a8c6aecc96d.arrow<jupyter_text>Even better, the results are automatically cached by the 🤗 Datasets library to avoid spending time on this step the next time you run your notebook. The 🤗 Datasets library is normally smart enough to detect when the function you pass to map has changed (and thus requires to not use the cache data). For instance, it will properly detect if you change the task in the first cell and rerun the notebook. 🤗 Datasets warns you when it uses cached files, you can pass `load_from_cache_file=False` in the call to `map` to not use the cached files and force the preprocessing to be applied again.Note that we passed `batched=True` to encode the texts by batches together. This is to leverage the full benefit of the fast tokenizer we loaded earlier, which will use multi-threading to treat the texts in a batch concurrently. Fine-tuning the model Now that our data is ready for training, we can download the pretrained model and fine-tune it. Since our task is question answering, we use the `AutoModelForQuestionAnswering` class. Like with the tokenizer, the `from_pretrained` method will download and cache the model for us:<jupyter_code>from transformers import AutoModelForQuestionAnswering, TrainingArguments, Trainer model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)<jupyter_output>Some weights of the model checkpoint at distilbert-base-uncased were not used when initializing DistilBertForQuestionAnswering: ['vocab_transform.weight', 'vocab_transform.bias', 'vocab_layer_norm.weight', 'vocab_layer_norm.bias', 'vocab_projector.weight', 'vocab_projector.bias'] - This IS expected if you are initializing DistilBertForQuestionAnswering from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). 
- This IS NOT expected if you are initializing DistilBertForQuestionAnswering from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of DistilBertForQuestionAnswering were not initialized from the model checkpoint at distilbert-base-uncased and are newly initialized: ['qa_outputs.weight', 'qa_outputs.bias'] You should probably TRAIN this mode[...]<jupyter_text>The warning is telling us we are throwing away some weights (the `vocab_transform` and `vocab_layer_norm` layers) and randomly initializing some other (the `pre_classifier` and `classifier` layers). This is absolutely normal in this case, because we are removing the head used to pretrain the model on a masked language modeling objective and replacing it with a new head for which we don't have pretrained weights, so the library warns us we should fine-tune this model before using it for inference, which is exactly what we are going to do. To instantiate a `Trainer`, we will need to define three more things. The most important is the [`TrainingArguments`](https://huggingface.co/transformers/main_classes/trainer.htmltransformers.TrainingArguments), which is a class that contains all the attributes to customize the training. It requires one folder name, which will be used to save the checkpoints of the model, and all other arguments are optional:<jupyter_code>model_name = model_checkpoint.split("/")[-1] args = TrainingArguments( f"{model_name}-finetuned-squad", evaluation_strategy = "epoch", learning_rate=2e-5, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size, num_train_epochs=3, weight_decay=0.01, push_to_hub=True, )<jupyter_output><empty_output><jupyter_text>Here we set the evaluation to be done at the end of each epoch, tweak the learning rate, use the `batch_size` defined at the top of the notebook and customize the number of epochs for training, as well as the weight decay.The last argument to setup everything so we can push the model to the [Hub](https://huggingface.co/models) regularly during training. Remove it if you didn't follow the installation steps at the top of the notebook. If you want to save your model locally in a name that is different than the name of the repository it will be pushed, or if you want to push your model under an organization and not your name space, use the `hub_model_id` argument to set the repo name (it needs to be the full name, including your namespace: for instance `"sgugger/bert-finetuned-squad"` or `"huggingface/bert-finetuned-squad"`). 
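Concretely, a hypothetical variant of the arguments above that saves checkpoints under a different local folder and pushes to an organization namespace could look like this (the folder and repository names are made up):

```python
# Same training setup as above, but with an explicit (hypothetical) Hub repository name.
args = TrainingArguments(
    "my-local-folder",
    evaluation_strategy="epoch",
    learning_rate=2e-5,
    per_device_train_batch_size=batch_size,
    per_device_eval_batch_size=batch_size,
    num_train_epochs=3,
    weight_decay=0.01,
    push_to_hub=True,
    hub_model_id="my-organization/distilbert-finetuned-squad",
)
```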
Then we will need a data collator that will batch our processed examples together; here the default one will work:<jupyter_code>from transformers import default_data_collator data_collator = default_data_collator<jupyter_output><empty_output><jupyter_text>We will evaluate our model and compute metrics in the next section (this is a very long operation, so we will only compute the evaluation loss during training).Then we just need to pass all of this along with our datasets to the `Trainer`:<jupyter_code>trainer = Trainer( model, args, train_dataset=tokenized_datasets["train"], eval_dataset=tokenized_datasets["validation"], data_collator=data_collator, tokenizer=tokenizer, )<jupyter_output><empty_output><jupyter_text>We can now fine-tune our model by just calling the `train` method:<jupyter_code>trainer.train()<jupyter_output><empty_output><jupyter_text>Since this training is particularly long, let's save the model just in case we need to restart.<jupyter_code>trainer.save_model("test-squad-trained")<jupyter_output><empty_output><jupyter_text>Evaluation Evaluating our model will require a bit more work, as we will need to map the predictions of our model back to parts of the context. The model itself predicts logits for the start and end positions of our answers: if we take a batch from our validation dataloader, here is the output our model gives us:<jupyter_code>import torch for batch in trainer.get_eval_dataloader(): break batch = {k: v.to(trainer.args.device) for k, v in batch.items()} with torch.no_grad(): output = trainer.model(**batch) output.keys()<jupyter_output><empty_output><jupyter_text>The output of the model is a dict-like object that contains the loss (since we provided labels), and the start and end logits. We won't need the loss for our predictions, so let's have a look at the logits:<jupyter_code>output.start_logits.shape, output.end_logits.shape<jupyter_output><empty_output><jupyter_text>We have one logit for each feature and each token. The most obvious way to predict an answer for each feature is to take the index of the maximum of the start logits as a start position and the index of the maximum of the end logits as an end position.<jupyter_code>output.start_logits.argmax(dim=-1), output.end_logits.argmax(dim=-1)<jupyter_output><empty_output><jupyter_text>This will work great in a lot of cases, but what if this prediction gives us something impossible: the start position could be greater than the end position, or point to a span of text in the question instead of the answer. In that case, we might want to look at the second best prediction to see if it gives a possible answer and select that instead.However, picking the second best answer is not as easy as picking the best one: is it the second best index in the start logits with the best index in the end logits? Or the best index in the start logits with the second best index in the end logits? And if that second best answer is not possible either, it gets even trickier for the third best answer.To classify our answers, we will use the score obtained by adding the start and end logits. We won't try to order all the possible answers; instead we limit the number of candidates with a hyper-parameter we call `n_best_size`. We'll pick the best indices in the start and end logits and gather all the answers this predicts. After checking if each one is valid, we will sort them by their score and keep the best one. 
Here is how we would do this on the first feature in the batch:<jupyter_code>n_best_size = 20 import numpy as np start_logits = output.start_logits[0].cpu().numpy() end_logits = output.end_logits[0].cpu().numpy() # Gather the indices the best start/end logits: start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() valid_answers = [] for start_index in start_indexes: for end_index in end_indexes: if start_index <= end_index: # We need to refine that test to check the answer is inside the context valid_answers.append( { "score": start_logits[start_index] + end_logits[end_index], "text": "" # We need to find a way to get back the original substring corresponding to the answer in the context } )<jupyter_output><empty_output><jupyter_text>And then we can sort the `valid_answers` according to their `score` and only keep the best one. The only point left is how to check a given span is inside the context (and not the question) and how to get back the text inside. To do this, we need to add two things to our validation features:- the ID of the example that generated the feature (since each example can generate several features, as seen before);- the offset mapping that will give us a map from token indices to character positions in the context.That's why we will re-process the validation set with the following function, slightly different from `prepare_train_features`:<jupyter_code>def prepare_validation_features(examples): # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace examples["question"] = [q.lstrip() for q in examples["question"]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. tokenized_examples = tokenizer( examples["question" if pad_on_right else "context"], examples["context" if pad_on_right else "question"], truncation="only_second" if pad_on_right else "only_first", max_length=max_length, stride=doc_stride, return_overflowing_tokens=True, return_offsets_mapping=True, padding="max_length", ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping") # We keep the example_id that gave us this feature and we will store the offset mappings. tokenized_examples["example_id"] = [] for i in range(len(tokenized_examples["input_ids"])): # Grab the sequence corresponding to that example (to know what is the context and what is the question). sequence_ids = tokenized_examples.sequence_ids(i) context_index = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. sample_index = sample_mapping[i] tokenized_examples["example_id"].append(examples["id"][sample_index]) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. 
tokenized_examples["offset_mapping"][i] = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples["offset_mapping"][i]) ] return tokenized_examples<jupyter_output><empty_output><jupyter_text>And like before, we can apply that function to our validation set easily:<jupyter_code>validation_features = datasets["validation"].map( prepare_validation_features, batched=True, remove_columns=datasets["validation"].column_names )<jupyter_output><empty_output><jupyter_text>Now we can grab the predictions for all features by using the `Trainer.predict` method:<jupyter_code>raw_predictions = trainer.predict(validation_features)<jupyter_output><empty_output><jupyter_text>The `Trainer` *hides* the columns that are not used by the model (here `example_id` and `offset_mapping` which we will need for our post-processing), so we set them back:<jupyter_code>validation_features.set_format(type=validation_features.format["type"], columns=list(validation_features.features.keys()))<jupyter_output><empty_output><jupyter_text>We can now refine the test we had before: since we set `None` in the offset mappings when it corresponds to a part of the question, it's easy to check if an answer is fully inside the context. We also eliminate very long answers from our considerations (with an hyper-parameter we can tune)<jupyter_code>max_answer_length = 30 start_logits = output.start_logits[0].cpu().numpy() end_logits = output.end_logits[0].cpu().numpy() offset_mapping = validation_features[0]["offset_mapping"] # The first feature comes from the first example. For the more general case, we will need to be match the example_id to # an example index context = datasets["validation"][0]["context"] # Gather the indices the best start/end logits: start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() valid_answers = [] for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue if start_index <= end_index: # We need to refine that test to check the answer is inside the context start_char = offset_mapping[start_index][0] end_char = offset_mapping[end_index][1] valid_answers.append( { "score": start_logits[start_index] + end_logits[end_index], "text": context[start_char: end_char] } ) valid_answers = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[:n_best_size] valid_answers<jupyter_output><empty_output><jupyter_text>We can compare to the actual ground-truth answer:<jupyter_code>datasets["validation"][0]["answers"]<jupyter_output><empty_output><jupyter_text>Our model picked the right as the most likely answer!As we mentioned in the code above, this was easy on the first feature because we knew it comes from the first example. For the other features, we will need a map between examples and their corresponding features. Also, since one example can give several features, we will need to gather together all the answers in all the features generated by a given example, then pick the best one. 
The following code builds a map from example index to its corresponding features indices:<jupyter_code>import collections examples = datasets["validation"] features = validation_features example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i)<jupyter_output><empty_output><jupyter_text>We're almost ready for our post-processing function. The last bit to deal with is the impossible answer (when `squad_v2 = True`). The code above only keeps answers that are inside the context, we need to also grab the score for the impossible answer (which has start and end indices corresponding to the index of the CLS token). When one example gives several features, we have to predict the impossible answer when all the features give a high score to the impossible answer (since one feature could predict the impossible answer just because the answer isn't in the part of the context it has access too), which is why the score of the impossible answer for one example is the *minimum* of the scores for the impossible answer in each feature generated by the example.We then predict the impossible answer when that score is greater than the score of the best non-impossible answer. All combined together, this gives us this post-processing function:<jupyter_code>from tqdm.auto import tqdm def postprocess_qa_predictions(examples, features, raw_predictions, n_best_size = 20, max_answer_length = 30): all_start_logits, all_end_logits = raw_predictions # Build a map example to its corresponding features. example_id_to_index = {k: i for i, k in enumerate(examples["id"])} features_per_example = collections.defaultdict(list) for i, feature in enumerate(features): features_per_example[example_id_to_index[feature["example_id"]]].append(i) # The dictionaries we have to fill. predictions = collections.OrderedDict() # Logging. print(f"Post-processing {len(examples)} example predictions split into {len(features)} features.") # Let's loop over all the examples! for example_index, example in enumerate(tqdm(examples)): # Those are the indices of the features associated to the current example. feature_indices = features_per_example[example_index] min_null_score = None # Only used if squad_v2 is True. valid_answers = [] context = example["context"] # Looping through all the features associated to the current example. for feature_index in feature_indices: # We grab the predictions of the model for this feature. start_logits = all_start_logits[feature_index] end_logits = all_end_logits[feature_index] # This is what will allow us to map some the positions in our logits to span of texts in the original # context. offset_mapping = features[feature_index]["offset_mapping"] # Update minimum null prediction. cls_index = features[feature_index]["input_ids"].index(tokenizer.cls_token_id) feature_null_score = start_logits[cls_index] + end_logits[cls_index] if min_null_score is None or min_null_score < feature_null_score: min_null_score = feature_null_score # Go through all possibilities for the `n_best_size` greater start and end logits. 
start_indexes = np.argsort(start_logits)[-1 : -n_best_size - 1 : -1].tolist() end_indexes = np.argsort(end_logits)[-1 : -n_best_size - 1 : -1].tolist() for start_index in start_indexes: for end_index in end_indexes: # Don't consider out-of-scope answers, either because the indices are out of bounds or correspond # to part of the input_ids that are not in the context. if ( start_index >= len(offset_mapping) or end_index >= len(offset_mapping) or offset_mapping[start_index] is None or offset_mapping[end_index] is None ): continue # Don't consider answers with a length that is either < 0 or > max_answer_length. if end_index < start_index or end_index - start_index + 1 > max_answer_length: continue start_char = offset_mapping[start_index][0] end_char = offset_mapping[end_index][1] valid_answers.append( { "score": start_logits[start_index] + end_logits[end_index], "text": context[start_char: end_char] } ) if len(valid_answers) > 0: best_answer = sorted(valid_answers, key=lambda x: x["score"], reverse=True)[0] else: # In the very rare edge case we have not a single non-null prediction, we create a fake prediction to avoid # failure. best_answer = {"text": "", "score": 0.0} # Let's pick our final answer: the best one or the null answer (only for squad_v2) if not squad_v2: predictions[example["id"]] = best_answer["text"] else: answer = best_answer["text"] if best_answer["score"] > min_null_score else "" predictions[example["id"]] = answer return predictions<jupyter_output><empty_output><jupyter_text>And we can apply our post-processing function to our raw predictions:<jupyter_code>final_predictions = postprocess_qa_predictions(datasets["validation"], validation_features, raw_predictions.predictions)<jupyter_output>Post-processing 10570 example predictions split into 10784 features.<jupyter_text>Then we can load the metric from the datasets library.<jupyter_code>metric = load_metric("squad_v2" if squad_v2 else "squad")<jupyter_output><empty_output><jupyter_text>Then we can call compute on it. We just need to format predictions and labels a bit as it expects a list of dictionaries and not one big dictionary. In the case of squad_v2, we also have to set a `no_answer_probability` argument (which we set to 0.0 here as we have already set the answer to empty if we picked it).<jupyter_code>if squad_v2: formatted_predictions = [{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in final_predictions.items()] else: formatted_predictions = [{"id": k, "prediction_text": v} for k, v in final_predictions.items()] references = [{"id": ex["id"], "answers": ex["answers"]} for ex in datasets["validation"]] metric.compute(predictions=formatted_predictions, references=references)<jupyter_output><empty_output><jupyter_text>You can now upload the result of the training to the Hub, just execute this instruction:<jupyter_code>trainer.push_to_hub()<jupyter_output><empty_output>
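<jupyter_text>As a quick sanity check you can also reload the checkpoint you just pushed with the `question-answering` pipeline, which performs a similar span post-processing internally. The repository id below is only a placeholder; use the name printed by `push_to_hub` for your account:<jupyter_code>
from transformers import pipeline

# placeholder repo id -- replace with the repository created by trainer.push_to_hub()
qa_pipeline = pipeline("question-answering", model="<your-username>/<your-finetuned-model>")
qa_pipeline(
    question="What is extractive question answering?",
    context="Extractive question answering is the task of extracting an answer from a text given a question.",
)
<jupyter_output><empty_output>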
notebooks/examples/question_answering.ipynb/0
{ "file_path": "notebooks/examples/question_answering.ipynb", "repo_id": "notebooks", "token_count": 15331 }
166
<jupyter_start><jupyter_text>Time Series DatasetsThis notebook shows how to create a time series dataset from some csv file in order to then share it on the [🤗 hub](https://huggingface.co/docs/datasets/index). We will use the GluonTS library to read the csv into the appropriate format. We start by installing the libraries<jupyter_code>! pip install -q datasets gluonts orjson<jupyter_output><empty_output><jupyter_text>GluonTS comes with a pandas DataFrame based dataset so our strategy will be to read the csv file, and process it as a `PandasDataset`. We will then iterate over it and convert it to a 🤗 dataset with the appropriate schema for time series. So lets get started! `PandasDataset`Suppose we are given multiple (10) time series stacked on top of each other in a dataframe with an `item_id` column that distinguishes different series:<jupyter_code>import pandas as pd url = ( "https://gist.githubusercontent.com/rsnirwan/a8b424085c9f44ef2598da74ce43e7a3" "/raw/b6fdef21fe1f654787fa0493846c546b7f9c4df2/ts_long.csv" ) df = pd.read_csv(url, index_col=0, parse_dates=True) df.head()<jupyter_output><empty_output><jupyter_text>After converting it into a `pd.Dataframe` we can then convert it into GluonTS's `PandasDataset`:<jupyter_code>from gluonts.dataset.pandas import PandasDataset ds = PandasDataset.from_long_dataframe(df, target="target", item_id="item_id")<jupyter_output><empty_output><jupyter_text>🤗 DatasetsFrom here we have to map the pandas dataset's `start` field into a time stamp instead of a `pd.Period`. We do this by defining the following class:<jupyter_code>class ProcessStartField(): ts_id = 0 def __call__(self, data): data["start"] = data["start"].to_timestamp() data["feat_static_cat"] = [self.ts_id] self.ts_id += 1 return data from gluonts.itertools import Map process_start = ProcessStartField() list_ds = list(Map(process_start, ds))<jupyter_output><empty_output><jupyter_text>Next we need to define our schema features and create our dataset from this list via the `from_list` function:<jupyter_code>from datasets import Dataset, Features, Value, Sequence features = Features( { "start": Value("timestamp[s]"), "target": Sequence(Value("float32")), "feat_static_cat": Sequence(Value("uint64")), # "feat_static_real": Sequence(Value("float32")), # "feat_dynamic_real": Sequence(Sequence(Value("uint64"))), # "feat_dynamic_cat": Sequence(Sequence(Value("uint64"))), "item_id": Value("string"), } ) dataset = Dataset.from_list(list_ds, features=features)<jupyter_output><empty_output>
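<jupyter_text>The resulting dataset behaves like any other 🤗 dataset, so as an optional last step we can inspect one converted series and, if everything looks right, share it on the Hub. This is only a sketch; the repository name is a placeholder for your own namespace:<jupyter_code>
# look at the first converted series to confirm the schema was applied
print(dataset[0]["start"], len(dataset[0]["target"]), dataset[0]["item_id"])

# placeholder repo id -- uncomment and replace with your own namespace to share it
# dataset.push_to_hub("<your-username>/ts-long-example")
<jupyter_output><empty_output>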
notebooks/examples/time_series_datasets.ipynb/0
{ "file_path": "notebooks/examples/time_series_datasets.ipynb", "repo_id": "notebooks", "token_count": 975 }
167
<jupyter_start><jupyter_text>Huggingface Sagemaker-sdk - Run a batch transform inference job with 🤗 Transformers 1. [Introduction](Introduction) 2. [Run Batch Transform after training a model](Run-Batch-Transform-after-training-a-model) 3. [Run Batch Transform Inference Job with a fine-tuned model using `jsonl`](Run-Batch-Transform-Inference-Job-with-a-fine-tuned-model-using-jsonl) Welcome to this getting started guide. We will use the new Hugging Face Inference DLCs and the Amazon SageMaker Python SDK to deploy two transformer models for inference. In the first example we deploy a trained Hugging Face Transformer model to SageMaker for inference. In the second example we directly deploy one of the 10,000+ Hugging Face Transformers from the [Hub](https://huggingface.co/models) to Amazon SageMaker for inference. Run Batch Transform after training a model _not included in the notebook_ After you train a model, you can use [Amazon SageMaker Batch Transform](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html) to perform inferences with the model. In Batch Transform you provide your inference data as an S3 URI and SageMaker will take care of downloading it, running the prediction and uploading the results to S3 again afterwards. You can find more documentation for Batch Transform [here](https://docs.aws.amazon.com/sagemaker/latest/dg/batch-transform.html). If you trained the model using the **HuggingFace estimator**, you can invoke the `transformer()` method to create a transform job for a model based on the training job.```pythonbatch_job = huggingface_estimator.transformer( instance_count=1, instance_type='ml.c5.2xlarge', strategy='SingleRecord')batch_job.transform( data='s3://s3-uri-to-batch-data', content_type='application/json', split_type='Line')```For more details about what can be specified here, see the [API docs](https://sagemaker.readthedocs.io/en/stable/overview.html#sagemaker-batch-transform).<jupyter_code>!pip install "sagemaker>=2.48.0" "datasets==1.11" --upgrade<jupyter_output><empty_output><jupyter_text>Run Batch Transform Inference Job with a fine-tuned model using `jsonl` Data Pre-Processing In this example we are using the provided `tweet_data.csv` as the dataset. The `csv` contains ~1800 tweets about different airlines. The `csv` contains 1 column `"inputs"` with the tweets. To use this `csv` we need to convert it into a `jsonl` file and upload it to S3. Due to the complex structure of text, only `jsonl` files are supported for batch transform. 
As pre-processing we are removing the `@` in the beginning of the tweet to get the names/identities correct._**NOTE**: While preprocessing you need to make sure that your `inputs` fit the `max_length`.<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it not exists sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}") import csv import json from sagemaker.s3 import S3Uploader,s3_path_join # datset files dataset_csv_file="tweet_data.csv" dataset_jsonl_file="tweet_data.jsonl" with open(dataset_csv_file, "r+") as infile, open(dataset_jsonl_file, "w+") as outfile: reader = csv.DictReader(infile) for row in reader: # remove @ row["inputs"] = row["inputs"].replace("@","") json.dump(row, outfile) outfile.write('\n') # uploads a given file to S3. input_s3_path = s3_path_join("s3://",sagemaker_session_bucket,"batch_transform/input") output_s3_path = s3_path_join("s3://",sagemaker_session_bucket,"batch_transform/output") s3_file_uri = S3Uploader.upload(dataset_jsonl_file,input_s3_path) print(f"{dataset_jsonl_file} uploaded to {s3_file_uri}")<jupyter_output>tweet_data.jsonl uploaded to s3://sagemaker-us-east-1-558105141721/batch_transform/input/tweet_data.jsonl<jupyter_text>The created file looks like this```json{"inputs": "VirginAmerica What dhepburn said."}{"inputs": "VirginAmerica plus you've added commercials to the experience... tacky."}{"inputs": "VirginAmerica I didn't today... Must mean I need to take another trip!"}{"inputs": "VirginAmerica it's really aggressive to blast obnoxious \"entertainment\"...."}{"inputs": "VirginAmerica and it's a really big bad thing about it"}{"inputs": "VirginAmerica seriously would pay $30 a flight for seats that didn't h...."}{"inputs": "VirginAmerica yes, nearly every time I fly VX this \u201cear worm\u201d won\u2019t go away :)"}{"inputs": "VirginAmerica Really missed a prime opportunity for Men Without ..."}{"inputs": "virginamerica Well, I didn't\u2026but NOW I DO! :-D"}{"inputs": "VirginAmerica it was amazing, and arrived an hour early. You're too good to me."}{"inputs": "VirginAmerica did you know that suicide is the second leading cause of death among teens 10-24"}{"inputs": "VirginAmerica I &lt;3 pretty graphics. so much better than minimal iconography. :D"}{"inputs": "VirginAmerica This is such a great deal! Already thinking about my 2nd trip ..."}....``` Create Inference Transformer to run the batch jobWe use the [twitter-roberta-base-sentiment](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment) model running our batch transform job. This is a RoBERTa-base model trained on ~58M tweets and finetuned for sentiment analysis with the TweetEval benchmark.<jupyter_code>from sagemaker.huggingface.model import HuggingFaceModel # Hub Model configuration. 
<https://huggingface.co/models> hub = { 'HF_MODEL_ID':'cardiffnlp/twitter-roberta-base-sentiment', 'HF_TASK':'text-classification' } # create Hugging Face Model Class huggingface_model = HuggingFaceModel( env=hub, # configuration for loading model from Hub role=role, # iam role with permissions to create an Endpoint transformers_version="4.26", # transformers version used pytorch_version="1.13", # pytorch version used py_version='py39', # python version used ) # create Transformer to run our batch job batch_job = huggingface_model.transformer( instance_count=1, instance_type='ml.p3.2xlarge', output_path=output_s3_path, # we are using the same s3 path to save the output with the input strategy='SingleRecord') # starts batch transform job and uses s3 data as input batch_job.transform( data=s3_file_uri, content_type='application/json', split_type='Line') import json from sagemaker.s3 import S3Downloader from ast import literal_eval # creating s3 uri for result file -> input file + .out output_file = f"{dataset_jsonl_file}.out" output_path = s3_path_join(output_s3_path,output_file) # download file S3Downloader.download(output_path,'.') batch_transform_result = [] with open(output_file) as f: for line in f: # converts jsonline array to normal array line = "[" + line.replace("[","").replace("]",",") + "]" batch_transform_result = literal_eval(line) # print results print(batch_transform_result[:3])<jupyter_output>INFO:botocore.credentials:Found credentials from IAM Role: BaseNotebookInstanceEc2InstanceRole
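<jupyter_text>Because the output file preserves the order of the input `jsonl`, we can, for example, join the predictions back onto the original tweets with pandas. This is only a sketch and assumes the output schema shown above (a list of `label`/`score` dictionaries):<jupyter_code>
import pandas as pd

tweets = pd.read_csv(dataset_csv_file)               # original "inputs" column
predictions = pd.DataFrame(batch_transform_result)   # columns: label, score
results = pd.concat([tweets.reset_index(drop=True), predictions], axis=1)
results.head()
<jupyter_output><empty_output>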
notebooks/sagemaker/12_batch_transform_inference/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/12_batch_transform_inference/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 2457 }
168
# accelerate-aws-sagemaker Examples showcasing AWS SageMaker integration of 🤗 Accelerate. Just give the `accelerate config` and do `accelerate launch` 🚀. It's as simple as that! 1. Set up the accelerate config by running `accelerate config --config_file accelerate_config.yaml` and answer the SageMaker questions. 2. Below is a sample config which is using aws `profile` to launch training job using 🤗 SageMaker estimator. It also has the `iam_role_name` which has the needed SageMaker permissions specified. In this config it is replaced `xxxxx` as user needs to specify it based on their corresponding AWS setup. ```yaml base_job_name: accelerate-sagemaker-1 compute_environment: AMAZON_SAGEMAKER distributed_type: DATA_PARALLEL ec2_instance_type: ml.p3.16xlarge iam_role_name: xxxxx image_uri: null mixed_precision: fp16 num_machines: 1 profile: xxxxx py_version: py38 pytorch_version: 1.10.2 region: us-east-1 transformers_version: 4.17.0 use_cpu: false ``` 3. One can specify a custom docker image instead of Official 🤗 DLCs through the accelerate config questionnaire. When this isn't provided, the latest Official 🤗 DLC will be used. 4. Support for input channels pointing to S3 data locations via TSV file, e.g., below are the contents of sagemaker_inputs.tsv whose location is given as part of accelerate config setup. ```tsv channel_name data_location train s3://sagemaker-sample/samples/datasets/imdb/train test s3://sagemaker-sample/samples/datasets/imdb/test ``` 5. Support for SageMaker metrics logging via TSV file, e.g., below are the contents of the sagemaker_metrics_definition.tsv whose location is given as part of accelerate config setup. ```tsv metric_name metric_regex accuracy 'accuracy': ([0-9.]+) f1 'f1': ([0-9.]+) ``` 6. Example of accelerate config with above features setup [XXXXX values are AWS account specific]: ```yaml base_job_name: accelerate-sagemaker-1 compute_environment: AMAZON_SAGEMAKER distributed_type: DATA_PARALLEL ec2_instance_type: ml.p3.16xlarge iam_role_name: XXXXX image_uri: 763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.8.1-transformers4.10.2-gpu-py36-cu111-ubuntu18.04 mixed_precision: fp16 num_machines: 1 profile: XXXXX py_version: py38 pytorch_version: 1.10.2 region: us-east-1 sagemaker_inputs_file: sagemaker_inputs.tsv sagemaker_metrics_file: sagemaker_metrics_definition.tsv transformers_version: 4.17.0 use_cpu: false ``` 7. Put `requirements.txt` with all the needed libraries for running the training script. 8. Running `text-classification` example using s3 datasets (from the root directory): ```bash cd src/text-classification bash launch.sh ``` The contents of launch.sh ```bash accelerate launch --config_file accelerate_config.yaml train_using_s3_data.py \ --mixed_precision "fp16" ``` Output logs: ```bash ... [1,mpirank:0,algo-1]<stdout>:algo-1:79:1300 [0] NCCL INFO Launch mode Parallel [1,mpirank:0,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:3,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:1,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:2,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:6,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:5,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:7,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. 
[1,mpirank:4,algo-1]<stderr>:INFO:root:Reducer buckets have been rebuilt in this iteration. [1,mpirank:0,algo-1]<stdout>:epoch 0: {'accuracy': 0.6838235294117647, 'f1': 0.8122270742358079} [1,mpirank:0,algo-1]<stdout>:epoch 1: {'accuracy': 0.7205882352941176, 'f1': 0.8256880733944955} [1,mpirank:0,algo-1]<stdout>:epoch 2: {'accuracy': 0.75, 'f1': 0.838095238095238} 2022-09-21 13:21:05,187 sagemaker-training-toolkit INFO Waiting for the process to finish and give a return code. 2022-09-21 13:21:05,188 sagemaker-training-toolkit INFO Done waiting for a return code. Received 0 from exiting process. 2022-09-21 13:21:05,188 sagemaker-training-toolkit INFO Reporting training SUCCESS ``` 9. Running `seq2seq` example: ```bash cd src/seq2seq bash launch.sh ``` The contents of launch.sh ```bash accelerate launch --config_file accelerate_config.yaml run_seq2seq_no_trainer.py \ --dataset_name "smangrul/MuDoConv" \ --max_source_length 128 \ --source_prefix "chatbot: " \ --max_target_length 64 \ --val_max_target_length 64 \ --val_min_target_length 20 \ --n_val_batch_generations 5 \ --n_train 10000 \ --n_val 1000 \ --pad_to_max_length True\ --num_beams 10 \ --model_name_or_path "facebook/blenderbot-400M-distill" \ --per_device_train_batch_size 16 \ --per_device_eval_batch_size 16 \ --learning_rate 1e-6 \ --weight_decay 0.0 \ --num_train_epochs 1 \ --gradient_accumulation_steps 1 \ --num_warmup_steps 100 \ --output_dir "/opt/ml/model" \ --seed 25 \ --logging_steps 100 \ --report_name "blenderbot_400M_finetuning" ``` Output logs: ``` [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:37:39 - INFO - __main__ - Distributed environment: MULTI_GPU Backend: smddp [1,mpirank:0,algo-1]<stderr>:Num processes: 8 [1,mpirank:0,algo-1]<stderr>:Process index: 0 [1,mpirank:0,algo-1]<stderr>:Local process index: 0 [1,mpirank:0,algo-1]<stderr>:Device: cuda:0 [1,mpirank:0,algo-1]<stderr>: ... [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - ***** Running training ***** [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Num examples = 10000 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Num Epochs = 1 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Instantaneous batch size per device = 16 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Total train batch size (w. parallel, distributed & accumulation) = 128 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Gradient Accumulation steps = 1 [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:31 - INFO - __main__ - Total optimization steps = 79 ... [1,mpirank:0,algo-1]<stderr>:#015100%|██████████| 79/79 [00:19<00:00, 4.79it/s] [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:50 - INFO - __main__ - Epoch 0 training took 19.50162172317505 seconds [1,mpirank:0,algo-1]<stdout>:starting evaluation [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:55 - INFO - __main__ - printing few sample generations and corresponding labels from eval set [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:55 - INFO - __main__ - prompt | generated | label [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:55 - INFO - __main__ - chatbot:your persona: i work as an electrician. i always sleep 8 hours a day. </s> <s> Which level are you at?</s> | I'm at the top of the ladder. I work for an electrical company. | I received on-the-job training when i first started [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:38:55 - INFO - __main__ - chatbot:your persona: i work as an electrician. i always sleep 8 hours a day. 
</s> <s> Which level are you at? </s> <s> I received on-the-job training when i first started </s> <s> Thats great! How long have you been doing this work? </s> | I've been working as an Electrician for about 5 years now. It's a great job. | For a good number of years now. ... [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:39:02 - INFO - __main__ - chatbot:your persona: i'm a painter and love to create art. i'm a talented singer and have won several competitions. </s> <s> I love the Doors! They have such a unique sound. Do you have a favorite Doors song?</s> | My favorite song of theirs is "When I Was Your Man". What's yours? | It's a tie between People are strange and Love me two times. What's your favorite? [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:39:02 - INFO - __main__ - chatbot:your persona: i'm a painter and love to create art. i'm a talented singer and have won several competitions. </s> <s> I think my favorite is Love Street. It has such a haunting melody. Have you heard that one?</s> | No, I haven't. I'll have to check it out. What genre is it? | Yeah, it's a pretty great song, Jim Morrison was just an amazing songwriter, I aspire to make songs as good as his one day. ... [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:39:17 - INFO - __main__ - {'bleu': 1.7067114414104911} [1,mpirank:0,algo-1]<stdout>:evaluation completed [1,mpirank:0,algo-1]<stderr>:09/21/2022 13:39:17 - INFO - __main__ - Epoch 0 evaluation took 24.294514417648315 seconds [1,mpirank:0,algo-1]<stderr>:Configuration saved in /opt/ml/model/config.json [1,mpirank:0,algo-1]<stderr>:Model weights saved in /opt/ml/model/pytorch_model.bin [1,mpirank:0,algo-1]<stderr>:tokenizer config file saved in /opt/ml/model/tokenizer_config.json [1,mpirank:0,algo-1]<stderr>:Special tokens file saved in /opt/ml/model/special_tokens_map.json [1,mpirank:0,algo-1]<stderr>:#015100%|██████████| 79/79 [00:47<00:00, 1.65it/s] 2022-09-21 13:39:27,753 sagemaker-training-toolkit INFO Waiting for the process to finish and give a return code. 2022-09-21 13:39:27,753 sagemaker-training-toolkit INFO Done waiting for a return code. Received 0 from exiting process. 2022-09-21 13:39:27,754 sagemaker-training-toolkit INFO Reporting training SUCCESS ```
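10. Once the job reports `SUCCESS`, SageMaker uploads everything the script wrote to `/opt/ml/model` (see `--output_dir` above) to S3 as a `model.tar.gz`. A minimal sketch for pulling that artifact back down with the SageMaker SDK; the S3 URI is a placeholder, use the one shown for your training job in the SageMaker console:
```python
from sagemaker.s3 import S3Downloader

# placeholder URI -- replace with the model artifact location of your training job
S3Downloader.download("s3://<your-sagemaker-bucket>/<training-job-name>/output/model.tar.gz", "artifacts/")
```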
notebooks/sagemaker/22_accelerate_sagemaker_examples/README.md/0
{ "file_path": "notebooks/sagemaker/22_accelerate_sagemaker_examples/README.md", "repo_id": "notebooks", "token_count": 3628 }
169
<jupyter_start><jupyter_text>Stable Diffusion on Amazon SageMaker Welcome to this Amazon SageMaker guide on how to use [Stable Diffusion](https://huggingface.co/blog/stable_diffusion) to generate images for a given input prompt. We will deploy [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to Amazon SageMaker for real-time inference using Hugging Face's [🧨 Diffusers library](https://huggingface.co/docs/diffusers/index). What we are going to do: 1. Create Stable Diffusion inference script 2. Create SageMaker `model.tar.gz` artifact 3. Deploy the model to Amazon SageMaker 4. Generate images using the deployed model What is Stable Diffusion? Stable Diffusion is a text-to-image latent diffusion model created by the researchers and engineers from [CompVis](https://github.com/CompVis), [Stability AI](https://stability.ai/) and [LAION](https://laion.ai/). It is trained on 512x512 images from a subset of the [LAION-5B](https://laion.ai/blog/laion-5b/) database. LAION-5B is the largest, freely accessible multi-modal dataset that currently exists. This guide will not explain how the model works. If you are interested, you should check out the [Stable Diffusion with 🧨 Diffusers](https://huggingface.co/blog/stable_diffusion) blog post or [The Annotated Diffusion Model](https://huggingface.co/blog/annotated-diffusion). --- Before we can get started, make sure you have a [Hugging Face user account](https://huggingface.co/join). The account is needed to load the [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) model from the [Hugging Face Hub](https://huggingface.co/). Create an account: https://huggingface.co/join Before we can get started, we have to install the missing dependencies to be able to create our `model.tar.gz` artifact and create our Amazon SageMaker endpoint. We also have to make sure we have the permissions to create our SageMaker endpoint.<jupyter_code>!pip install "sagemaker==2.116.0" "huggingface_hub==0.10.1" --upgrade --quiet<jupyter_output><empty_output><jupyter_text>_If you are going to use SageMaker in a local environment (not SageMaker Studio or Notebook Instances), you need access to an IAM Role with the required permissions for SageMaker. You can find more about it [here](https://docs.aws.amazon.com/sagemaker/latest/dg/sagemaker-roles.html)._<jupyter_code>import sagemaker import boto3 sess = sagemaker.Session() # sagemaker session bucket -> used for uploading data, models and logs # sagemaker will automatically create this bucket if it doesn't exist sagemaker_session_bucket=None if sagemaker_session_bucket is None and sess is not None: # set to default bucket if a bucket name is not given sagemaker_session_bucket = sess.default_bucket() try: role = sagemaker.get_execution_role() except ValueError: iam = boto3.client('iam') role = iam.get_role(RoleName='sagemaker_execution_role')['Role']['Arn'] sess = sagemaker.Session(default_bucket=sagemaker_session_bucket) print(f"sagemaker role arn: {role}") print(f"sagemaker bucket: {sess.default_bucket()}") print(f"sagemaker session region: {sess.boto_region_name}")<jupyter_output>Couldn't call 'get_role' to get Role ARN from role name philippschmid to get Role path.<jupyter_text>Create Stable Diffusion inference script Amazon SageMaker allows us to customize the inference script by providing an `inference.py` file. The `inference.py` file is the entry point to our model. It is responsible for loading the model and handling the inference request. 
If you are used to deploying Hugging Face Transformers that might be new to you. Usually, we just provide the `HF_MODEL_ID` and `HF_TASK` and the Hugging Face DLC takes care of the rest. For `diffusers` that's not yet possible. We have to provide the `inference.py` file and implement the `model_fn` and `predict_fn` functions. If you want to learn more about creating a custom inference script you can check out [Creating document embeddings with Hugging Face's Transformers & Amazon SageMaker](https://www.philschmid.de/custom-inference-huggingface-sagemaker). In addition to the `inference.py` file we also have to provide a `requirements.txt` file. The `requirements.txt` file is used to install the dependencies for our `inference.py` file. The first step is to create a `code/` directory.<jupyter_code>!mkdir code<jupyter_output><empty_output><jupyter_text>Next, we create a `requirements.txt` file and add the `diffusers` library to it.<jupyter_code>%%writefile code/requirements.txt diffusers==0.6.0 transformers==4.23.1<jupyter_output><empty_output><jupyter_text>The last step for our inference handler is to create the `inference.py` file. The `inference.py` file is responsible for loading the model and handling the inference request. The `model_fn` function is called when the model is loaded. The `predict_fn` function is called when we want to do inference. We are using the `diffusers` library to load the model in the `model_fn` and generate 4 images for an input prompt with the `predict_fn`. The `predict_fn` function returns the `4` images as `base64` encoded strings.<jupyter_code>%%writefile code/inference.py import base64 import torch from io import BytesIO from diffusers import StableDiffusionPipeline def model_fn(model_dir): # Load stable diffusion and move it to the GPU pipe = StableDiffusionPipeline.from_pretrained(model_dir, torch_dtype=torch.float16) pipe = pipe.to("cuda") return pipe def predict_fn(data, pipe): # get prompt & parameters prompt = data.pop("inputs", data) # set valid HP for stable diffusion num_inference_steps = data.pop("num_inference_steps", 50) guidance_scale = data.pop("guidance_scale", 7.5) num_images_per_prompt = data.pop("num_images_per_prompt", 4) # run generation with parameters generated_images = pipe( prompt, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, num_images_per_prompt=num_images_per_prompt, )["images"] # create response encoded_images = [] for image in generated_images: buffered = BytesIO() image.save(buffered, format="JPEG") encoded_images.append(base64.b64encode(buffered.getvalue()).decode()) # create response return {"generated_images": encoded_images}<jupyter_output><empty_output><jupyter_text>Create SageMaker `model.tar.gz` artifact To use our `inference.py` we need to bundle it together with our model weights into a `model.tar.gz`. The archive includes all our model artifacts needed to run inference. The `inference.py` script will be placed into a `code/` folder. We will use the `huggingface_hub` SDK to easily download [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) from [Hugging Face](https://huggingface.co/CompVis/stable-diffusion-v1-4) and then upload it to Amazon S3 with the `sagemaker` SDK. Before we can load our model from the Hugging Face Hub we have to make sure that we accepted the license of [CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) to be able to use it. 
[CompVis/stable-diffusion-v1-4](https://huggingface.co/CompVis/stable-diffusion-v1-4) is published under the [CreativeML OpenRAIL-M license](https://huggingface.co/spaces/CompVis/stable-diffusion-license). You can accept the license by clicking on the `Agree and access repository` button on the model page at: https://huggingface.co/CompVis/stable-diffusion-v1-4. _Note: This will give access to the repository for the logged-in user. This user can then be used to generate [HF Tokens](https://huggingface.co/settings/tokens) to load the model programmatically._ Before we can load the model, make sure you have a valid [HF Token](https://huggingface.co/settings/token). You can create a token by going to your [Hugging Face Settings](https://huggingface.co/settings/token) and clicking on the `New token` button. Make sure the environment has enough disk space to store the model; ~30GB should be enough.<jupyter_code>from distutils.dir_util import copy_tree from pathlib import Path from huggingface_hub import snapshot_download import random HF_MODEL_ID="CompVis/stable-diffusion-v1-4" HF_TOKEN="" # your hf token: https://huggingface.co/settings/tokens assert len(HF_TOKEN) > 0, "Please set HF_TOKEN to your huggingface token. You can find it here: https://huggingface.co/settings/tokens" # download snapshot snapshot_dir = snapshot_download(repo_id=HF_MODEL_ID,revision="fp16",use_auth_token=HF_TOKEN) # create model dir model_tar = Path(f"model-{random.getrandbits(16)}") model_tar.mkdir(exist_ok=True) # copy snapshot to model dir copy_tree(snapshot_dir, str(model_tar))<jupyter_output><empty_output><jupyter_text>The next step is to copy the `code/` directory into the `model/` directory.<jupyter_code># copy code/ to model dir copy_tree("code/", str(model_tar.joinpath("code")))<jupyter_output><empty_output><jupyter_text>Before we can upload the model to Amazon S3 we have to create a `model.tar.gz` archive. It is important that the archive directly contains all the files and not a folder with the files. For example, your file should look like this:```model.tar.gz/|- model_index.json|- unet/|- code/```<jupyter_code>import tarfile import os # helper to create the model.tar.gz def compress(tar_dir=None,output_file="model.tar.gz"): parent_dir=os.getcwd() os.chdir(tar_dir) with tarfile.open(os.path.join(parent_dir, output_file), "w:gz") as tar: for item in os.listdir('.'): print(item) tar.add(item, arcname=item) os.chdir(parent_dir) compress(str(model_tar))<jupyter_output><empty_output><jupyter_text>After we created the `model.tar.gz` archive we can upload it to Amazon S3. We will use the `sagemaker` SDK to upload the model to our sagemaker session bucket.<jupyter_code>from sagemaker.s3 import S3Uploader # upload model.tar.gz to s3 s3_model_uri=S3Uploader.upload(local_path="model.tar.gz", desired_s3_uri=f"s3://{sess.default_bucket()}/stable-diffusion-v1-4") print(f"model uploaded to: {s3_model_uri}")<jupyter_output><empty_output><jupyter_text>Deploy the model to Amazon SageMaker After we have uploaded our model archive we can deploy our model to Amazon SageMaker. We will use `HuggingFaceModel` to create our real-time inference endpoint. We are going to deploy the model to a `g4dn.xlarge` instance. The `g4dn.xlarge` instance is a GPU instance with 1 NVIDIA Tesla T4 GPU. 
If you are interested in how you could add autoscaling to your endpoint, you can check out [Going Production: Auto-scaling Hugging Face Transformers with Amazon SageMaker](https://www.philschmid.de/auto-scaling-sagemaker-huggingface).<jupyter_code>from sagemaker.huggingface.model import HuggingFaceModel # create Hugging Face Model Class huggingface_model = HuggingFaceModel( model_data=s3_model_uri, # path to your model and script role=role, # iam role with permissions to create an Endpoint transformers_version="4.17", # transformers version used pytorch_version="1.10", # pytorch version used py_version='py38', # python version used ) # deploy the endpoint predictor = huggingface_model.deploy( initial_instance_count=1, instance_type="ml.g4dn.xlarge" )<jupyter_output>--------------!<jupyter_text>Generate images using the deployed model The `.deploy()` method returns a `HuggingFacePredictor` object which can be used to request inference. Our endpoint expects a `json` with at least the `inputs` key. The `inputs` key is the input prompt for the model, which will be used to generate the images. Additionally, we can provide `num_inference_steps`, `guidance_scale` & `num_images_per_prompt` to control the generation. The `predictor.predict()` function returns a `json` with the `generated_images` key. The `generated_images` key contains the `4` generated images as `base64` encoded strings. To decode our response we added two small helper functions: `decode_base64_image`, which takes a `base64` encoded string and returns a `PIL.Image` object, and `display_images`, which takes a list of `PIL.Image` objects and displays them.<jupyter_code>from PIL import Image from io import BytesIO from IPython.display import display import base64 import matplotlib.pyplot as plt # helper decoder def decode_base64_image(image_string): base64_image = base64.b64decode(image_string) buffer = BytesIO(base64_image) return Image.open(buffer) # display PIL images as grid def display_images(images=None,columns=3, width=100, height=100): plt.figure(figsize=(width, height)) for i, image in enumerate(images): plt.subplot(int(len(images) / columns + 1), columns, i + 1) plt.axis('off') plt.imshow(image)<jupyter_output><empty_output><jupyter_text>Now, let's generate some images. As an example, let's generate `3` images for the prompt `A dog trying catch a flying pizza art drawn by disney concept artists`. Generating `3` images takes around `30` seconds.<jupyter_code>num_images_per_prompt = 3 prompt = "A dog trying catch a flying pizza art drawn by disney concept artists, golden colour, high quality, highly detailed, elegant, sharp focus" # run prediction response = predictor.predict(data={ "inputs": prompt, "num_images_per_prompt" : num_images_per_prompt } ) # decode images decoded_images = [decode_base64_image(image) for image in response["generated_images"]] # visualize generation display_images(decoded_images)<jupyter_output>/tmp/ipykernel_5489/921716793.py:8: MatplotlibDeprecationWarning: Auto-removal of overlapping axes is deprecated since 3.6 and will be removed two minor releases later; explicitly call ax.remove() as needed. plt.subplot(int(len(images) / columns + 1), columns, i + 1)<jupyter_text>Delete model and endpoint To clean up, we can delete the model and endpoint.<jupyter_code>predictor.delete_model() predictor.delete_endpoint()<jupyter_output><empty_output>
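<jupyter_text>The decoded `PIL.Image` objects live in local memory, so even after the endpoint is deleted you can still persist the generations to disk if you want to keep them. The file names below are arbitrary:<jupyter_code>
# save the generated images locally
for idx, image in enumerate(decoded_images):
    image.save(f"generation-{idx}.png")
<jupyter_output><empty_output>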
notebooks/sagemaker/23_stable_diffusion_inference/sagemaker-notebook.ipynb/0
{ "file_path": "notebooks/sagemaker/23_stable_diffusion_inference/sagemaker-notebook.ipynb", "repo_id": "notebooks", "token_count": 4469 }
170
import os import argparse from transformers import ( AutoModelForCausalLM, AutoTokenizer, set_seed, default_data_collator, BitsAndBytesConfig, Trainer, TrainingArguments, ) from datasets import load_from_disk import torch from peft import PeftConfig, PeftModel def parse_arge(): """Parse the arguments.""" parser = argparse.ArgumentParser() # add model id and dataset path argument parser.add_argument( "--model_id", type=str, default="google/flan-t5-xl", help="Model id to use for training.", ) parser.add_argument("--dataset_path", type=str, default="lm_dataset", help="Path to dataset.") # add training hyperparameters for epochs, batch size, learning rate, and seed parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for.") parser.add_argument( "--per_device_train_batch_size", type=int, default=1, help="Batch size to use for training.", ) parser.add_argument("--lr", type=float, default=5e-5, help="Learning rate to use for training.") parser.add_argument("--seed", type=int, default=42, help="Seed to use for training.") parser.add_argument( "--gradient_checkpointing", type=bool, default=True, help="Path to deepspeed config file.", ) parser.add_argument( "--bf16", type=bool, default=True if torch.cuda.get_device_capability()[0] == 8 else False, help="Whether to use bf16.", ) parser.add_argument( "--merge_weights", type=bool, default=True, help="Whether to merge LoRA weights with base model.", ) args = parser.parse_known_args() return args def create_peft_config(model, gradient_checkpointing=True): from peft import ( get_peft_model, LoraConfig, TaskType, prepare_model_for_kbit_training, ) peft_config = LoraConfig( r=64, lora_alpha=16, target_modules=[ "query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h", ], lora_dropout=0.1, bias="none", task_type=TaskType.CAUSAL_LM, ) # prepare int-4 model for training model = prepare_model_for_kbit_training(model) if gradient_checkpointing: model.gradient_checkpointing_enable() model = get_peft_model(model, peft_config) model.print_trainable_parameters() return model def training_function(args): # set seed set_seed(args.seed) dataset = load_from_disk(args.dataset_path) # load model from the hub with a bnb config bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16, ) model = AutoModelForCausalLM.from_pretrained( args.model_id, use_cache=False if args.gradient_checkpointing else True, # this is needed for gradient checkpointing trust_remote_code=True, # ATTENTION: This allows remote code execution device_map="auto", quantization_config=bnb_config, ) # create peft config model = create_peft_config(model, args.gradient_checkpointing) # Define training args output_dir = "/tmp" training_args = TrainingArguments( output_dir=output_dir, overwrite_output_dir=True, per_device_train_batch_size=args.per_device_train_batch_size, bf16=args.bf16, # Use BF16 if available learning_rate=args.lr, num_train_epochs=args.epochs, gradient_checkpointing=args.gradient_checkpointing, # logging strategies logging_dir=f"{output_dir}/logs", logging_strategy="steps", logging_steps=10, save_strategy="no", ) # Create Trainer instance trainer = Trainer( model=model, args=training_args, train_dataset=dataset, data_collator=default_data_collator, ) # pre-process the model by upcasting the layer norms in float 32 for for name, module in trainer.model.named_modules(): if "norm" in name: module = module.to(torch.float32) # Start training trainer.train() if 
args.merge_weights: # merge adapter weights with base model and save # save int 4 model trainer.model.save_pretrained(output_dir, safe_serialization=False) # clear memory del model del trainer torch.cuda.empty_cache() from peft import AutoPeftModelForCausalLM # load PEFT model in fp16 offload_folder = "/tmp/offload" model = AutoPeftModelForCausalLM.from_pretrained( output_dir, torch_dtype=torch.float16, low_cpu_mem_usage=True, trust_remote_code=True, # ATTENTION: This allows remote code execution ) # Merge LoRA and base model and save merged_model = model.merge_and_unload() merged_model.save_pretrained("/opt/ml/model/",safe_serialization=True) else: trainer.model.save_pretrained("/opt/ml/model/", safe_serialization=True) # save tokenizer for easy inference tokenizer = AutoTokenizer.from_pretrained(args.model_id, trust_remote_code=True) tokenizer.save_pretrained("/opt/ml/model/") def main(): args, _ = parse_arge() training_function(args) if __name__ == "__main__": main()
notebooks/sagemaker/28_train_llms_with_qlora/scripts/run_clm.py/0
{ "file_path": "notebooks/sagemaker/28_train_llms_with_qlora/scripts/run_clm.py", "repo_id": "notebooks", "token_count": 2378 }
171
<!--Copyright 2023 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # IA3 This conceptual guide gives a brief overview of [IA3](https://arxiv.org/abs/2205.05638), a parameter-efficient fine tuning technique that is intended to improve over [LoRA](./lora). To make fine-tuning more efficient, IA3 (Infused Adapter by Inhibiting and Amplifying Inner Activations) rescales inner activations with learned vectors. These learned vectors are injected in the attention and feedforward modules in a typical transformer-based architecture. These learned vectors are the only trainable parameters during fine-tuning, and thus the original weights remain frozen. Dealing with learned vectors (as opposed to learned low-rank updates to a weight matrix like LoRA) keeps the number of trainable parameters much smaller. Being similar to LoRA, IA3 carries many of the same advantages: * IA3 makes fine-tuning more efficient by drastically reducing the number of trainable parameters. (For T0, an IA3 model only has about 0.01% trainable parameters, while even LoRA has > 0.1%) * The original pre-trained weights are kept frozen, which means you can have multiple lightweight and portable IA3 models for various downstream tasks built on top of them. * Performance of models fine-tuned using IA3 is comparable to the performance of fully fine-tuned models. * IA3 does not add any inference latency because adapter weights can be merged with the base model. In principle, IA3 can be applied to any subset of weight matrices in a neural network to reduce the number of trainable parameters. Following the authors' implementation, IA3 weights are added to the key, value and feedforward layers of a Transformer model. To be specific, for transformer models, IA3 weights are added to the outputs of key and value layers, and to the input of the second feedforward layer in each transformer block. Given the target layers for injecting IA3 parameters, the number of trainable parameters can be determined based on the size of the weight matrices. ## Common IA3 parameters in PEFT As with other methods supported by PEFT, to fine-tune a model using IA3, you need to: 1. Instantiate a base model. 2. Create a configuration (`IA3Config`) where you define IA3-specific parameters. 3. Wrap the base model with `get_peft_model()` to get a trainable `PeftModel`. 4. Train the `PeftModel` as you normally would train the base model. `IA3Config` allows you to control how IA3 is applied to the base model through the following parameters: - `target_modules`: The modules (for example, attention blocks) to apply the IA3 vectors. - `feedforward_modules`: The list of modules to be treated as feedforward layers in `target_modules`. While learned vectors are multiplied with the output activation for attention blocks, the vectors are multiplied with the input for classic feedforward layers. 
Note that `feedforward_modules` must be a subset of `target_modules`. - `modules_to_save`: List of modules apart from IA3 layers to be set as trainable and saved in the final checkpoint. These typically include model's custom head that is randomly initialized for the fine-tuning task. ## Example Usage For the task of sequence classification, one can initialize the IA3 config for a Llama model as follows: ```py peft_config = IA3Config( task_type=TaskType.SEQ_CLS, target_modules=["k_proj", "v_proj", "down_proj"], feedforward_modules=["down_proj"] ) ```
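To get a feel for how few parameters this actually trains, you can wrap a base model with the `peft_config` above via `get_peft_model` and print the trainable parameter count. This is only a sketch; the checkpoint below is an example, and any model whose module names match `target_modules` will work:

```py
from transformers import AutoModelForSequenceClassification
from peft import get_peft_model

# example checkpoint -- substitute any Llama-style model you have access to
base_model = AutoModelForSequenceClassification.from_pretrained("meta-llama/Llama-2-7b-hf", num_labels=2)
model = get_peft_model(base_model, peft_config)
model.print_trainable_parameters()
```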
peft/docs/source/conceptual_guides/ia3.md/0
{ "file_path": "peft/docs/source/conceptual_guides/ia3.md", "repo_id": "peft", "token_count": 1030 }
172
<!--Copyright 2024 The HuggingFace Team. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ⚠️ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be rendered properly in your Markdown viewer. --> # IA3 [IA3](../conceptual_guides/ia3) multiplies the model's activations (the keys and values in the self-attention and encoder-decoder attention blocks, and the intermediate activation of the position-wise feedforward network) by three learned vectors. This PEFT method introduces an even smaller number of trainable parameters than LoRA which introduces weight matrices instead of vectors. The original model's parameters are kept frozen and only these vectors are updated. As a result, it is faster, cheaper and more efficient to finetune for a new downstream task. This guide will show you how to train a sequence-to-sequence model with IA3 to *generate a sentiment* given some financial news. <Tip> Some familiarity with the general process of training a sequence-to-sequence would be really helpful and allow you to focus on how to apply IA3. If you’re new, we recommend taking a look at the [Translation](https://huggingface.co/docs/transformers/tasks/translation) and [Summarization](https://huggingface.co/docs/transformers/tasks/summarization) guides first from the Transformers documentation. When you’re ready, come back and see how easy it is to drop PEFT in to your training! </Tip> ## Dataset You'll use the sentences_allagree subset of the [financial_phrasebank](https://huggingface.co/datasets/financial_phrasebank) dataset. This subset contains financial news with 100% annotator agreement on the sentiment label. Take a look at the [dataset viewer](https://huggingface.co/datasets/financial_phrasebank/viewer/sentences_allagree) for a better idea of the data and sentences you'll be working with. Load the dataset with the [`~datasets.load_dataset`] function. This subset of the dataset only contains a train split, so use the [`~datasets.train_test_split`] function to create a train and validation split. Create a new `text_label` column so it is easier to understand what the `label` values `0`, `1`, and `2` mean. ```py from datasets import load_dataset ds = load_dataset("financial_phrasebank", "sentences_allagree") ds = ds["train"].train_test_split(test_size=0.1) ds["validation"] = ds["test"] del ds["test"] classes = ds["train"].features["label"].names ds = ds.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) ds["train"][0] {'sentence': 'It will be operated by Nokia , and supported by its Nokia NetAct network and service management system .', 'label': 1, 'text_label': 'neutral'} ``` Load a tokenizer and create a preprocessing function that: 1. tokenizes the inputs, pads and truncates the sequence to the `max_length` 2. apply the same tokenizer to the labels but with a shorter `max_length` that corresponds to the label 3. 
mask the padding tokens ```py from transformers import AutoTokenizer text_column = "sentence" label_column = "text_label" max_length = 128 tokenizer = AutoTokenizer.from_pretrained("bigscience/mt0-large") def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs ``` Use the [`~datasets.Dataset.map`] function to apply the preprocessing function to the entire dataset. ```py processed_ds = ds.map( preprocess_function, batched=True, num_proc=1, remove_columns=ds["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) ``` Create a training and evaluation [`DataLoader`](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader), and set `pin_memory=True` to speed up data transfer to the GPU during training if your dataset samples are on a CPU. ```py from torch.utils.data import DataLoader from transformers import default_data_collator train_ds = processed_ds["train"] eval_ds = processed_ds["validation"] batch_size = 8 train_dataloader = DataLoader( train_ds, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_ds, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) ``` ## Model Now you can load a pretrained model to use as the base model for IA3. This guide uses the [bigscience/mt0-large](https://huggingface.co/bigscience/mt0-large) model, but you can use any sequence-to-sequence model you like. ```py from transformers import AutoModelForSeq2SeqLM model = AutoModelForSeq2SeqLM.from_pretrained("bigscience/mt0-large") ``` ### PEFT configuration and model All PEFT methods need a configuration that contains and specifies all the parameters for how the PEFT method should be applied. Create an [`IA3Config`] with the task type and set the inference mode to `False`. You can find additional parameters for this configuration in the [API reference](../package_reference/ia3#ia3config). <Tip> Call the [`~PeftModel.print_trainable_parameters`] method to compare the number of trainable parameters of [`PeftModel`] versus the number of parameters in the base model! </Tip> Once the configuration is setup, pass it to the [`get_peft_model`] function along with the base model to create a trainable [`PeftModel`]. ```py from peft import IA3Config, get_peft_model peft_config = IA3Config(task_type="SEQ_2_SEQ_LM") model = get_peft_model(model, peft_config) model.print_trainable_parameters() "trainable params: 282,624 || all params: 1,229,863,936 || trainable%: 0.022980103060766553" ``` ### Training Set up an optimizer and learning rate scheduler. ```py import torch from transformers import get_linear_schedule_with_warmup lr = 8e-3 num_epochs = 3 optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) ``` Move the model to the GPU and create a training loop that reports the loss and perplexity for each epoch. 
```py from tqdm import tqdm device = "cuda" model = model.to(device) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}") ``` ## Share your model After training is complete, you can upload your model to the Hub with the [`~transformers.PreTrainedModel.push_to_hub`] method. You'll need to login to your Hugging Face account first and enter your token when prompted. ```py from huggingface_hub import notebook_login account = <your-hf-account-name> peft_model_id = f"{account}/mt0-large-ia3" model.push_to_hub(peft_model_id) ``` ## Inference To load the model for inference, use the [`~AutoPeftModelForSeq2SeqLM.from_pretrained`] method. Let's also load a sentence of financial news from the dataset to generate a sentiment for. ```py from peft import AutoPeftModelForSeq2SeqLM model = AutoPeftModelForSeq2SeqLM.from_pretrained("<your-hf-account-name>/mt0-large-ia3").to("cuda") tokenizer = AutoTokenizer.from_pretrained("bigscience/mt0-large") i = 15 inputs = tokenizer(ds["validation"][text_column][i], return_tensors="pt") print(ds["validation"][text_column][i]) "The robust growth was the result of the inclusion of clothing chain Lindex in the Group in December 2007 ." ``` Call the [`~transformers.GenerationMixin.generate`] method to generate the predicted sentiment label. ```py with torch.no_grad(): inputs = {k: v.to(device) for k, v in inputs.items()} outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)) ['positive'] ```
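To get a rough idea of quality beyond a single example, you can loop over the whole validation split and compare the generated labels against `text_label`. This is only a sketch that reuses the objects defined above:

```py
correct = 0
for example in ds["validation"]:
    inputs = tokenizer(example[text_column], return_tensors="pt").to(device)
    with torch.no_grad():
        outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10)
    prediction = tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True)[0]
    correct += int(prediction.strip() == example[label_column].strip())
print(f"accuracy: {100 * correct / len(ds['validation']):.2f}%")
```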
peft/docs/source/task_guides/ia3.md/0
{ "file_path": "peft/docs/source/task_guides/ia3.md", "repo_id": "peft", "token_count": 3197 }
173
<jupyter_start><jupyter_code>from transformers import AutoModelForSeq2SeqLM from peft import get_peft_config, get_peft_model, get_peft_model_state_dict, LoraConfig, TaskType import torch from datasets import load_dataset import os os.environ["TOKENIZERS_PARALLELISM"] = "false" from transformers import AutoTokenizer from torch.utils.data import DataLoader from transformers import default_data_collator, get_linear_schedule_with_warmup from tqdm import tqdm from datasets import load_dataset device = "cuda" model_name_or_path = "bigscience/mt0-large" tokenizer_name_or_path = "bigscience/mt0-large" checkpoint_name = "financial_sentiment_analysis_lora_v1.pt" text_column = "sentence" label_column = "text_label" max_length = 128 lr = 1e-3 num_epochs = 3 batch_size = 8 # creating model peft_config = LoraConfig(task_type=TaskType.SEQ_2_SEQ_LM, inference_mode=False, r=8, lora_alpha=32, lora_dropout=0.1) model = AutoModelForSeq2SeqLM.from_pretrained(model_name_or_path) model = get_peft_model(model, peft_config) model.print_trainable_parameters() model # loading dataset dataset = load_dataset("financial_phrasebank", "sentences_allagree") dataset = dataset["train"].train_test_split(test_size=0.1) dataset["validation"] = dataset["test"] del dataset["test"] classes = dataset["train"].features["label"].names dataset = dataset.map( lambda x: {"text_label": [classes[label] for label in x["label"]]}, batched=True, num_proc=1, ) dataset["train"][0] # data preprocessing tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) def preprocess_function(examples): inputs = examples[text_column] targets = examples[label_column] model_inputs = tokenizer(inputs, max_length=max_length, padding="max_length", truncation=True, return_tensors="pt") labels = tokenizer(targets, max_length=3, padding="max_length", truncation=True, return_tensors="pt") labels = labels["input_ids"] labels[labels == tokenizer.pad_token_id] = -100 model_inputs["labels"] = labels return model_inputs processed_datasets = dataset.map( preprocess_function, batched=True, num_proc=1, remove_columns=dataset["train"].column_names, load_from_cache_file=False, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] train_dataloader = DataLoader( train_dataset, shuffle=True, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True ) eval_dataloader = DataLoader(eval_dataset, collate_fn=default_data_collator, batch_size=batch_size, pin_memory=True) # optimizer and lr scheduler optimizer = torch.optim.AdamW(model.parameters(), lr=lr) lr_scheduler = get_linear_schedule_with_warmup( optimizer=optimizer, num_warmup_steps=0, num_training_steps=(len(train_dataloader) * num_epochs), ) # training and evaluation model = model.to(device) for epoch in range(num_epochs): model.train() total_loss = 0 for step, batch in enumerate(tqdm(train_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} outputs = model(**batch) loss = outputs.loss total_loss += loss.detach().float() loss.backward() optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() eval_loss = 0 eval_preds = [] for step, batch in enumerate(tqdm(eval_dataloader)): batch = {k: v.to(device) for k, v in batch.items()} with torch.no_grad(): outputs = model(**batch) loss = outputs.loss eval_loss += loss.detach().float() eval_preds.extend( tokenizer.batch_decode(torch.argmax(outputs.logits, -1).detach().cpu().numpy(), skip_special_tokens=True) ) eval_epoch_loss = eval_loss / len(eval_dataloader) 
eval_ppl = torch.exp(eval_epoch_loss) train_epoch_loss = total_loss / len(train_dataloader) train_ppl = torch.exp(train_epoch_loss) print(f"{epoch=}: {train_ppl=} {train_epoch_loss=} {eval_ppl=} {eval_epoch_loss=}") # print accuracy correct = 0 total = 0 for pred, true in zip(eval_preds, dataset["validation"]["text_label"]): if pred.strip() == true.strip(): correct += 1 total += 1 accuracy = correct / total * 100 print(f"{accuracy=} % on the evaluation dataset") print(f"{eval_preds[:10]=}") print(f"{dataset['validation']['text_label'][:10]=}") # saving model peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" model.save_pretrained(peft_model_id) ckpt = f"{peft_model_id}/adapter_model.bin" !du -h $ckpt from peft import PeftModel, PeftConfig peft_model_id = f"{model_name_or_path}_{peft_config.peft_type}_{peft_config.task_type}" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForSeq2SeqLM.from_pretrained(config.base_model_name_or_path) model = PeftModel.from_pretrained(model, peft_model_id) model.eval() i = 13 inputs = tokenizer(dataset["validation"][text_column][i], return_tensors="pt") print(dataset["validation"][text_column][i]) print(inputs) with torch.no_grad(): outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=10) print(outputs) print(tokenizer.batch_decode(outputs.detach().cpu().numpy(), skip_special_tokens=True))<jupyter_output>- Demand for fireplace products was lower than expected , especially in Germany . {'input_ids': tensor([[ 259, 264, 259, 82903, 332, 1090, 10040, 10371, 639, 259, 19540, 2421, 259, 25505, 259, 261, 259, 21230, 281, 17052, 259, 260, 1]]), 'attention_mask': tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])} tensor([[ 0, 259, 32588, 1]]) ['negative']
peft/examples/conditional_generation/peft_lora_seq2seq.ipynb/0
{ "file_path": "peft/examples/conditional_generation/peft_lora_seq2seq.ipynb", "repo_id": "peft", "token_count": 2336 }
174
<jupyter_start><jupyter_text>Fine-tune large models using 🤗 `peft` adapters, `transformers` & `bitsandbytes`In this tutorial we will cover how we can fine-tune large language models using the very recent `peft` library and `bitsandbytes` for loading large models in 8-bit.The fine-tuning method will rely on a recent method called "Low Rank Adapters" (LoRA), instead of fine-tuning the entire model you just have to fine-tune these adapters and load them properly inside the model. After fine-tuning the model you can also share your adapters on the 🤗 Hub and load them very easily. Let's get started! Install requirementsFirst, run the cells below to install the requirements:<jupyter_code>!pip install -q bitsandbytes datasets accelerate !pip install -q git+https://github.com/huggingface/transformers.git@main git+https://github.com/huggingface/peft.git<jupyter_output> ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 76.3/76.3 MB 10.3 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 462.8/462.8 KB 25.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 199.7/199.7 KB 25.5 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 190.3/190.3 KB 23.1 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 213.0/213.0 KB 26.4 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 132.0/132.0 KB 18.5 MB/s eta 0:00:00  ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 140.6/140.6 KB 20.2 MB/s eta 0:00:00 [?25h Installing build dependencies ... [?25l[?25hdone Getting requirements to build wheel ... [?25l[?25hdone Preparing metadata (pyproject.tom[...]<jupyter_text>Model loadingHere let's load the `opt-6.7b` model, its weights in half-precision (float16) are about 13GB on the Hub! If we load them in 8-bit we would require around 7GB of memory instead.<jupyter_code>import os import torch import torch.nn as nn import bitsandbytes as bnb from transformers import AutoTokenizer, AutoConfig, AutoModelForCausalLM, BitsAndBytesConfig model = AutoModelForCausalLM.from_pretrained("facebook/opt-6.7b", quantization_config=BitsAndBytesConfig(load_in_8bit=True)) tokenizer = AutoTokenizer.from_pretrained("facebook/opt-6.7b")<jupyter_output><empty_output><jupyter_text>Prepare model for trainingSome pre-processing needs to be done before training such an int8 model using `peft`, therefore let's import an utiliy function `prepare_model_for_int8_training` that will: - Casts all the non `int8` modules to full precision (`fp32`) for stability- Add a `forward_hook` to the input embedding layer to enable gradient computation of the input hidden states- Enable gradient checkpointing for more memory-efficient training<jupyter_code>from peft import prepare_model_for_int8_training model = prepare_model_for_int8_training(model)<jupyter_output><empty_output><jupyter_text>Apply LoRAHere comes the magic with `peft`! Let's load a `PeftModel` and specify that we are going to use low-rank adapters (LoRA) using `get_peft_model` utility function from `peft`.<jupyter_code>def print_trainable_parameters(model): """ Prints the number of trainable parameters in the model. 
""" trainable_params = 0 all_param = 0 for _, param in model.named_parameters(): all_param += param.numel() if param.requires_grad: trainable_params += param.numel() print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) from peft import LoraConfig, get_peft_model config = LoraConfig( r=16, lora_alpha=32, target_modules=["q_proj", "v_proj"], lora_dropout=0.05, bias="none", task_type="CAUSAL_LM" ) model = get_peft_model(model, config) print_trainable_parameters(model)<jupyter_output>trainable params: 8388608 || all params: 6666862592 || trainable%: 0.12582542214183376<jupyter_text>Training<jupyter_code>import transformers from datasets import load_dataset data = load_dataset("Abirate/english_quotes") data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True) trainer = transformers.Trainer( model=model, train_dataset=data["train"], args=transformers.TrainingArguments( per_device_train_batch_size=4, gradient_accumulation_steps=4, warmup_steps=100, max_steps=200, learning_rate=2e-4, fp16=True, logging_steps=1, output_dir="outputs", ), data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False), ) model.config.use_cache = False # silence the warnings. Please re-enable for inference! trainer.train()<jupyter_output><empty_output><jupyter_text>Share adapters on the 🤗 Hub<jupyter_code>from huggingface_hub import notebook_login notebook_login() model.push_to_hub("ybelkada/opt-6.7b-lora", use_auth_token=True)<jupyter_output>Uploading the following files to ybelkada/opt-6.7b-lora: adapter_config.json,adapter_model.bin<jupyter_text>Load adapters from the HubYou can also directly load adapters from the Hub using the commands below:<jupyter_code>import torch from peft import PeftModel, PeftConfig from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig peft_model_id = "ybelkada/opt-6.7b-lora" config = PeftConfig.from_pretrained(peft_model_id) model = AutoModelForCausalLM.from_pretrained( config.base_model_name_or_path, return_dict=True, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto" ) tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path) # Load the Lora model model = PeftModel.from_pretrained(model, peft_model_id)<jupyter_output><empty_output><jupyter_text>InferenceYou can then directly use the trained model or the model that you have loaded from the 🤗 Hub for inference as you would do it usually in `transformers`.<jupyter_code>batch = tokenizer("Two things are infinite: ", return_tensors="pt") with torch.cuda.amp.autocast(): output_tokens = model.generate(**batch, max_new_tokens=50) print("\n\n", tokenizer.decode(output_tokens[0], skip_special_tokens=True))<jupyter_output>/home/marc/anaconda3/envs/accelerate/lib/python3.10/site-packages/transformers/generation/utils.py:1448: UserWarning: You are calling .generate() with the `input_ids` being on a device type different than your model's device. `input_ids` is on cpu, whereas the model is on cuda. You may experience unexpected behaviors or slower generation. Please make sure that you have put `input_ids` to the correct device by calling for example input_ids = input_ids.to('cuda') before running `.generate()`. warnings.warn(
peft/examples/int8_training/Finetune_opt_bnb_peft.ipynb/0
{ "file_path": "peft/examples/int8_training/Finetune_opt_bnb_peft.ipynb", "repo_id": "peft", "token_count": 2755 }
175
<jupyter_start><jupyter_code>!pip install -q git+https://github.com/huggingface/transformers.git !pip install -q git+https://github.com/huggingface/peft.git !pip install -q git+https://github.com/huggingface/accelerate.git@main !pip install huggingface_hub !pip install bitsandbytes !pip install SentencePiece import os os.environ["CUDA_VISIBLE_DEVICES"] = "0" from huggingface_hub import notebook_login import torch notebook_login() from peft import PeftModel from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig, BitsAndBytesConfig model_name = "decapoda-research/llama-7b-hf" tokenizer = LlamaTokenizer.from_pretrained(model_name) model = LlamaForCausalLM.from_pretrained(model_name, quantization_config=BitsAndBytesConfig(load_in_8bit=True), device_map="auto", use_auth_token=True) %%time model = PeftModel.from_pretrained(model, "tloen/alpaca-lora-7b", adapter_name="eng_alpaca") %%time model.load_adapter("22h/cabrita-lora-v0-1", adapter_name="portuguese_alpaca") model model.to("cuda") import torch device = "cuda" def generate_prompt(instruction, input=None): if input: return f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Input: {input} ### Response:""" else: return f"""Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: {instruction} ### Response:""" def evaluate( instruction, input=None, temperature=0.1, top_p=0.75, top_k=40, num_beams=4, max_new_tokens=256, **kwargs, ): prompt = generate_prompt(instruction, input) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to(device) generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, no_repeat_ngram_size=3, **kwargs, ) with torch.no_grad(): generation_output = model.generate( input_ids=input_ids, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=max_new_tokens, ) s = generation_output.sequences[0] output = tokenizer.decode(s) return output.split("### Response:")[1].strip() %%time model.set_adapter("eng_alpaca") instruction = "Tell me about alpacas." print(evaluate(instruction)) %%time model.set_adapter("portuguese_alpaca") instruction = "Invente uma desculpa criativa pra dizer que não preciso ir à festa." print(evaluate(instruction)) with model.disable_adapter(): instruction = "Invente uma desculpa criativa pra dizer que não preciso ir à festa." print(evaluate(instruction))<jupyter_output>I'm sorry, but I can't go to the party. I'm sick. I have a cold. I don't feel well. I need to stay at home and rest. I have a lot of homework to do. My dog ate my homework. My homework is too hard. I didn't have time to do it. It's too late. I forgot about it. My parents won't let me go. My parents are out of town. They're on vacation. They have to work. They are sick. They need to take care of my brother. They're not home. They went to the grocery store. They took the car to the mechanic. They had to go to a meeting. They were in a hurry. They forgot about me. Their car broke down. Their car ran out of gas. They got a flat tire. They couldn't find a parking space. They didn' t have enough money. They lost their wallet. It's raining. The roads are icy. There's a blizzard. There are too many cars on the road. There was an accident.
peft/examples/multi_adapter_examples/PEFT_Multi_LoRA_Inference.ipynb/0
{ "file_path": "peft/examples/multi_adapter_examples/PEFT_Multi_LoRA_Inference.ipynb", "repo_id": "peft", "token_count": 1344 }
176
# Supervised Fine-tuning (SFT) with PEFT

In this example, we'll see how to use [PEFT](https://github.com/huggingface/peft) to perform SFT on various distributed setups.

## Single GPU SFT with QLoRA
QLoRA uses 4-bit quantization of the base model to drastically reduce the GPU memory consumed by the base model while using LoRA for parameter-efficient fine-tuning. The command to use QLoRA is present at [run_peft.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_peft.sh). A minimal illustrative code sketch of this recipe is included at the end of this README.

Note:
1. At present, `use_reentrant` needs to be `True` when using gradient checkpointing with QLoRA, else QLoRA leads to high GPU memory consumption.

## Single GPU SFT with QLoRA using Unsloth
[Unsloth](https://github.com/unslothai/unsloth) enables finetuning Mistral/Llama 2-5x faster with 70% less memory. It achieves this by reducing data upcasting, using Flash Attention 2, custom Triton kernels for RoPE embeddings, RMS LayerNorm and cross-entropy loss, and clever manual autograd computation to reduce the FLOPs during QLoRA finetuning. The image below lists the optimizations from the Unsloth blogpost [mistral-benchmark](https://unsloth.ai/blog/mistral-benchmark). The command to use QLoRA with Unsloth is present at [run_unsloth_peft.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_unsloth_peft.sh).

<div class="flex justify-center">
    <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/peft/Unsloth.png"/>
</div>
<small>Optimizations in Unsloth to speed up QLoRA finetuning while reducing GPU memory usage</small>

## Multi-GPU SFT with QLoRA
To speed up QLoRA finetuning when you have access to multiple GPUs, look at the launch command at [run_peft_multigpu.sh](https://github.com/huggingface/peft/blob/main/examples/sft/run_peft_multigpu.sh). This example performs DDP on 8 GPUs.

Note:
1. At present, `use_reentrant` needs to be `False` when using gradient checkpointing with multi-GPU QLoRA, else it will lead to errors. However, this leads to huge GPU memory consumption.

## Multi-GPU SFT with LoRA and DeepSpeed
When you have access to multiple GPUs, it would be better to use normal LoRA with DeepSpeed/FSDP. To use LoRA with DeepSpeed, refer to the docs at [PEFT with DeepSpeed](https://huggingface.co/docs/peft/accelerate/deepspeed).

## Multi-GPU SFT with LoRA and FSDP
When you have access to multiple GPUs, it would be better to use normal LoRA with DeepSpeed/FSDP. To use LoRA with FSDP, refer to the docs at [PEFT with FSDP](https://huggingface.co/docs/peft/accelerate/fsdp).
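## Minimal single-GPU QLoRA sketch
The launch scripts linked above contain the full recipes. For orientation only, here is a minimal sketch of the single-GPU QLoRA setup in plain Python. It is *not* the contents of `run_peft.sh`: the base model name, dataset, target modules, and hyperparameters below are illustrative assumptions, and the training loop mirrors the plain `Trainer` pattern used elsewhere in the PEFT examples rather than `SFTTrainer`.

```python
# Minimal single-GPU QLoRA sketch (illustrative assumptions; not the contents of run_peft.sh).
import torch
import transformers
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

model_name = "mistralai/Mistral-7B-v0.1"  # assumed base model; substitute your own

# 4-bit NF4 quantization of the base model -- the "Q" in QLoRA
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,  # assumes an Ampere-class (or newer) GPU
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token  # needed for padding during collation
model = AutoModelForCausalLM.from_pretrained(
    model_name, quantization_config=bnb_config, device_map={"": 0}
)

# Cast norm/output layers to fp32 and enable gradient checkpointing.
# Note use_reentrant=True for the single-GPU case, as mentioned in the note above.
model = prepare_model_for_kbit_training(
    model,
    use_gradient_checkpointing=True,
    gradient_checkpointing_kwargs={"use_reentrant": True},
)

# LoRA adapters on the attention projections (target modules are an assumption; adjust per architecture)
peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.1,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj"],
)
model = get_peft_model(model, peft_config)
model.print_trainable_parameters()

# Tiny illustrative dataset and a plain Trainer loop
data = load_dataset("Abirate/english_quotes")
data = data.map(lambda samples: tokenizer(samples["quote"]), batched=True)

trainer = transformers.Trainer(
    model=model,
    train_dataset=data["train"],
    args=transformers.TrainingArguments(
        per_device_train_batch_size=4,
        gradient_accumulation_steps=4,
        max_steps=100,
        learning_rate=2e-4,
        bf16=True,
        logging_steps=10,
        output_dir="qlora-sft-outputs",
    ),
    data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
)
model.config.use_cache = False  # silence gradient-checkpointing warnings; re-enable for inference
trainer.train()
model.save_pretrained("qlora-sft-adapter")  # saves only the small adapter weights
```

The key point is the order of operations: quantize the base model first, call `prepare_model_for_kbit_training`, and only then wrap the model with `get_peft_model` so that only the LoRA parameters remain trainable.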
peft/examples/sft/README.md/0
{ "file_path": "peft/examples/sft/README.md", "repo_id": "peft", "token_count": 807 }
177
import argparse import json import logging import os from collections import Counter from dataclasses import dataclass from operator import attrgetter from typing import Dict, List, Optional, Union import safetensors import torch import torch.nn as nn from diffusers import UNet2DConditionModel from transformers import CLIPTextModel from peft import LoHaConfig, LoKrConfig, LoraConfig, PeftType, get_peft_model, set_peft_model_state_dict from peft.tuners.lokr.layer import factorization # Default kohya_ss LoRA replacement modules # https://github.com/kohya-ss/sd-scripts/blob/c924c47f374ac1b6e33e71f82948eb1853e2243f/networks/lora.py#L661 UNET_TARGET_REPLACE_MODULE = ["Transformer2DModel", "Attention"] UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"] TEXT_ENCODER_TARGET_REPLACE_MODULE = ["CLIPAttention", "CLIPMLP"] PREFIX_UNET = "lora_unet" PREFIX_TEXT_ENCODER = "lora_te" @dataclass class LoRAInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lora_A: Optional[torch.Tensor] = None lora_B: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if self.lora_A is None or self.lora_B is None: raise ValueError("At least one of lora_A or lora_B is None, they must both be provided") return { f"base_model.model.{self.peft_key}.lora_A.weight": self.lora_A, f"base_model.model.{self.peft_key}.lora_B.weight": self.lora_B, } @dataclass class LoHaInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None hada_w1_a: Optional[torch.Tensor] = None hada_w1_b: Optional[torch.Tensor] = None hada_w2_a: Optional[torch.Tensor] = None hada_w2_b: Optional[torch.Tensor] = None hada_t1: Optional[torch.Tensor] = None hada_t2: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if self.hada_w1_a is None or self.hada_w1_b is None or self.hada_w2_a is None or self.hada_w2_b is None: raise ValueError( "At least one of hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b is missing, they all must be provided" ) state_dict = { f"base_model.model.{self.peft_key}.hada_w1_a": self.hada_w1_a, f"base_model.model.{self.peft_key}.hada_w1_b": self.hada_w1_b, f"base_model.model.{self.peft_key}.hada_w2_a": self.hada_w2_a, f"base_model.model.{self.peft_key}.hada_w2_b": self.hada_w2_b, } if not ( (self.hada_t1 is None and self.hada_t2 is None) or (self.hada_t1 is not None and self.hada_t2 is not None) ): raise ValueError("hada_t1 and hada_t2 must be either both present or not present at the same time") if self.hada_t1 is not None and self.hada_t2 is not None: state_dict[f"base_model.model.{self.peft_key}.hada_t1"] = self.hada_t1 state_dict[f"base_model.model.{self.peft_key}.hada_t2"] = self.hada_t2 return state_dict @dataclass class LoKrInfo: kohya_key: str peft_key: str alpha: Optional[float] = None rank: Optional[int] = None lokr_w1: Optional[torch.Tensor] = None lokr_w1_a: Optional[torch.Tensor] = None lokr_w1_b: Optional[torch.Tensor] = None lokr_w2: Optional[torch.Tensor] = None lokr_w2_a: Optional[torch.Tensor] = None lokr_w2_b: Optional[torch.Tensor] = None lokr_t2: Optional[torch.Tensor] = None def peft_state_dict(self) -> Dict[str, torch.Tensor]: if (self.lokr_w1 is None) and ((self.lokr_w1_a is None) or (self.lokr_w1_b is None)): raise ValueError("Either lokr_w1 or both lokr_w1_a and lokr_w1_b should be provided") if (self.lokr_w2 is None) and ((self.lokr_w2_a is None) or (self.lokr_w2_b is None)): raise ValueError("Either lokr_w2 or both lokr_w2_a and lokr_w2_b should 
be provided") state_dict = {} if self.lokr_w1 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w1"] = self.lokr_w1 elif self.lokr_w1_a is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w1_a"] = self.lokr_w1_a state_dict[f"base_model.model.{self.peft_key}.lokr_w1_b"] = self.lokr_w1_b if self.lokr_w2 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w2"] = self.lokr_w2 elif self.lokr_w2_a is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_w2_a"] = self.lokr_w2_a state_dict[f"base_model.model.{self.peft_key}.lokr_w2_b"] = self.lokr_w2_b if self.lokr_t2 is not None: state_dict[f"base_model.model.{self.peft_key}.lokr_t2"] = self.lokr_t2 return state_dict def construct_peft_loraconfig(info: Dict[str, LoRAInfo], **kwargs) -> LoraConfig: """Constructs LoraConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoRAInfo]): Information extracted from adapter checkpoint Returns: LoraConfig: config for constructing LoRA """ # Unpack all ranks and alphas ranks = {key: val.rank for key, val in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) lora_alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != lora_alpha, alphas.items()), key=lambda x: x[0])) config = LoraConfig( r=r, lora_alpha=lora_alpha, target_modules=target_modules, lora_dropout=0.0, bias="none", init_lora_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, ) return config def construct_peft_lohaconfig(info: Dict[str, LoHaInfo], **kwargs) -> LoHaConfig: """Constructs LoHaConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoHaInfo]): Information extracted from adapter checkpoint Returns: LoHaConfig: config for constructing LoHA """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0])) # Determine whether any of modules have effective conv2d decomposition use_effective_conv2d = any((val.hada_t1 is not None) or (val.hada_t2 is not None) for val in info.values()) config = LoHaConfig( r=r, alpha=alpha, target_modules=target_modules, rank_dropout=0.0, module_dropout=0.0, init_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, use_effective_conv2d=use_effective_conv2d, ) return config def construct_peft_lokrconfig(info: Dict[str, LoKrInfo], decompose_factor: int = -1, **kwargs) -> LoKrConfig: """Constructs LoKrConfig from data extracted from adapter checkpoint Args: info (Dict[str, LoKrInfo]): Information extracted from adapter checkpoint Returns: LoKrConfig: config for constructing LoKr """ # Unpack all ranks and alphas ranks = {x[0]: x[1].rank for x in 
info.items()} alphas = {x[0]: x[1].alpha or x[1].rank for x in info.items()} # Determine which modules needs to be transformed target_modules = sorted(info.keys()) # Determine most common rank and alpha r = int(Counter(ranks.values()).most_common(1)[0][0]) alpha = Counter(alphas.values()).most_common(1)[0][0] # Determine which modules have different rank and alpha rank_pattern = dict(sorted(filter(lambda x: x[1] != r, ranks.items()), key=lambda x: x[0])) alpha_pattern = dict(sorted(filter(lambda x: x[1] != alpha, alphas.items()), key=lambda x: x[0])) # Determine whether any of modules have effective conv2d decomposition use_effective_conv2d = any((val.lokr_t2 is not None) for val in info.values()) # decompose_both should be enabled if any w1 matrix in any layer is decomposed into 2 decompose_both = any((val.lokr_w1_a is not None and val.lokr_w1_b is not None) for val in info.values()) # Determining decompose factor is a bit tricky (but it is most often -1) # Check that decompose_factor is equal to provided for val in info.values(): # Determine shape of first matrix if val.lokr_w1 is not None: w1_shape = tuple(val.lokr_w1.shape) else: w1_shape = (val.lokr_w1_a.shape[0], val.lokr_w1_b.shape[1]) # Determine shape of second matrix if val.lokr_w2 is not None: w2_shape = tuple(val.lokr_w2.shape[:2]) elif val.lokr_t2 is not None: w2_shape = (val.lokr_w2_a.shape[1], val.lokr_w2_b.shape[1]) else: # We may iterate over Conv2d layer, for which second item in shape is multiplied by ksize^2 w2_shape = (val.lokr_w2_a.shape[0], val.lokr_w2_b.shape[1]) # We need to check, whether decompose_factor is really -1 or not shape = (w1_shape[0], w2_shape[0]) if factorization(shape[0] * shape[1], factor=-1) != shape: raise ValueError("Cannot infer decompose_factor, probably it is not equal to -1") config = LoKrConfig( r=r, alpha=alpha, target_modules=target_modules, rank_dropout=0.0, module_dropout=0.0, init_weights=False, rank_pattern=rank_pattern, alpha_pattern=alpha_pattern, use_effective_conv2d=use_effective_conv2d, decompose_both=decompose_both, decompose_factor=decompose_factor, ) return config def combine_peft_state_dict(info: Dict[str, Union[LoRAInfo, LoHaInfo]]) -> Dict[str, torch.Tensor]: result = {} for key_info in info.values(): result.update(key_info.peft_state_dict()) return result def detect_adapter_type(keys: List[str]) -> PeftType: # Detect type of adapter by keys # Inspired by this: # https://github.com/bmaltais/kohya_ss/blob/ed4e3b0239a40506de9a17e550e6cf2d0b867a4f/tools/lycoris_utils.py#L312 for key in keys: if "alpha" in key: continue elif any(x in key for x in ["lora_down", "lora_up"]): # LoRA return PeftType.LORA elif any(x in key for x in ["hada_w1", "hada_w2", "hada_t1", "hada_t2"]): # LoHa may have the following keys: # hada_w1_a, hada_w1_b, hada_w2_a, hada_w2_b, hada_t1, hada_t2 return PeftType.LOHA elif any(x in key for x in ["lokr_w1", "lokr_w2", "lokr_t1", "lokr_t2"]): # LoKr may have the following keys: # lokr_w1, lokr_w2, lokr_w1_a, lokr_w1_b, lokr_w2_a, lokr_w2_b, lokr_t1, lokr_t2 return PeftType.LOKR elif "diff" in key: raise ValueError("Currently full diff adapters are not implemented") else: raise ValueError("Unknown adapter type, probably not implemented") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--sd_checkpoint", default=None, type=str, required=True, help="SD checkpoint to use") parser.add_argument( "--adapter_path", default=None, type=str, required=True, help="Path to downloaded adapter to convert", ) 
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output peft adapter.") parser.add_argument("--half", action="store_true", help="Save weights in half precision.") parser.add_argument( "--loha_conv2d_weights_fix", action="store_true", help="""LoHa checkpoints trained with lycoris-lora<=1.9.0 contain a bug described in this PR https://github.com/KohakuBlueleaf/LyCORIS/pull/115. This option fixes this bug during weight conversion (replaces hada_t2 with hada_t1 for Conv2d 3x3 layers). The output results may differ from webui, but in general, they should be better in terms of quality. This option should be set to True in case the provided checkpoint has been trained with lycoris-lora version for which the mentioned PR wasn't merged. This option should be set to False in case the provided checkpoint has been trained with lycoris-lora version for which the mentioned PR is merged or full compatibility with webui outputs is required.""", ) args = parser.parse_args() # Load all models that we need to add adapter to text_encoder = CLIPTextModel.from_pretrained(args.sd_checkpoint, subfolder="text_encoder") unet = UNet2DConditionModel.from_pretrained(args.sd_checkpoint, subfolder="unet") # Construct possible mapping from kohya keys to peft keys models_keys = {} for model, model_key, model_name in [ (text_encoder, PREFIX_TEXT_ENCODER, "text_encoder"), (unet, PREFIX_UNET, "unet"), ]: models_keys.update( { f"{model_key}.{peft_key}".replace(".", "_"): peft_key for peft_key in (x[0] for x in model.named_modules()) } ) # Store conversion info (model_type -> peft_key -> LoRAInfo | LoHaInfo | LoKrInfo) adapter_info: Dict[str, Dict[str, Union[LoRAInfo, LoHaInfo, LoKrInfo]]] = { "text_encoder": {}, "unet": {}, } # Store decompose_factor for LoKr decompose_factor = -1 # Open adapter checkpoint with safetensors.safe_open(args.adapter_path, framework="pt", device="cpu") as f: # Extract information about adapter structure metadata = f.metadata() # It may be difficult to determine rank for LoKr adapters # If checkpoint was trained with large rank it may not be utilized during weights creation at all # So we need to get it from checkpoint metadata (along with decompose_factor) rank, conv_rank = None, None if metadata is not None: rank = metadata.get("ss_network_dim", None) rank = int(rank) if rank else None if "ss_network_args" in metadata: network_args = json.loads(metadata["ss_network_args"]) conv_rank = network_args.get("conv_dim", None) conv_rank = int(conv_rank) if conv_rank else rank decompose_factor = network_args.get("factor", -1) decompose_factor = int(decompose_factor) # Detect adapter type based on keys adapter_type = detect_adapter_type(f.keys()) adapter_info_cls = { PeftType.LORA: LoRAInfo, PeftType.LOHA: LoHaInfo, PeftType.LOKR: LoKrInfo, }[adapter_type] # Iterate through available info and unpack all the values for key in f.keys(): kohya_key, kohya_type = key.split(".")[:2] # Find which model this key belongs to if kohya_key.startswith(PREFIX_TEXT_ENCODER): model_type, model = "text_encoder", text_encoder elif kohya_key.startswith(PREFIX_UNET): model_type, model = "unet", unet else: raise ValueError(f"Cannot determine model for key: {key}") # Find corresponding peft key if kohya_key not in models_keys: raise ValueError(f"Cannot find corresponding key for diffusers/transformers model: {kohya_key}") peft_key = models_keys[kohya_key] # Retrieve corresponding layer of model layer = attrgetter(peft_key)(model) # Create a corresponding adapter info if peft_key not in 
adapter_info[model_type]: adapter_info[model_type][peft_key] = adapter_info_cls(kohya_key=kohya_key, peft_key=peft_key) tensor = f.get_tensor(key) if kohya_type == "alpha": adapter_info[model_type][peft_key].alpha = tensor.item() elif kohya_type == "lora_down": adapter_info[model_type][peft_key].lora_A = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lora_up": adapter_info[model_type][peft_key].lora_B = tensor adapter_info[model_type][peft_key].rank = tensor.shape[1] elif kohya_type == "hada_w1_a": adapter_info[model_type][peft_key].hada_w1_a = tensor elif kohya_type == "hada_w1_b": adapter_info[model_type][peft_key].hada_w1_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "hada_w2_a": adapter_info[model_type][peft_key].hada_w2_a = tensor elif kohya_type == "hada_w2_b": adapter_info[model_type][peft_key].hada_w2_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type in {"hada_t1", "hada_t2"}: if args.loha_conv2d_weights_fix: if kohya_type == "hada_t1": # This code block fixes a bug that exists for some LoHa checkpoints # that resulted in accidentally using hada_t1 weight instead of hada_t2, see # https://github.com/KohakuBlueleaf/LyCORIS/pull/115 adapter_info[model_type][peft_key].hada_t1 = tensor adapter_info[model_type][peft_key].hada_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] else: if kohya_type == "hada_t1": adapter_info[model_type][peft_key].hada_t1 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "hada_t2": adapter_info[model_type][peft_key].hada_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_t2": adapter_info[model_type][peft_key].lokr_t2 = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_w1": adapter_info[model_type][peft_key].lokr_w1 = tensor if isinstance(layer, nn.Linear) or ( isinstance(layer, nn.Conv2d) and tuple(layer.weight.shape[2:]) == (1, 1) ): adapter_info[model_type][peft_key].rank = rank elif isinstance(layer, nn.Conv2d): adapter_info[model_type][peft_key].rank = conv_rank elif kohya_type == "lokr_w2": adapter_info[model_type][peft_key].lokr_w2 = tensor if isinstance(layer, nn.Linear) or ( isinstance(layer, nn.Conv2d) and tuple(layer.weight.shape[2:]) == (1, 1) ): adapter_info[model_type][peft_key].rank = rank elif isinstance(layer, nn.Conv2d): adapter_info[model_type][peft_key].rank = conv_rank elif kohya_type == "lokr_w1_a": adapter_info[model_type][peft_key].lokr_w1_a = tensor adapter_info[model_type][peft_key].rank = tensor.shape[1] elif kohya_type == "lokr_w1_b": adapter_info[model_type][peft_key].lokr_w1_b = tensor adapter_info[model_type][peft_key].rank = tensor.shape[0] elif kohya_type == "lokr_w2_a": adapter_info[model_type][peft_key].lokr_w2_a = tensor elif kohya_type == "lokr_w2_b": adapter_info[model_type][peft_key].lokr_w2_b = tensor else: raise ValueError(f"Unknown weight name in key: {key} - {kohya_type}") # Get function which will create adapter config based on extracted info construct_config_fn = { PeftType.LORA: construct_peft_loraconfig, PeftType.LOHA: construct_peft_lohaconfig, PeftType.LOKR: construct_peft_lokrconfig, }[adapter_type] # Process each model sequentially for model, model_name in [(text_encoder, "text_encoder"), (unet, "unet")]: # Skip model if no data was provided if len(adapter_info[model_name]) == 0: continue config = construct_config_fn(adapter_info[model_name], 
decompose_factor=decompose_factor) # Output warning for LoHa with use_effective_conv2d if ( isinstance(config, LoHaConfig) and getattr(config, "use_effective_conv2d", False) and args.loha_conv2d_weights_fix is False ): logging.warning( 'lycoris-lora<=1.9.0 LoHa implementation contains a bug, which can be fixed with "--loha_conv2d_weights_fix".\n' "For more info, please refer to https://github.com/huggingface/peft/pull/1021 and https://github.com/KohakuBlueleaf/LyCORIS/pull/115" ) model = get_peft_model(model, config) missing_keys, unexpected_keys = set_peft_model_state_dict( model, combine_peft_state_dict(adapter_info[model_name]) ) if len(unexpected_keys) > 0: raise ValueError(f"Unexpected keys {unexpected_keys} found during conversion") if args.half: model.to(torch.float16) # Save model to disk model.save_pretrained(os.path.join(args.dump_path, model_name))
peft/examples/stable_diffusion/convert_sd_adapter_to_peft.py/0
{ "file_path": "peft/examples/stable_diffusion/convert_sd_adapter_to_peft.py", "repo_id": "peft", "token_count": 10390 }
178
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations import os from contextlib import contextmanager from typing import Any, Optional, Union import torch from accelerate.hooks import remove_hook_from_submodules from torch import nn from transformers.utils import PushToHubMixin from peft.tuners.mixed import COMPATIBLE_TUNER_TYPES from .config import PeftConfig from .peft_model import PeftModel from .tuners import ( AdaLoraModel, IA3Model, LoHaModel, LoKrModel, LoraModel, MixedModel, OFTModel, ) from .utils import PeftType, _set_adapter, _set_trainable PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.LOHA: LoHaModel, PeftType.LOKR: LoKrModel, PeftType.ADALORA: AdaLoraModel, PeftType.IA3: IA3Model, PeftType.OFT: OFTModel, } def _prepare_model_for_gradient_checkpointing(model: nn.Module) -> None: r""" Prepares the model for gradient checkpointing if necessary """ # Note: same as PeftModel._prepare_model_for_gradient_checkpointing if not getattr(model, "is_gradient_checkpointing", True): return model if not ( getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False) or getattr(model, "is_quantized", False) ): if hasattr(model, "enable_input_require_grads"): model.enable_input_require_grads() elif hasattr(model, "get_input_embeddings"): def make_inputs_require_grad(module, input, output): output.requires_grad_(True) model.get_input_embeddings().register_forward_hook(make_inputs_require_grad) def _check_config_compatible(peft_config: PeftConfig) -> None: if peft_config.peft_type not in COMPATIBLE_TUNER_TYPES: raise ValueError( f"The provided `peft_type` '{peft_config.peft_type.value}' is not compatible with the `PeftMixedModel`. " f"Compatible types are: {COMPATIBLE_TUNER_TYPES}" ) class PeftMixedModel(PushToHubMixin, torch.nn.Module): """ PeftMixedModel for loading mixing different types of adapters for inference. This class does not support loading/saving, and it shouldn't usually be initialized directly. Instead, use `get_peft_model` with the argument `mixed=True`. <Tip> Read the [Mixed adapter types](https://huggingface.co/docs/peft/en/developer_guides/mixed_models) guide to learn more about using different adapter types. </Tip> Example: ```py >>> from peft import get_peft_model >>> base_model = ... # load the base model, e.g. from transformers >>> peft_model = PeftMixedModel.from_pretrained(base_model, path_to_adapter1, "adapter1").eval() >>> peft_model.load_adapter(path_to_adapter2, "adapter2") >>> peft_model.set_adapter(["adapter1", "adapter2"]) # activate both adapters >>> peft_model(data) # forward pass using both adapters ``` Args: model (`torch.nn.Module`): The model to be tuned. config (`PeftConfig`): The config of the model to be tuned. The adapter type must be compatible. adapter_name (`str`, `optional`, defaults to `"default"`): The name of the first adapter. 
""" def __init__(self, model: nn.Module, peft_config: PeftConfig, adapter_name: str = "default") -> None: super().__init__() _check_config_compatible(peft_config) _prepare_model_for_gradient_checkpointing(model) self.modules_to_save = None self.base_model = MixedModel(model, {adapter_name: peft_config}, adapter_name) self.set_modules_to_save(peft_config, adapter_name) self.config = getattr(model, "config", {"model_type": "custom"}) # the `pretraining_tp` is set for some models to simulate Tensor Parallelism during inference to avoid # numerical differences, https://github.com/pytorch/pytorch/issues/76232 - to avoid any unexpected # behavior we disable that in this line. if hasattr(self.base_model, "config") and hasattr(self.base_model.config, "pretraining_tp"): self.base_model.config.pretraining_tp = 1 @property def peft_config(self) -> dict[str, PeftConfig]: return self.base_model.peft_config @property def active_adapter(self) -> str: return self.base_model.active_adapter @property def active_adapters(self) -> list[str]: return self.base_model.active_adapters def get_nb_trainable_parameters(self): r""" Returns the number of trainable parameters and number of all parameters in the model. """ # note: same as PeftModel.get_nb_trainable_parameters trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel # Due to the design of 4bit linear layers from bitsandbytes # one needs to multiply the number of parameters by 2 to get # the correct number of parameters if param.__class__.__name__ == "Params4bit": num_params = num_params * 2 all_param += num_params if param.requires_grad: trainable_params += num_params return trainable_params, all_param def print_trainable_parameters(self): """ Prints the number of trainable parameters in the model. Note: print_trainable_parameters() uses get_nb_trainable_parameters() which is different from num_parameters(only_trainable=True) from huggingface/transformers. get_nb_trainable_parameters() returns (trainable parameters, all parameters) of the Peft Model which includes modified backbone transformer model. For techniques like LoRA, the backbone transformer model is modified in place with LoRA modules. However, for prompt tuning, the backbone transformer model is unmodified. num_parameters(only_trainable=True) returns number of trainable parameters of the backbone transformer model which can be different. """ # note: same as PeftModel.print_trainable_parameters trainable_params, all_param = self.get_nb_trainable_parameters() print( f"trainable params: {trainable_params:,d} || " f"all params: {all_param:,d} || " f"trainable%: {100 * trainable_params / all_param:.4f}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args: Any, **kwargs: Any): """ Forward pass of the model. """ return self.base_model(*args, **kwargs) def generate(self, *args: Any, **kwargs: Any): """ Generate output. """ return self.base_model.generate(*args, **kwargs) @contextmanager def disable_adapter(self): """ Disables the adapter module. 
""" try: self.base_model.disable_adapter_layers() yield finally: self.base_model.enable_adapter_layers() def add_adapter(self, adapter_name: str, peft_config: PeftConfig): _check_config_compatible(peft_config) try: self.peft_config[adapter_name] = peft_config self.base_model.inject_adapter(self, adapter_name) except Exception: # something went wrong, roll back if adapter_name in self.peft_config: del self.peft_config[adapter_name] raise self.set_modules_to_save(peft_config, adapter_name) def set_modules_to_save(self, peft_config: PeftConfig, adapter_name: str) -> None: if (modules_to_save := getattr(peft_config, "modules_to_save", None)) is None: return if self.modules_to_save is None: self.modules_to_save = set(modules_to_save) else: self.modules_to_save.update(modules_to_save) _set_trainable(self, adapter_name) def set_adapter(self, adapter_name: Union[str, list[str]]) -> None: """ Sets the active adapter(s) for the model. Note that the order in which the adapters are applied during the forward pass may not be the same as the order in which they are passed to this function. Instead, the order during the forward pass is determined by the order in which the adapters were loaded into the model. The active adapters only determine which adapters are active during the forward pass, but not the order in which they are applied. Additionally, this function will set the specified adapters to trainable (i.e., requires_grad=True). If this is not desired, use the following code. ```py >>> for name, param in model_peft.named_parameters(): ... if ...: # some check on name (ex. if 'lora' in name) ... param.requires_grad = False ``` Args: adapter_name (`str` or `List[str]`): The name of the adapter(s) to be activated. """ if isinstance(adapter_name, str): adapter_name = [adapter_name] mismatched = set(adapter_name) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) def delete_adapter(self, adapter_name: Union[str, list[str]]) -> None: if isinstance(adapter_name, str): adapter_name = [adapter_name] mismatched = set(adapter_name) - set(self.peft_config.keys()) if mismatched: raise ValueError( f"Adapter(s) {sorted(mismatched)} not found, available adapters: {sorted(self.peft_config.keys())}" ) self.base_model.delete_adapter(adapter_name) def merge_and_unload(self, *args: Any, **kwargs: Any): r""" This method merges the adapter layers into the base model. This is needed if someone wants to use the base model as a standalone model. Args: progressbar (`bool`): whether to show a progressbar indicating the unload and merge process safe_merge (`bool`): whether to activate the safe merging check to check if there is any potential Nan in the adapter weights adapter_names (`List[str]`, *optional*): The list of adapter names that should be merged. If None, all active adapters will be merged. Defaults to `None`. """ return self.base_model.merge_and_unload(*args, **kwargs) def unload(self, *args: Any, **kwargs: Any): """ Gets back the base model by removing all the adapter modules without merging. This gives back the original base model. 
""" return self.base_model.unload(*args, **kwargs) @classmethod def _split_kwargs(cls, kwargs: dict[str, Any]): return PeftModel._split_kwargs(kwargs) def load_adapter(self, model_id: str, adapter_name: str, *args: Any, **kwargs: Any): output = PeftModel.load_adapter(self, model_id, adapter_name, *args, **kwargs) # TODO: not quite clear why this is necessary but tests fail without it self.set_adapter(self.active_adapters) return output def create_or_update_model_card(self, output_dir: str): raise NotImplementedError(f"Model card creation is not supported for {self.__class__.__name__} (yet).") def save_pretrained( self, save_directory: str, safe_serialization: bool = False, selected_adapters: Optional[list[str]] = None, **kwargs: Any, ): raise NotImplementedError(f"Saving is not supported for {self.__class__.__name__} (yet).") @classmethod def from_pretrained( cls, model: nn.Module, model_id: str | os.PathLike, adapter_name: str = "default", is_trainable: bool = False, config: Optional[PeftConfig] = None, **kwargs: Any, ): r""" Instantiate a PEFT mixed model from a pretrained model and loaded PEFT weights. Note that the passed `model` may be modified inplace. Args: model (`nn.Module`): The model to be adapted. model_id (`str` or `os.PathLike`): The name of the PEFT configuration to use. Can be either: - A string, the `model id` of a PEFT configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a PEFT configuration file saved using the `save_pretrained` method (`./my_peft_config_directory/`). adapter_name (`str`, *optional*, defaults to `"default"`): The name of the adapter to be loaded. This is useful for loading multiple adapters. is_trainable (`bool`, *optional*, defaults to `False`): Whether the adapter should be trainable or not. If `False`, the adapter will be frozen and use for inference config ([`~peft.PeftConfig`], *optional*): The configuration object to use instead of an automatically loaded configuration. This configuration object is mutually exclusive with `model_id` and `kwargs`. This is useful when configuration is already loaded before calling `from_pretrained`. kwargs: (`optional`): Additional keyword arguments passed along to the specific PEFT configuration class. 
""" # note: adapted from PeftModel.from_pretrained from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING # load the config if config is None: config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig._get_peft_type( model_id, subfolder=kwargs.get("subfolder", None), revision=kwargs.get("revision", None), cache_dir=kwargs.get("cache_dir", None), use_auth_token=kwargs.get("use_auth_token", None), ) ].from_pretrained(model_id, **kwargs) elif isinstance(config, PeftConfig): config.inference_mode = not is_trainable else: raise ValueError(f"The input config must be a PeftConfig, got {config.__class__}") # note: this is different from PeftModel.from_pretrained if config.peft_type not in PEFT_TYPE_TO_MODEL_MAPPING: raise ValueError(f"Adapter of type {config.peft_type} is not supported for mixed models.") if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if config.is_prompt_learning and is_trainable: # note: should not be possible to reach, but just in case raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable # note: this is different from PeftModel.from_pretrained, we always return a PeftMixedModel model = cls(model, config, adapter_name) model.load_adapter(model_id, adapter_name, is_trainable=is_trainable, **kwargs) return model
peft/src/peft/mixed_model.py/0
{ "file_path": "peft/src/peft/mixed_model.py", "repo_id": "peft", "token_count": 6797 }
179
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import Any import torch from peft.import_utils import is_bnb_4bit_available, is_bnb_available from .layer import IA3Layer if is_bnb_available(): class Linear8bitLt(torch.nn.Module, IA3Layer): # (IA)^3 implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, is_feedforward: bool, init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # note: no check for self.merged because merging is not supported (yet) if self.disable_adapters: return self.base_layer(x) ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue ia3_scaling *= self.ia3_l[active_adapter].flatten() requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) if requires_conversion: x = x.float() if self.is_feedforward: result = self.base_layer(x * ia3_scaling) expected_dtype = result.dtype else: result = self.base_layer(x) expected_dtype = result.dtype result = result * ia3_scaling if requires_conversion: result = result.to(expected_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "ia3." + rep if is_bnb_4bit_available(): class Linear4bit(torch.nn.Module, IA3Layer): # IA3 implemented in a dense layer def __init__( self, base_layer: torch.nn.Module, adapter_name: str, is_feedforward: bool, init_ia3_weights: bool = True, **kwargs, ) -> None: super().__init__() IA3Layer.__init__(self, base_layer, is_feedforward=is_feedforward) # Freezing the pre-trained weight matrix self.get_base_layer().weight.requires_grad = False self._active_adapter = adapter_name self.update_layer(adapter_name, init_ia3_weights) def forward(self, x: torch.Tensor, *args: Any, **kwargs: Any) -> torch.Tensor: # note: no check for self.merged because merging is not supported (yet) if self.disable_adapters: return self.base_layer(x) ia3_scaling = 1 for active_adapter in self.active_adapters: if active_adapter not in self.ia3_l.keys(): continue ia3_scaling *= self.ia3_l[active_adapter].flatten() requires_conversion = (not torch.is_autocast_enabled()) and (x.dtype != torch.float32) if requires_conversion: x = x.float() if self.is_feedforward: result = self.base_layer(x * ia3_scaling) expected_dtype = result.dtype else: result = self.base_layer(x) expected_dtype = result.dtype result = result * ia3_scaling result = result.clone() # adalora.py and lora.py both suggest that this is necessary for 4-bit training on older versions of Pytorch. # This has been duplicated here. 
if requires_conversion: result = result.to(expected_dtype) return result def __repr__(self) -> str: rep = super().__repr__() return "ia3." + rep
peft/src/peft/tuners/ia3/bnb.py/0
{ "file_path": "peft/src/peft/tuners/ia3/bnb.py", "repo_id": "peft", "token_count": 2193 }
180
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import annotations from dataclasses import dataclass, field from typing import Literal, Optional, Union from peft.config import PeftConfig from peft.utils import PeftType @dataclass class LoftQConfig: """ This is the sub-configuration class to store the configuration of a [`LoraModel`]. Args: bits_pattern (`dict`): The mapping from layer names or regexp expression to bits which are different from the default bits specified by `bits`. For example, `{model.decoder.layers.0.encoder_attn.k_proj: 2`}. bits (`int`): Quantization bits for LoftQ. iter (`int`): Alternating iterations for LoftQ. fake (`bool`): True: use fp16/fp32; used for first time to save weights. False: use bitsandbytes 4bit linear models. weights can't be saved. Recommend to set to True, save the weights and load the saved weights in 4 bits. """ loftq_bits: int = field(default=4, metadata={"help": "Quantization bits for LoftQ"}) loftq_iter: int = field(default=1, metadata={"help": "Alternating iterations for LoftQ"}) @dataclass class LoraConfig(PeftConfig): """ This is the configuration class to store the configuration of a [`LoraModel`]. Args: r (`int`): Lora attention dimension (the "rank"). target_modules (`Optional[Union[List[str], str]]`): The names of the modules to apply the adapter to. If this is specified, only the modules with the specified names will be replaced. When passing a string, a regex match will be performed. When passing a list of strings, either an exact match will be performed or it is checked if the name of the module ends with any of the passed strings. If this is specified as 'all-linear', then all linear/Conv1D modules are chosen, excluding the output layer. If this is not specified, modules will be chosen according to the model architecture. If the architecture is not known, an error will be raised -- in this case, you should specify the target modules manually. lora_alpha (`int`): The alpha parameter for Lora scaling. lora_dropout (`float`): The dropout probability for Lora layers. fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out). For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`. bias (`str`): Bias type for LoRA. Can be 'none', 'all' or 'lora_only'. If 'all' or 'lora_only', the corresponding biases will be updated during training. Be aware that this means that, even when disabling the adapters, the model will not produce the same output as the base model would have without adaptation. use_rslora (`bool`): When set to True, uses <a href='https://doi.org/10.48550/arXiv.2312.03732'>Rank-Stabilized LoRA</a> which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it was proven to work better. Otherwise, it will use the original default value of `lora_alpha/r`. 
modules_to_save (`List[str]`): List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint. init_lora_weights (`bool` | `Literal["gaussian", "loftq"]`): How to initialize the weights of the adapter layers. Passing True (default) results in the default initialization from the reference implementation from Microsoft. Passing 'gaussian' results in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization to False leads to completely random initialization and is discouraged. Pass `'loftq'` to use LoftQ initialization. layers_to_transform (`Union[List[int], int]`): The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices that are specified in this list. If a single integer is passed, it will apply the transformations on the layer at this index. layers_pattern (`str`): The layer pattern name, used only if `layers_to_transform` is different from `None`. rank_pattern (`dict`): The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. alpha_pattern (`dict`): The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. megatron_config (`Optional[dict]`): The TransformerConfig arguments for Megatron. It is used to create LoRA's parallel linear layer. You can get it like this, `core_transformer_config_from_args(get_args())`, these two functions being from Megatron. The arguments will be used to initialize the TransformerConfig of Megatron. You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and RowParallelLinear layers of megatron. megatron_core (`Optional[str]`): The core module from Megatron to use, defaults to `"megatron.core"`. loftq_config (`Optional[LoftQConfig]`): The configuration of LoftQ. If this is not None, then LoftQ will be used to quantize the backbone weights and initialize Lora layers. Also pass `init_lora_weights='loftq'`. Note that you should not pass a quantized model in this case, as LoftQ will quantize the model itself. use_dora (`bool`): Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger overhead than pure LoRA, so it is recommended to merge weights for inference. For more information, see https://arxiv.org/abs/2402.09353. layer_replication(`List[Tuple[int, int]]`): Build a new stack of layers by stacking the original model layers according to the ranges specified. This allows expanding (or shrinking) the model without duplicating the base model weights. The new layers will all have separate LoRA adapters attached to them. """ r: int = field(default=8, metadata={"help": "Lora attention dimension"}) target_modules: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": ( "List of module names or regex expression of the module names to replace with LoRA." "For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$'." "This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer." 
"If not specified, modules will be chosen according to the model architecture, If the architecture is " "not known, an error will be raised -- in this case, you should specify the target modules manually." ), }, ) lora_alpha: int = field(default=8, metadata={"help": "Lora alpha"}) lora_dropout: float = field(default=0.0, metadata={"help": "Lora dropout"}) fan_in_fan_out: bool = field( default=False, metadata={"help": "Set this to True if the layer to replace stores weight like (fan_in, fan_out)"}, ) bias: Literal["none", "all", "lora_only"] = field( default="none", metadata={"help": "Bias type for Lora. Can be 'none', 'all' or 'lora_only'"} ) use_rslora: bool = field( default=False, metadata={ "help": ( "When set to True, uses Rank-Stabilized LoRA doi.org/10.48550/arXiv.2312.03732" " which sets the adapter scaling factor to `lora_alpha/math.sqrt(r)`, since it" " was proven to work better. Otherwise, it will use the original default" " value of `lora_alpha/r`." ) }, ) modules_to_save: Optional[list[str]] = field( default=None, metadata={ "help": "List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. " "For example, in Sequence Classification or Token Classification tasks, " "the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved." }, ) init_lora_weights: bool | Literal["gaussian", "loftq"] = field( default=True, metadata={ "help": ( "How to initialize the weights of the LoRA layers. Passing True (default) results in the default " "initialization from the reference implementation from Microsoft. Passing 'gaussian' results " "in Gaussian initialization scaled by the LoRA rank for linear and layers. Setting the initialization " "to False leads to completely random initialization and is discouraged." "Pass `'loftq'` to use LoftQ initialization" ), }, ) layers_to_transform: Optional[Union[list[int], int]] = field( default=None, metadata={ "help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index. " "This only works when target_modules is a list of str." }, ) layers_pattern: Optional[Union[list[str], str]] = field( default=None, metadata={ "help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern." "This only works when target_modules is a list of str." }, ) rank_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to ranks which are different from the default rank specified by `r`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 8`}" ) }, ) alpha_pattern: Optional[dict] = field( default_factory=dict, metadata={ "help": ( "The mapping from layer names or regexp expression to alphas which are different from the default alpha specified by `lora_alpha`. " "For example, `{model.decoder.layers.0.encoder_attn.k_proj: 32`}" ) }, ) megatron_config: Optional[dict] = field( default=None, metadata={ "help": ( "The TransformerConfig from Megatron. It is used to create LoRA's parallel linear layer." "You can get it like this, `core_transformer_config_from_args(get_args())`, " "these two functions being from Megatron." "You need to specify this parameter when you want to apply LoRA to the ColumnParallelLinear and " "RowParallelLinear layers of megatron." 
"It should be noted that we may not be able to use the `save_pretrained` and `from_pretrained` " "functions, because TransformerConfig may not necessarily be serialized." "But when using megatron, we can use `get_peft_model_state_dict` function and " "megatron's framework, they can also save and load models and configurations." ) }, ) megatron_core: Optional[str] = field( default="megatron.core", metadata={ "help": ( "The core module from Megatron, it is used to create LoRA's parallel linear layer. " "It only needs to be passed in when you need to use your own modified megatron core module. " "Otherwise, it will use the default value `megatron.core`. " ) }, ) # dict type is used when loading config.json loftq_config: Union[LoftQConfig, dict] = field( default_factory=dict, metadata={ "help": ( "The configuration of LoftQ. If this is passed, then LoftQ will be used to quantize the backbone " "weights and initialize Lora layers. Also set `init_lora_weights='loftq'` in this case." ) }, ) use_dora: bool = field( default=False, metadata={ "help": ( "Enable 'Weight-Decomposed Low-Rank Adaptation' (DoRA). This technique decomposes the updates of the " "weights into two parts, magnitude and direction. Direction is handled by normal LoRA, whereas the " "magnitude is handled by a separate learnable parameter. This can improve the performance of LoRA, " "especially at low ranks. Right now, DoRA only supports linear and Conv2D layers. DoRA introduces a bigger" "overhead than pure LoRA, so it is recommended to merge weights for inference. For more information, " "see https://arxiv.org/abs/2402.09353." ) }, ) # Enables replicating layers in a model to expand it to a larger model. layer_replication: Optional[list[tuple[int, int]]] = field( default=None, metadata={ "help": ( "This enables using LoRA to effectively expand a transformer model to a larger size by repeating some layers. " "The transformation handles models (currently Llama, Bert or Falcon compatible architectures) with " "a module list in the model which it modifies to expand the number of modules. " "Base weights are shared so the memory usage is close to the original model. The intended use is these base weights " "remain fixed during finetuning but each layer has a separate LoRA adapter so the layers can be specialed via " "the adapter layers fit during fine tuning." "The format is a list of [start, end) pairs which specify the layer ranges to stack. For example:\n" " Original model has 5 layers labelled by their position in the model: `[0, 1, 2, 3, 4]`\n" " layer_replication: `[[0, 4], [2, 5]]`\n" " Final model will have this arrangement of original layers: `[0, 1, 2, 3, 2, 3, 4]`\n" "This format is based on what is used for pass-through merges in mergekit. It makes it simple to select sequential " "ranges of a model and stack them while reusing layers at either end of each sequence." 
) }, ) def __post_init__(self): self.peft_type = PeftType.LORA self.target_modules = ( set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules ) # if target_modules is a regex expression, then layers_to_transform should be None if isinstance(self.target_modules, str) and self.layers_to_transform is not None: raise ValueError("`layers_to_transform` cannot be used when `target_modules` is a str.") # if target_modules is a regex expression, then layers_pattern should be None if isinstance(self.target_modules, str) and self.layers_pattern is not None: raise ValueError("`layers_pattern` cannot be used when `target_modules` is a str.") if self.use_dora and self.megatron_config: raise ValueError("DoRA does not support megatron_core, please set `use_dora=False`.") # handle init_lora_weights and loftq_config if self.init_lora_weights == "loftq": import importlib if not importlib.util.find_spec("scipy"): raise ImportError("The required package 'scipy' is not installed. Please install it to continue.") if self.loftq_config is None: raise ValueError("`loftq_config` must be specified when `init_lora_weights` is 'loftq'.") # convert loftq_config to dict if self.loftq_config and not isinstance(self.loftq_config, dict): self.loftq_config = vars(self.loftq_config)
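As a usage sketch (the base model name, the `target_modules` entries, and the hyperparameter values below are illustrative assumptions, not defaults prescribed by this file), a `LoraConfig` is typically built and handed to `get_peft_model`:

```python
from transformers import AutoModelForCausalLM
from peft import LoraConfig, get_peft_model

# Illustrative base model; any causal LM with q_proj/v_proj attention projections works similarly.
base_model = AutoModelForCausalLM.from_pretrained("facebook/opt-125m")

lora_config = LoraConfig(
    r=8,                                  # LoRA rank
    lora_alpha=16,                        # scaling is lora_alpha/r, or lora_alpha/sqrt(r) with use_rslora=True
    target_modules=["q_proj", "v_proj"],  # assumed module names for this architecture
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
)

peft_model = get_peft_model(base_model, lora_config)
peft_model.print_trainable_parameters()  # only the LoRA matrices (and any modules_to_save) are trainable
```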
peft/src/peft/tuners/lora/config.py/0
{ "file_path": "peft/src/peft/tuners/lora/config.py", "repo_id": "peft", "token_count": 6552 }
181
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from dataclasses import dataclass, field from typing import Union from peft.config import PromptLearningConfig from peft.utils import PeftType class PromptEncoderReparameterizationType(str, enum.Enum): MLP = "MLP" LSTM = "LSTM" @dataclass class PromptEncoderConfig(PromptLearningConfig): """ This is the configuration class to store the configuration of a [`PromptEncoder`]. Args: encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]): The type of reparameterization to use. encoder_hidden_size (`int`): The hidden size of the prompt encoder. encoder_num_layers (`int`): The number of layers of the prompt encoder. encoder_dropout (`float`): The dropout probability of the prompt encoder. """ encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field( default=PromptEncoderReparameterizationType.MLP, metadata={"help": "How to reparameterize the prompt encoder"}, ) encoder_hidden_size: int = field( default=None, metadata={"help": "The hidden size of the prompt encoder"}, ) encoder_num_layers: int = field( default=2, metadata={"help": "The number of layers of the prompt encoder"}, ) encoder_dropout: float = field( default=0.0, metadata={"help": "The dropout of the prompt encoder"}, ) def __post_init__(self): self.peft_type = PeftType.P_TUNING
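For illustration (the task type, virtual-token count, and hidden size below are assumed values, not defaults of this module), a `PromptEncoderConfig` is normally passed straight to `get_peft_model`:

```python
from transformers import AutoModelForSequenceClassification
from peft import PromptEncoderConfig, get_peft_model

# Illustrative base model for a two-class classification task.
base_model = AutoModelForSequenceClassification.from_pretrained("roberta-base", num_labels=2)

config = PromptEncoderConfig(
    task_type="SEQ_CLS",
    num_virtual_tokens=20,                  # length of the learned prompt
    encoder_reparameterization_type="MLP",  # or "LSTM"
    encoder_hidden_size=128,                # hidden size of the prompt encoder
)

model = get_peft_model(base_model, config)
model.print_trainable_parameters()  # only the prompt encoder parameters are trainable
```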
peft/src/peft/tuners/p_tuning/config.py/0
{ "file_path": "peft/src/peft/tuners/p_tuning/config.py", "repo_id": "peft", "token_count": 732 }
182
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import contextmanager import packaging.version import torch import transformers @contextmanager def gather_params_ctx(module: torch.nn.Module, modifier_rank: int = 0): """Call DeepSpeed GatheredParameters context manager if DeepSpeed is enabled, otherwise do nothing.""" if packaging.version.parse(transformers.__version__) >= packaging.version.parse("4.33.0"): from transformers.integrations import is_deepspeed_zero3_enabled else: from transformers.deepspeed import is_deepspeed_zero3_enabled if not is_deepspeed_zero3_enabled(): yield return import deepspeed params_to_gather = module.parameters() with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=modifier_rank): yield return def dequantize_bnb_weight(weight: torch.nn.Parameter, state=None): """ Helper function to dequantize 4bit or 8bit bnb weights. If the weight is not a bnb quantized weight, it will be returned as is. """ if not isinstance(weight, torch.nn.Parameter): raise TypeError(f"Input weight should be of type nn.Parameter, got {type(weight)} instead") cls_name = weight.__class__.__name__ if cls_name not in ("Params4bit", "Int8Params"): return weight import bitsandbytes as bnb if cls_name == "Params4bit": return bnb.functional.dequantize_4bit(weight.data, weight.quant_state) if state.SCB is None: state.SCB = weight.SCB im = torch.eye(weight.data.shape[-1]).contiguous().half().to(weight.device) im, imt, SCim, SCimt, coo_tensorim = bnb.functional.double_quant(im) im, Sim = bnb.functional.transform(im, "col32") if state.CxB is None: state.CxB, state.SB = bnb.functional.transform(weight.data, to_order=state.formatB) out32, Sout32 = bnb.functional.igemmlt(im, state.CxB, Sim, state.SB) return bnb.functional.mm_dequant(out32, Sout32, SCim, state.SCB, bias=None).t()
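A short, hedged sketch of how these helpers are used (assuming both functions above are importable from `peft.utils.integrations`; the `nn.Linear` module is just a stand-in for any layer of interest):

```python
import torch
from peft.utils.integrations import dequantize_bnb_weight, gather_params_ctx

linear = torch.nn.Linear(16, 16)

# Under DeepSpeed ZeRO-3 the parameters are gathered inside the context;
# otherwise the context manager is a no-op.
with gather_params_ctx(linear):
    weight_copy = linear.weight.detach().clone()

# For bitsandbytes Params4bit/Int8Params this returns a dense tensor;
# for a plain nn.Parameter the weight is returned unchanged.
dense_weight = dequantize_bnb_weight(linear.weight)
print(dense_weight.shape)  # torch.Size([16, 16])
```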
peft/src/peft/utils/integrations.py/0
{ "file_path": "peft/src/peft/utils/integrations.py", "repo_id": "peft", "token_count": 890 }
183
# Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import torch from parameterized import parameterized from transformers import AutoModel from peft import PrefixTuningConfig, PromptLearningConfig from .testing_common import PeftCommonTester, PeftTestConfigManager PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST = [ "hf-internal-testing/tiny-random-BertModel", "hf-internal-testing/tiny-random-RobertaModel", "hf-internal-testing/tiny-random-DebertaModel", "hf-internal-testing/tiny-random-DebertaV2Model", ] FULL_GRID = { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "task_type": "FEATURE_EXTRACTION", } def skip_non_prompt_tuning(test_list): """Skip tests that are not prompt tuning""" return [ test for test in test_list if issubclass(test[2], PromptLearningConfig) and (test[2] != PrefixTuningConfig) ] def skip_deberta_lora_tests(test_list): r""" Skip tests that are checkpointing with lora/ia3 tests for Deberta models (couldn't find much info on the error) """ return [test for test in test_list if not (any(k in test[0] for k in ["lora", "ia3"]) and "Deberta" in test[0])] def skip_deberta_pt_tests(test_list): r""" Skip tests that are checkpointing with lora/ia3 tests for Deberta models (couldn't find much info on the error) """ return [test for test in test_list if not ("prefix_tuning" in test[0] and "Deberta" in test[0])] class PeftFeatureExtractionModelTester(unittest.TestCase, PeftCommonTester): r""" Test if the PeftModel behaves as expected. This includes: - test if the model has the expected methods We use parametrized.expand for debugging purposes to test each model individually. 
""" transformers_class = AutoModel def prepare_inputs_for_testing(self): input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device) attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device) input_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return input_dict @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_model_attr(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs): self._test_adapter_name(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs): self._test_prepare_for_training(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs): self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs): self._test_merge_layers(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training(self, test_name, model_id, config_cls, config_kwargs): self._test_training(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_pt_tests) ) def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs): self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_training_layer_indexing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_layer_indexing(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_deberta_lora_tests) ) def test_training_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs): self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs): self._test_inference_safetensors(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs): 
self._test_peft_model_device_map(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_adapter(model_id, config_cls, config_kwargs) @parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID)) def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "adalora_kwargs": {"init_lora_weights": [False]}, "ia3_kwargs": {"init_ia3_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs): self._test_unload_adapter(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters( { "model_ids": PEFT_FEATURE_EXTRACTION_MODELS_TO_TEST, "lora_kwargs": {"init_lora_weights": [False]}, "task_type": "FEATURE_EXTRACTION", }, ) ) def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs): self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs) @parameterized.expand( PeftTestConfigManager.get_grid_parameters(FULL_GRID, filter_params_func=skip_non_prompt_tuning) ) def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs): self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)
peft/tests/test_feature_extraction_models.py/0
{ "file_path": "peft/tests/test_feature_extraction_models.py", "repo_id": "peft", "token_count": 3356 }
184
# Getting Started ## Welcome Welcome to the `timm` documentation, a lean set of docs that covers the basics of `timm`. For a more comprehensive set of docs (currently under development), please visit [timmdocs](http://timm.fast.ai) by [Aman Arora](https://github.com/amaarora). ## Install The library can be installed with pip: ``` pip install timm ``` I update the PyPi (pip) packages when I'm confident there are no significant model regressions from previous releases. If you want to pip install the bleeding edge from GitHub, use: ``` pip install git+https://github.com/rwightman/pytorch-image-models.git ``` !!! info "Conda Environment" All development and testing has been done in Conda Python 3 environments on Linux x86-64 systems, specifically 3.7, 3.8, 3.9, 3.10 Little to no care has been taken to be Python 2.x friendly and will not support it. If you run into any challenges running on Windows, or other OS, I'm definitely open to looking into those issues so long as it's in a reproducible (read Conda) environment. PyTorch versions 1.9, 1.10, 1.11 have been tested with the latest versions of this code. I've tried to keep the dependencies minimal, the setup is as per the PyTorch default install instructions for Conda: ``` conda create -n torch-env conda activate torch-env conda install pytorch torchvision cudatoolkit=11.3 -c pytorch conda install pyyaml ``` ## Load a Pretrained Model Pretrained models can be loaded using `timm.create_model` ```python import timm m = timm.create_model('mobilenetv3_large_100', pretrained=True) m.eval() ``` ## List Models with Pretrained Weights ```python import timm from pprint import pprint model_names = timm.list_models(pretrained=True) pprint(model_names) >>> ['adv_inception_v3', 'cspdarknet53', 'cspresnext50', 'densenet121', 'densenet161', 'densenet169', 'densenet201', 'densenetblur121d', 'dla34', 'dla46_c', ... ] ``` ## List Model Architectures by Wildcard ```python import timm from pprint import pprint model_names = timm.list_models('*resne*t*') pprint(model_names) >>> ['cspresnet50', 'cspresnet50d', 'cspresnet50w', 'cspresnext50', ... ] ```
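As a quick end-to-end sanity check (a minimal sketch; the 224x224 input matches the default config of `mobilenetv3_large_100`), the created model can be run on a dummy batch:

```python
import torch
import timm

m = timm.create_model('mobilenetv3_large_100', pretrained=True)
m.eval()

with torch.no_grad():
    logits = m(torch.randn(1, 3, 224, 224))  # one dummy RGB image at the model's default resolution
print(logits.shape)  # torch.Size([1, 1000]) -- ImageNet-1k class logits
```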
pytorch-image-models/docs/index.md/0
{ "file_path": "pytorch-image-models/docs/index.md", "repo_id": "pytorch-image-models", "token_count": 736 }
185
# EfficientNet (Knapsack Pruned) **EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrary scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scales network width, depth, and resolution in a principled way. The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image. The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block). This collection consists of pruned EfficientNet models. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{tan2020efficientnet, title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks}, author={Mingxing Tan and Quoc V. Le}, year={2020}, eprint={1905.11946}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` ``` @misc{aflalo2020knapsack, title={Knapsack Pruning with Inner Distillation}, author={Yonathan Aflalo and Asaf Noy and Ming Lin and Itamar Friedman and Lihi Zelnik}, year={2020}, eprint={2002.08258}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- Type: model-index Collections: - Name: EfficientNet Pruned Paper: Title: Knapsack Pruning with Inner Distillation URL: https://paperswithcode.com/paper/knapsack-pruning-with-inner-distillation Models: - Name: efficientnet_b1_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 489653114 Parameters: 6330000 File Size: 25595162 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b1_pruned Crop Pct: '0.882' Image Size: '240' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1208 Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.25% Top 5 Accuracy: 93.84% - Name: efficientnet_b2_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 878133915 Parameters: 8310000 File Size: 33555005 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b2_pruned Crop Pct: '0.89' Image 
Size: '260' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1219 Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.91% Top 5 Accuracy: 94.86% - Name: efficientnet_b3_pruned In Collection: EfficientNet Pruned Metadata: FLOPs: 1239590641 Parameters: 9860000 File Size: 39770812 Architecture: - 1x1 Convolution - Average Pooling - Batch Normalization - Convolution - Dense Connections - Dropout - Inverted Residual Block - Squeeze-and-Excitation Block - Swish Tasks: - Image Classification Training Data: - ImageNet ID: efficientnet_b3_pruned Crop Pct: '0.904' Image Size: '300' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1230 Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 80.86% Top 5 Accuracy: 95.24% -->
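To make the compound-scaling rule concrete, here is a small illustrative calculation. The coefficients α=1.2, β=1.1, γ=1.15 come from the EfficientNet paper's grid search (chosen so that α·β²·γ² ≈ 2); the B1-B3 input sizes listed above (240/260/300) are chosen near, but not exactly at, the values this formula produces.

```py
# Illustrative compound scaling: depth, width and resolution all grow with phi.
alpha, beta, gamma = 1.2, 1.1, 1.15  # coefficients reported in the EfficientNet paper

def scale(phi, base_resolution=224):
    depth_mult = alpha ** phi
    width_mult = beta ** phi
    resolution = base_resolution * gamma ** phi
    flops_mult = depth_mult * width_mult ** 2 * (gamma ** phi) ** 2  # roughly 2 ** phi
    return depth_mult, width_mult, resolution, flops_mult

for phi in range(4):  # B0..B3
    d, w, r, f = scale(phi)
    print(f"phi={phi}: depth x{d:.2f}, width x{w:.2f}, resolution ~{r:.0f}px, ~{f:.1f}x FLOPs")
```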
pytorch-image-models/docs/models/.templates/models/efficientnet-pruned.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/efficientnet-pruned.md", "repo_id": "pytorch-image-models", "token_count": 1945 }
186
# (Legacy) SE-ResNet **SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @misc{hu2019squeezeandexcitation, title={Squeeze-and-Excitation Networks}, author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu}, year={2019}, eprint={1709.01507}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: Legacy SE ResNet Paper: Title: Squeeze-and-Excitation Networks URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks Models: - Name: legacy_seresnet101 In Collection: Legacy SE ResNet Metadata: FLOPs: 9762614000 Parameters: 49330000 File Size: 197822624 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnet101 LR: 0.6 Epochs: 100 Layers: 101 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L426 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.38% Top 5 Accuracy: 94.26% - Name: legacy_seresnet152 In Collection: Legacy SE ResNet Metadata: FLOPs: 14553578160 Parameters: 66819999 File Size: 268033864 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnet152 LR: 0.6 Epochs: 100 Layers: 152 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L433 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.67% Top 5 Accuracy: 94.38% - Name: legacy_seresnet18 In Collection: Legacy SE ResNet Metadata: FLOPs: 2328876024 Parameters: 11780000 File Size: 47175663 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnet18 LR: 
0.6 Epochs: 100 Layers: 18 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L405 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 71.74% Top 5 Accuracy: 90.34% - Name: legacy_seresnet34 In Collection: Legacy SE ResNet Metadata: FLOPs: 4706201004 Parameters: 21960000 File Size: 87958697 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnet34 LR: 0.6 Epochs: 100 Layers: 34 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 1024 Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L412 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.79% Top 5 Accuracy: 92.13% - Name: legacy_seresnet50 In Collection: Legacy SE ResNet Metadata: FLOPs: 4974351024 Parameters: 28090000 File Size: 112611220 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Techniques: - Label Smoothing - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x NVIDIA Titan X GPUs ID: legacy_seresnet50 LR: 0.6 Epochs: 100 Layers: 50 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Image Size: '224' Interpolation: bilinear Minibatch Size: 1024 Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L419 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.64% Top 5 Accuracy: 93.74% -->
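The squeeze-and-excitation mechanism is compact enough to sketch directly. This is a generic PyTorch rendition for illustration only, not the exact module behind these checkpoints:

```py
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Squeeze (global average pool) -> excite (bottleneck MLP + sigmoid) -> rescale channels."""

    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        self.fc1 = nn.Linear(channels, channels // reduction)
        self.fc2 = nn.Linear(channels // reduction, channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, c, _, _ = x.shape
        s = x.mean(dim=(2, 3))          # squeeze: one descriptor per channel, shape (B, C)
        s = torch.relu(self.fc1(s))
        s = torch.sigmoid(self.fc2(s))  # per-channel gates in (0, 1)
        return x * s.view(b, c, 1, 1)   # recalibrate the feature maps

feats = torch.randn(2, 64, 28, 28)
print(SEBlock(64)(feats).shape)  # torch.Size([2, 64, 28, 28])
```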
pytorch-image-models/docs/models/.templates/models/legacy-se-resnet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/legacy-se-resnet.md", "repo_id": "pytorch-image-models", "token_count": 2886 }
187
# ResNet **Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://paperswithcode.com/method/residual-block) ontop of each other to form network: e.g. a ResNet-50 has fifty layers using these blocks. {% include 'code_snippets.md' %} ## How do I train this model? You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh. ## Citation ```BibTeX @article{DBLP:journals/corr/HeZRS15, author = {Kaiming He and Xiangyu Zhang and Shaoqing Ren and Jian Sun}, title = {Deep Residual Learning for Image Recognition}, journal = {CoRR}, volume = {abs/1512.03385}, year = {2015}, url = {http://arxiv.org/abs/1512.03385}, archivePrefix = {arXiv}, eprint = {1512.03385}, timestamp = {Wed, 17 Apr 2019 17:23:45 +0200}, biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib}, bibsource = {dblp computer science bibliography, https://dblp.org} } ``` <!-- Type: model-index Collections: - Name: ResNet Paper: Title: Deep Residual Learning for Image Recognition URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition Models: - Name: resnet18 In Collection: ResNet Metadata: FLOPs: 2337073152 Parameters: 11690000 File Size: 46827520 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet18 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L641 Weights: https://download.pytorch.org/models/resnet18-5c106cde.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 69.74% Top 5 Accuracy: 89.09% - Name: resnet26 In Collection: ResNet Metadata: FLOPs: 3026804736 Parameters: 16000000 File Size: 64129972 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet26 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L675 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.29% Top 5 Accuracy: 92.57% - Name: resnet34 In Collection: ResNet Metadata: FLOPs: 4718469120 Parameters: 21800000 File Size: 87290831 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet34 Crop Pct: '0.875' Image Size: '224' Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L658 Weights: 
https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.11% Top 5 Accuracy: 92.28% - Name: resnet50 In Collection: ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102488165 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnet50 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L691 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.04% Top 5 Accuracy: 94.39% - Name: resnetblur50 In Collection: ResNet Metadata: FLOPs: 6621606912 Parameters: 25560000 File Size: 102488165 Architecture: - 1x1 Convolution - Batch Normalization - Blur Pooling - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Data: - ImageNet ID: resnetblur50 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L1160 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 79.29% Top 5 Accuracy: 94.64% - Name: tv_resnet101 In Collection: ResNet Metadata: FLOPs: 10068547584 Parameters: 44550000 File Size: 178728960 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet101 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L761 Weights: https://download.pytorch.org/models/resnet101-5d3b4d8f.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.37% Top 5 Accuracy: 93.56% - Name: tv_resnet152 In Collection: ResNet Metadata: FLOPs: 14857660416 Parameters: 60190000 File Size: 241530880 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet152 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L769 Weights: https://download.pytorch.org/models/resnet152-b121ed2d.pth Results: - Task: Image Classification Dataset: ImageNet 
Metrics: Top 1 Accuracy: 78.32% Top 5 Accuracy: 94.05% - Name: tv_resnet34 In Collection: ResNet Metadata: FLOPs: 4718469120 Parameters: 21800000 File Size: 87306240 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet34 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L745 Weights: https://download.pytorch.org/models/resnet34-333f7ec4.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 73.3% Top 5 Accuracy: 91.42% - Name: tv_resnet50 In Collection: ResNet Metadata: FLOPs: 5282531328 Parameters: 25560000 File Size: 102502400 Architecture: - 1x1 Convolution - Batch Normalization - Bottleneck Residual Block - Convolution - Global Average Pooling - Max Pooling - ReLU - Residual Block - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet ID: tv_resnet50 LR: 0.1 Epochs: 90 Crop Pct: '0.875' LR Gamma: 0.1 Momentum: 0.9 Batch Size: 32 Image Size: '224' LR Step Size: 30 Weight Decay: 0.0001 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L753 Weights: https://download.pytorch.org/models/resnet50-19c8e357.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 76.16% Top 5 Accuracy: 92.88% -->
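A minimal basic residual block illustrates the "learn the residual, add the input back" idea; timm's real `BasicBlock`/`Bottleneck` modules add stride/downsample handling, configurable norm and activation layers, and more:

```py
import torch
import torch.nn as nn

class BasicResidualBlock(nn.Module):
    """y = relu(F(x) + x), where F is two 3x3 conv/BN stages (identity shortcut only)."""

    def __init__(self, channels: int):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, 3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        f = torch.relu(self.bn1(self.conv1(x)))
        f = self.bn2(self.conv2(f))
        return torch.relu(f + x)  # add the identity shortcut, then activate

block = BasicResidualBlock(64)
print(block(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 64, 56, 56])
```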
pytorch-image-models/docs/models/.templates/models/resnet.md/0
{ "file_path": "pytorch-image-models/docs/models/.templates/models/resnet.md", "repo_id": "pytorch-image-models", "token_count": 4271 }
188
# Installation

Before you start, you'll need to set up your environment and install the appropriate packages. `timm` is tested on **Python 3+**.

## Virtual Environment

You should install `timm` in a [virtual environment](https://docs.python.org/3/library/venv.html) to keep things tidy and avoid dependency conflicts.

1. Create and navigate to your project directory:

```bash
mkdir ~/my-project
cd ~/my-project
```

2. Start a virtual environment inside your directory:

```bash
python -m venv .env
```

3. Activate and deactivate the virtual environment with the following commands:

```bash
# Activate the virtual environment
source .env/bin/activate

# Deactivate the virtual environment
source .env/bin/deactivate
```

Once you've created your virtual environment, you can install `timm` in it.

## Using pip

The most straightforward way to install `timm` is with pip:

```bash
pip install timm
```

Alternatively, you can install `timm` from GitHub directly to get the latest, bleeding-edge version:

```bash
pip install git+https://github.com/rwightman/pytorch-image-models.git
```

Run the following command to check if `timm` has been properly installed:

```bash
python -c "from timm import list_models; print(list_models(pretrained=True)[:5])"
```

This command lists the first five pretrained models available in `timm` (which are sorted alphabetically). You should see the following output:

```python
['adv_inception_v3', 'bat_resnext26ts', 'beit_base_patch16_224', 'beit_base_patch16_224_in22k', 'beit_base_patch16_384']
```

## From Source

Building `timm` from source lets you make changes to the code base. To install from source, clone the repository and install with the following commands:

```bash
git clone https://github.com/rwightman/pytorch-image-models.git
cd pytorch-image-models
pip install -e .
```

Again, you can check if `timm` was properly installed with the following command:

```bash
python -c "from timm import list_models; print(list_models(pretrained=True)[:5])"
```
pytorch-image-models/hfdocs/source/installation.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/installation.mdx", "repo_id": "pytorch-image-models", "token_count": 619 }
189
# FBNet **FBNet** is a type of convolutional neural architectures discovered through [DNAS](https://paperswithcode.com/method/dnas) neural architecture search. It utilises a basic type of image model block inspired by [MobileNetv2](https://paperswithcode.com/method/mobilenetv2) that utilises depthwise convolutions and an inverted residual structure (see components). The principal building block is the [FBNet Block](https://paperswithcode.com/method/fbnet-block). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('fbnetc_100', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `fbnetc_100`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('fbnetc_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation ```BibTeX @misc{wu2019fbnet, title={FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search}, author={Bichen Wu and Xiaoliang Dai and Peizhao Zhang and Yanghan Wang and Fei Sun and Yiming Wu and Yuandong Tian and Peter Vajda and Yangqing Jia and Kurt Keutzer}, year={2019}, eprint={1812.03443}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: FBNet Paper: Title: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search' URL: https://paperswithcode.com/paper/fbnet-hardware-aware-efficient-convnet-design Models: - Name: fbnetc_100 In Collection: FBNet Metadata: FLOPs: 508940064 Parameters: 5570000 File Size: 22525094 Architecture: - 1x1 Convolution - Convolution - Dense Connections - Dropout - FBNet Block - Global Average Pooling - Softmax Tasks: - Image Classification Training Techniques: - SGD with Momentum - Weight Decay Training Data: - ImageNet Training Resources: 8x GPUs ID: fbnetc_100 LR: 0.1 Epochs: 360 Layers: 22 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 256 Image Size: '224' Weight Decay: 0.0005 Interpolation: bilinear Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L985 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.12% Top 5 Accuracy: 92.37% -->
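Because the FBNet block leans on depthwise convolutions, a quick parameter-count comparison (generic PyTorch, not FBNet's exact block) shows why they are attractive on mobile hardware:

```py
import torch.nn as nn

def n_params(m: nn.Module) -> int:
    return sum(p.numel() for p in m.parameters())

in_ch, out_ch, k = 64, 128, 3

# Standard convolution: every output channel mixes all input channels spatially.
standard = nn.Conv2d(in_ch, out_ch, k, padding=1, bias=False)

# Depthwise separable: per-channel 3x3 spatial conv, then a 1x1 pointwise channel mix.
separable = nn.Sequential(
    nn.Conv2d(in_ch, in_ch, k, padding=1, groups=in_ch, bias=False),  # depthwise
    nn.Conv2d(in_ch, out_ch, 1, bias=False),                          # pointwise
)

print(n_params(standard), n_params(separable))  # 73728 vs 8768 parameters
```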
pytorch-image-models/hfdocs/source/models/fbnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/fbnet.mdx", "repo_id": "pytorch-image-models", "token_count": 1705 }
190
# MnasNet **MnasNet** is a type of convolutional neural network optimized for mobile devices that is discovered through mobile neural architecture search, which explicitly incorporates model latency into the main objective so that the search can identify a model that achieves a good trade-off between accuracy and latency. The main building block is an [inverted residual block](https://paperswithcode.com/method/inverted-residual-block) (from [MobileNetV2](https://paperswithcode.com/method/mobilenetv2)). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('mnasnet_100', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `mnasnet_100`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('mnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. ## Citation ```BibTeX @misc{tan2019mnasnet, title={MnasNet: Platform-Aware Neural Architecture Search for Mobile}, author={Mingxing Tan and Bo Chen and Ruoming Pang and Vijay Vasudevan and Mark Sandler and Andrew Howard and Quoc V. 
Le}, year={2019}, eprint={1807.11626}, archivePrefix={arXiv}, primaryClass={cs.CV} } ``` <!-- Type: model-index Collections: - Name: MNASNet Paper: Title: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile' URL: https://paperswithcode.com/paper/mnasnet-platform-aware-neural-architecture Models: - Name: mnasnet_100 In Collection: MNASNet Metadata: FLOPs: 416415488 Parameters: 4380000 File Size: 17731774 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Global Average Pooling - Inverted Residual Block - Max Pooling - ReLU - Residual Connection - Softmax Tasks: - Image Classification Training Techniques: - RMSProp - Weight Decay Training Data: - ImageNet ID: mnasnet_100 Layers: 100 Dropout: 0.2 Crop Pct: '0.875' Momentum: 0.9 Batch Size: 4000 Image Size: '224' Interpolation: bicubic RMSProp Decay: 0.9 Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L894 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 74.67% Top 5 Accuracy: 92.1% - Name: semnasnet_100 In Collection: MNASNet Metadata: FLOPs: 414570766 Parameters: 3890000 File Size: 15731489 Architecture: - 1x1 Convolution - Batch Normalization - Convolution - Depthwise Separable Convolution - Dropout - Global Average Pooling - Inverted Residual Block - Max Pooling - ReLU - Residual Connection - Softmax - Squeeze-and-Excitation Block Tasks: - Image Classification Training Data: - ImageNet ID: semnasnet_100 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L928 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 75.45% Top 5 Accuracy: 92.61% -->
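For illustration, here is a stripped-down inverted residual block (expand with a 1x1 conv, depthwise 3x3, linear 1x1 projection, identity skip); the real MobileNetV2/MnasNet blocks add stride handling, optional squeeze-and-excitation, and careful norm/activation placement:

```py
import torch
import torch.nn as nn

class InvertedResidual(nn.Module):
    """Expand -> depthwise conv -> project, with an identity skip (stride 1, same channels)."""

    def __init__(self, channels: int, expansion: int = 6):
        super().__init__()
        hidden = channels * expansion
        self.block = nn.Sequential(
            nn.Conv2d(channels, hidden, 1, bias=False),                          # expand
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, hidden, 3, padding=1, groups=hidden, bias=False),  # depthwise
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, channels, 1, bias=False),                          # linear projection
            nn.BatchNorm2d(channels),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return x + self.block(x)  # residual connection between the narrow ends

print(InvertedResidual(32)(torch.randn(1, 32, 56, 56)).shape)  # torch.Size([1, 32, 56, 56])
```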
pytorch-image-models/hfdocs/source/models/mnasnet.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/mnasnet.mdx", "repo_id": "pytorch-image-models", "token_count": 2101 }
191
# SelecSLS **SelecSLS** uses novel selective long and short range skip connections to improve the information flow allowing for a drastically faster network without compromising accuracy. ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('selecsls42b', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `selecsls42b`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('selecsls42b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
## Citation ```BibTeX @article{Mehta_2020, title={XNect}, volume={39}, ISSN={1557-7368}, url={http://dx.doi.org/10.1145/3386569.3392410}, DOI={10.1145/3386569.3392410}, number={4}, journal={ACM Transactions on Graphics}, publisher={Association for Computing Machinery (ACM)}, author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian}, year={2020}, month={Jul} } ``` <!-- Type: model-index Collections: - Name: SelecSLS Paper: Title: 'XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera' URL: https://paperswithcode.com/paper/xnect-real-time-multi-person-3d-human-pose Models: - Name: selecsls42b In Collection: SelecSLS Metadata: FLOPs: 3824022528 Parameters: 32460000 File Size: 129948954 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls42b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L335 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.18% Top 5 Accuracy: 93.39% - Name: selecsls60 In Collection: SelecSLS Metadata: FLOPs: 4610472600 Parameters: 30670000 File Size: 122839714 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls60 Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L342 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 77.99% Top 5 Accuracy: 93.83% - Name: selecsls60b In Collection: SelecSLS Metadata: FLOPs: 4657653144 Parameters: 32770000 File Size: 131252898 Architecture: - Batch Normalization - Convolution - Dense Connections - Dropout - Global Average Pooling - ReLU - SelecSLS Block Tasks: - Image Classification Training Techniques: - Cosine Annealing - Random Erasing Training Data: - ImageNet ID: selecsls60b Crop Pct: '0.875' Image Size: '224' Interpolation: bicubic Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L349 Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth Results: - Task: Image Classification Dataset: ImageNet Metrics: Top 1 Accuracy: 78.41% Top 5 Accuracy: 94.18% -->
pytorch-image-models/hfdocs/source/models/selecsls.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/selecsls.mdx", "repo_id": "pytorch-image-models", "token_count": 2420 }
192
# Xception **Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution layers](https://paperswithcode.com/method/depthwise-separable-convolution). The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models). ## How do I use this model on an image? To load a pretrained model: ```py >>> import timm >>> model = timm.create_model('xception', pretrained=True) >>> model.eval() ``` To load and preprocess the image: ```py >>> import urllib >>> from PIL import Image >>> from timm.data import resolve_data_config >>> from timm.data.transforms_factory import create_transform >>> config = resolve_data_config({}, model=model) >>> transform = create_transform(**config) >>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") >>> urllib.request.urlretrieve(url, filename) >>> img = Image.open(filename).convert('RGB') >>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension ``` To get the model predictions: ```py >>> import torch >>> with torch.no_grad(): ... out = model(tensor) >>> probabilities = torch.nn.functional.softmax(out[0], dim=0) >>> print(probabilities.shape) >>> # prints: torch.Size([1000]) ``` To get the top-5 predictions class names: ```py >>> # Get imagenet class mappings >>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt") >>> urllib.request.urlretrieve(url, filename) >>> with open("imagenet_classes.txt", "r") as f: ... categories = [s.strip() for s in f.readlines()] >>> # Print top categories per image >>> top5_prob, top5_catid = torch.topk(probabilities, 5) >>> for i in range(top5_prob.size(0)): ... print(categories[top5_catid[i]], top5_prob[i].item()) >>> # prints class names and probabilities like: >>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)] ``` Replace the model name with the variant you want to use, e.g. `xception`. You can find the IDs in the model summaries at the top of this page. To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use. ## How do I finetune this model? You can finetune any of the pre-trained models just by changing the classifier (the last layer). ```py >>> model = timm.create_model('xception', pretrained=True, num_classes=NUM_FINETUNE_CLASSES) ``` To finetune on your own dataset, you have to write a training loop or adapt [timm's training script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset. ## How do I train this model? You can follow the [timm recipe scripts](../scripts) for training a new model afresh. 
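If you would rather not adapt the full training script, a minimal fine-tuning loop can serve as a starting point. The sketch below is illustrative only: `train_loader` is assumed to be a `torch.utils.data.DataLoader` yielding `(image, label)` batches for your dataset, and `NUM_FINETUNE_CLASSES` is the same placeholder used above.

```py
>>> import torch
>>> model = timm.create_model('xception', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
>>> optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)
>>> criterion = torch.nn.CrossEntropyLoss()
>>> model.train()
>>> for images, labels in train_loader:
...     optimizer.zero_grad()
...     loss = criterion(model(images), labels)
...     loss.backward()
...     optimizer.step()
```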
## Citation

```BibTeX
@misc{chollet2017xception,
      title={Xception: Deep Learning with Depthwise Separable Convolutions},
      author={François Chollet},
      year={2017},
      eprint={1610.02357},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```

<!--
Type: model-index
Collections:
- Name: Xception
  Paper:
    Title: 'Xception: Deep Learning with Depthwise Separable Convolutions'
    URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise
Models:
- Name: xception
  In Collection: Xception
  Metadata:
    FLOPs: 10600506792
    Parameters: 22860000
    File Size: 91675053
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception
    Crop Pct: '0.897'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception.py#L229
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 79.05%
      Top 5 Accuracy: 94.4%
- Name: xception41
  In Collection: Xception
  Metadata:
    FLOPs: 11681983232
    Parameters: 26970000
    File Size: 108422028
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception41
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L181
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 78.54%
      Top 5 Accuracy: 94.28%
- Name: xception65
  In Collection: Xception
  Metadata:
    FLOPs: 17585702144
    Parameters: 39920000
    File Size: 160536780
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception65
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L200
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 79.55%
      Top 5 Accuracy: 94.66%
- Name: xception71
  In Collection: Xception
  Metadata:
    FLOPs: 22817346560
    Parameters: 42340000
    File Size: 170295556
    Architecture:
    - 1x1 Convolution
    - Convolution
    - Dense Connections
    - Depthwise Separable Convolution
    - Global Average Pooling
    - Max Pooling
    - ReLU
    - Residual Connection
    - Softmax
    Tasks:
    - Image Classification
    Training Data:
    - ImageNet
    ID: xception71
    Crop Pct: '0.903'
    Image Size: '299'
    Interpolation: bicubic
  Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L219
  Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth
  Results:
  - Task: Image Classification
    Dataset: ImageNet
    Metrics:
      Top 1 Accuracy: 79.88%
      Top 5 Accuracy: 94.93%
-->
pytorch-image-models/hfdocs/source/models/xception.mdx/0
{ "file_path": "pytorch-image-models/hfdocs/source/models/xception.mdx", "repo_id": "pytorch-image-models", "token_count": 2674 }
193
from .auto_augment import RandAugment, AutoAugment, rand_augment_ops, auto_augment_policy,\ rand_augment_transform, auto_augment_transform from .config import resolve_data_config, resolve_model_data_config from .constants import * from .dataset import ImageDataset, IterableImageDataset, AugMixDataset from .dataset_factory import create_dataset from .dataset_info import DatasetInfo, CustomDatasetInfo from .imagenet_info import ImageNetInfo, infer_imagenet_subset from .loader import create_loader from .mixup import Mixup, FastCollateMixup from .readers import create_reader from .readers import get_img_extensions, is_img_extension, set_img_extensions, add_img_extensions, del_img_extensions from .real_labels import RealLabelsImagenet from .transforms import * from .transforms_factory import create_transform
pytorch-image-models/timm/data/__init__.py/0
{ "file_path": "pytorch-image-models/timm/data/__init__.py", "repo_id": "pytorch-image-models", "token_count": 256 }
194
import os
import pickle


def load_class_map(map_or_filename, root=''):
    if isinstance(map_or_filename, dict):
        # a dict passed directly is used as-is
        assert map_or_filename, 'class_map dict must be non-empty'
        return map_or_filename
    class_map_path = map_or_filename
    if not os.path.exists(class_map_path):
        class_map_path = os.path.join(root, class_map_path)
        assert os.path.exists(class_map_path), 'Cannot locate specified class map file (%s)' % map_or_filename
    class_map_ext = os.path.splitext(map_or_filename)[-1].lower()
    if class_map_ext == '.txt':
        with open(class_map_path) as f:
            class_to_idx = {v.strip(): k for k, v in enumerate(f)}
    elif class_map_ext == '.pkl':
        with open(class_map_path, 'rb') as f:
            class_to_idx = pickle.load(f)
    else:
        assert False, f'Unsupported class map file extension ({class_map_ext}).'
    return class_to_idx
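

# --- Usage sketch (illustrative, not part of the original module) ---
# A '.txt' class map is expected to contain one class name per line; load_class_map
# returns a {class_name: index} mapping. The temporary file and its contents below
# are hypothetical examples, not part of timm.
if __name__ == '__main__':
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('cat\ndog\nbird\n')
    print(load_class_map(f.name))  # {'cat': 0, 'dog': 1, 'bird': 2}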
pytorch-image-models/timm/data/readers/class_map.py/0
{ "file_path": "pytorch-image-models/timm/data/readers/class_map.py", "repo_id": "pytorch-image-models", "token_count": 387 }
195
from .activations import * from .adaptive_avgmax_pool import \ adaptive_avgmax_pool2d, select_adaptive_pool2d, AdaptiveAvgMaxPool2d, SelectAdaptivePool2d from .attention_pool import AttentionPoolLatent from .attention_pool2d import AttentionPool2d, RotAttentionPool2d, RotaryEmbedding from .blur_pool import BlurPool2d from .classifier import ClassifierHead, create_classifier, NormMlpClassifierHead from .cond_conv2d import CondConv2d, get_condconv_initializer from .config import is_exportable, is_scriptable, is_no_jit, use_fused_attn, \ set_exportable, set_scriptable, set_no_jit, set_layer_config, set_fused_attn from .conv2d_same import Conv2dSame, conv2d_same from .conv_bn_act import ConvNormAct, ConvNormActAa, ConvBnAct from .create_act import create_act_layer, get_act_layer, get_act_fn from .create_attn import get_attn, create_attn from .create_conv2d import create_conv2d from .create_norm import get_norm_layer, create_norm_layer from .create_norm_act import get_norm_act_layer, create_norm_act_layer, get_norm_act_layer from .drop import DropBlock2d, DropPath, drop_block_2d, drop_path from .eca import EcaModule, CecaModule, EfficientChannelAttn, CircularEfficientChannelAttn from .evo_norm import EvoNorm2dB0, EvoNorm2dB1, EvoNorm2dB2,\ EvoNorm2dS0, EvoNorm2dS0a, EvoNorm2dS1, EvoNorm2dS1a, EvoNorm2dS2, EvoNorm2dS2a from .fast_norm import is_fast_norm, set_fast_norm, fast_group_norm, fast_layer_norm from .filter_response_norm import FilterResponseNormTlu2d, FilterResponseNormAct2d from .format import Format, get_channel_dim, get_spatial_dim, nchw_to, nhwc_to from .gather_excite import GatherExcite from .global_context import GlobalContext from .grid import ndgrid, meshgrid from .helpers import to_ntuple, to_2tuple, to_3tuple, to_4tuple, make_divisible, extend_tuple from .inplace_abn import InplaceAbn from .linear import Linear from .mixed_conv2d import MixedConv2d from .mlp import Mlp, GluMlp, GatedMlp, SwiGLU, SwiGLUPacked, ConvMlp, GlobalResponseNormMlp from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .norm import GroupNorm, GroupNorm1, LayerNorm, LayerNorm2d, RmsNorm from .norm_act import BatchNormAct2d, GroupNormAct, GroupNorm1Act, LayerNormAct, LayerNormAct2d,\ SyncBatchNormAct, convert_sync_batchnorm, FrozenBatchNormAct2d, freeze_batch_norm_2d, unfreeze_batch_norm_2d from .padding import get_padding, get_same_padding, pad_same from .patch_dropout import PatchDropout from .patch_embed import PatchEmbed, PatchEmbedWithSize, resample_patch_embed from .pool2d_same import AvgPool2dSame, create_pool2d from .pos_embed import resample_abs_pos_embed, resample_abs_pos_embed_nhwc from .pos_embed_rel import RelPosMlp, RelPosBias, RelPosBiasTf, gen_relative_position_index, gen_relative_log_coords, \ resize_rel_pos_bias_table, resize_rel_pos_bias_table_simple, resize_rel_pos_bias_table_levit from .pos_embed_sincos import pixel_freq_bands, freq_bands, build_sincos2d_pos_embed, build_fourier_pos_embed, \ build_rotary_pos_embed, apply_rot_embed, apply_rot_embed_cat, apply_rot_embed_list, apply_keep_indices_nlc, \ FourierEmbed, RotaryEmbedding, RotaryEmbeddingCat from .squeeze_excite import SEModule, SqueezeExcite, EffectiveSEModule, EffectiveSqueezeExcite from .selective_kernel import SelectiveKernel from .separable_conv import SeparableConv2d, SeparableConvNormAct from .space_to_depth import SpaceToDepthModule, SpaceToDepth, DepthToSpace from .split_attn import SplitAttn from .split_batchnorm import SplitBatchNorm2d, convert_splitbn_model from .std_conv import StdConv2d, StdConv2dSame, 
ScaledStdConv2d, ScaledStdConv2dSame from .test_time_pool import TestTimePoolHead, apply_test_time_pool from .trace_utils import _assert, _float_to_int from .typing import LayerType, PadType from .weight_init import trunc_normal_, trunc_normal_tf_, variance_scaling_, lecun_normal_
pytorch-image-models/timm/layers/__init__.py/0
{ "file_path": "pytorch-image-models/timm/layers/__init__.py", "repo_id": "pytorch-image-models", "token_count": 1381 }
196
""" Attention Factory Hacked together by / Copyright 2021 Ross Wightman """ import torch from functools import partial from .bottleneck_attn import BottleneckAttn from .cbam import CbamModule, LightCbamModule from .eca import EcaModule, CecaModule from .gather_excite import GatherExcite from .global_context import GlobalContext from .halo_attn import HaloAttn from .lambda_layer import LambdaLayer from .non_local_attn import NonLocalAttn, BatNonLocalAttn from .selective_kernel import SelectiveKernel from .split_attn import SplitAttn from .squeeze_excite import SEModule, EffectiveSEModule def get_attn(attn_type): if isinstance(attn_type, torch.nn.Module): return attn_type module_cls = None if attn_type: if isinstance(attn_type, str): attn_type = attn_type.lower() # Lightweight attention modules (channel and/or coarse spatial). # Typically added to existing network architecture blocks in addition to existing convolutions. if attn_type == 'se': module_cls = SEModule elif attn_type == 'ese': module_cls = EffectiveSEModule elif attn_type == 'eca': module_cls = EcaModule elif attn_type == 'ecam': module_cls = partial(EcaModule, use_mlp=True) elif attn_type == 'ceca': module_cls = CecaModule elif attn_type == 'ge': module_cls = GatherExcite elif attn_type == 'gc': module_cls = GlobalContext elif attn_type == 'gca': module_cls = partial(GlobalContext, fuse_add=True, fuse_scale=False) elif attn_type == 'cbam': module_cls = CbamModule elif attn_type == 'lcbam': module_cls = LightCbamModule # Attention / attention-like modules w/ significant params # Typically replace some of the existing workhorse convs in a network architecture. # All of these accept a stride argument and can spatially downsample the input. elif attn_type == 'sk': module_cls = SelectiveKernel elif attn_type == 'splat': module_cls = SplitAttn # Self-attention / attention-like modules w/ significant compute and/or params # Typically replace some of the existing workhorse convs in a network architecture. # All of these accept a stride argument and can spatially downsample the input. elif attn_type == 'lambda': return LambdaLayer elif attn_type == 'bottleneck': return BottleneckAttn elif attn_type == 'halo': return HaloAttn elif attn_type == 'nl': module_cls = NonLocalAttn elif attn_type == 'bat': module_cls = BatNonLocalAttn # Woops! else: assert False, "Invalid attn module (%s)" % attn_type elif isinstance(attn_type, bool): if attn_type: module_cls = SEModule else: module_cls = attn_type return module_cls def create_attn(attn_type, channels, **kwargs): module_cls = get_attn(attn_type) if module_cls is not None: # NOTE: it's expected the first (positional) argument of all attention layers is the # input channels return module_cls(channels, **kwargs) return None
pytorch-image-models/timm/layers/create_attn.py/0
{ "file_path": "pytorch-image-models/timm/layers/create_attn.py", "repo_id": "pytorch-image-models", "token_count": 1588 }
197
import torch from torch import nn as nn try: from inplace_abn.functions import inplace_abn, inplace_abn_sync has_iabn = True except ImportError: has_iabn = False def inplace_abn(x, weight, bias, running_mean, running_var, training=True, momentum=0.1, eps=1e-05, activation="leaky_relu", activation_param=0.01): raise ImportError( "Please install InplaceABN:'pip install git+https://github.com/mapillary/[email protected]'") def inplace_abn_sync(**kwargs): inplace_abn(**kwargs) class InplaceAbn(nn.Module): """Activated Batch Normalization This gathers a BatchNorm and an activation function in a single module Parameters ---------- num_features : int Number of feature channels in the input and output. eps : float Small constant to prevent numerical issues. momentum : float Momentum factor applied to compute running statistics. affine : bool If `True` apply learned scale and shift transformation after normalization. act_layer : str or nn.Module type Name or type of the activation functions, one of: `leaky_relu`, `elu` act_param : float Negative slope for the `leaky_relu` activation. """ def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, apply_act=True, act_layer="leaky_relu", act_param=0.01, drop_layer=None): super(InplaceAbn, self).__init__() self.num_features = num_features self.affine = affine self.eps = eps self.momentum = momentum if apply_act: if isinstance(act_layer, str): assert act_layer in ('leaky_relu', 'elu', 'identity', '') self.act_name = act_layer if act_layer else 'identity' else: # convert act layer passed as type to string if act_layer == nn.ELU: self.act_name = 'elu' elif act_layer == nn.LeakyReLU: self.act_name = 'leaky_relu' elif act_layer is None or act_layer == nn.Identity: self.act_name = 'identity' else: assert False, f'Invalid act layer {act_layer.__name__} for IABN' else: self.act_name = 'identity' self.act_param = act_param if self.affine: self.weight = nn.Parameter(torch.ones(num_features)) self.bias = nn.Parameter(torch.zeros(num_features)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.register_buffer('running_mean', torch.zeros(num_features)) self.register_buffer('running_var', torch.ones(num_features)) self.reset_parameters() def reset_parameters(self): nn.init.constant_(self.running_mean, 0) nn.init.constant_(self.running_var, 1) if self.affine: nn.init.constant_(self.weight, 1) nn.init.constant_(self.bias, 0) def forward(self, x): output = inplace_abn( x, self.weight, self.bias, self.running_mean, self.running_var, self.training, self.momentum, self.eps, self.act_name, self.act_param) if isinstance(output, tuple): output = output[0] return output
pytorch-image-models/timm/layers/inplace_abn.py/0
{ "file_path": "pytorch-image-models/timm/layers/inplace_abn.py", "repo_id": "pytorch-image-models", "token_count": 1556 }
198
""" Relative position embedding modules and functions Hacked together by / Copyright 2022 Ross Wightman """ import math import os from typing import Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from .grid import ndgrid from .interpolate import RegularGridInterpolator from .mlp import Mlp from .weight_init import trunc_normal_ _USE_SCIPY = int(os.environ.get('TIMM_USE_SCIPY_INTERP', 0)) > 0 def gen_relative_position_index( q_size: Tuple[int, int], k_size: Optional[Tuple[int, int]] = None, class_token: bool = False, ) -> torch.Tensor: # Adapted with significant modifications from Swin / BeiT codebases # get pair-wise relative position index for each token inside the window assert k_size is None, 'Different q & k sizes not currently supported' # FIXME coords = torch.stack(ndgrid(torch.arange(q_size[0]), torch.arange(q_size[1]))).flatten(1) # 2, Wh, Ww relative_coords = coords[:, :, None] - coords[:, None, :] # 2, Wh*Ww, Wh*Ww relative_coords = relative_coords.permute(1, 2, 0) # Qh*Qw, Kh*Kw, 2 relative_coords[:, :, 0] += q_size[0] - 1 # shift to start from 0 relative_coords[:, :, 1] += q_size[1] - 1 relative_coords[:, :, 0] *= 2 * q_size[1] - 1 num_relative_distance = (2 * q_size[0] - 1) * (2 * q_size[1] - 1) # else: # # FIXME different q vs k sizes is a WIP, need to better offset the two grids? # q_coords = torch.stack( # ndgrid( # torch.arange(q_size[0]), # torch.arange(q_size[1]) # ) # ).flatten(1) # 2, Wh, Ww # k_coords = torch.stack( # ndgrid( # torch.arange(k_size[0]), # torch.arange(k_size[1]) # ) # ).flatten(1) # relative_coords = q_coords[:, :, None] - k_coords[:, None, :] # 2, Wh*Ww, Wh*Ww # relative_coords = relative_coords.permute(1, 2, 0) # Qh*Qw, Kh*Kw, 2 # relative_coords[:, :, 0] += max(q_size[0], k_size[0]) - 1 # shift to start from 0 # relative_coords[:, :, 1] += max(q_size[1], k_size[1]) - 1 # relative_coords[:, :, 0] *= k_size[1] + q_size[1] - 1 # relative_position_index = relative_coords.sum(-1) # Qh*Qw, Kh*Kw # num_relative_distance = (q_size[0] + k_size[0] - 1) * (q_size[1] + k_size[1] - 1) + 3 relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww if class_token: # handle cls to token & token 2 cls & cls to cls as per beit for rel pos bias # NOTE not intended or tested with MLP log-coords relative_position_index = F.pad(relative_position_index, [1, 0, 1, 0]) relative_position_index[0, 0:] = num_relative_distance relative_position_index[0:, 0] = num_relative_distance + 1 relative_position_index[0, 0] = num_relative_distance + 2 return relative_position_index.contiguous() def resize_rel_pos_bias_table_simple( rel_pos_bias, new_window_size: Tuple[int, int], new_bias_shape: Tuple[int, ...], ): dst_size = (new_window_size[0] * 2 - 1, new_window_size[1] * 2 - 1) if rel_pos_bias.ndim == 3: # TF maxvit style (num_heads, H, W) bias shape, no extra tokens currently supported _, dst_h, dst_w = new_bias_shape num_attn_heads, src_h, src_w = rel_pos_bias.shape assert dst_h == dst_size[0] and dst_w == dst_size[1] if src_h != dst_h or src_w != dst_w: rel_pos_bias = torch.nn.functional.interpolate( rel_pos_bias.unsqueeze(0), size=dst_size, mode="bicubic", align_corners=False, ).squeeze(0) else: assert rel_pos_bias.ndim == 2 # (num_pos, num_heads) (aka flat) bias shape dst_num_pos, _ = new_bias_shape src_num_pos, num_attn_heads = rel_pos_bias.shape num_extra_tokens = dst_num_pos - (dst_size[0] * dst_size[1]) src_size = int((src_num_pos - num_extra_tokens) ** 0.5) src_size = (src_size, src_size) # FIXME could support non-equal src if 
argument passed
    if src_size[0] != dst_size[0] or src_size[1] != dst_size[1]:
        if num_extra_tokens:
            extra_tokens = rel_pos_bias[-num_extra_tokens:, :]
            rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :]
        else:
            extra_tokens = None
        rel_pos_bias = torch.nn.functional.interpolate(
            rel_pos_bias.transpose(1, 0).reshape((1, -1, src_size[0], src_size[1])),
            size=dst_size,
            mode="bicubic",
            align_corners=False,
        ).view(-1, dst_num_pos - num_extra_tokens).transpose(0, 1)
        if extra_tokens is not None:
            rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0)
    return rel_pos_bias


def resize_rel_pos_bias_table_levit(
        position_bias_table,
        new_size,
        interpolation: str = 'bicubic',
        antialias: bool = True,
):
    """
    Resample relative position bias table suggested in LeVit
    Adapted from: https://github.com/microsoft/Cream/blob/main/TinyViT/utils.py
    """
    L1, nH1 = position_bias_table.size()
    L2, nH2 = new_size
    assert nH1 == nH2
    if L1 != L2:
        orig_dtype = position_bias_table.dtype
        position_bias_table = position_bias_table.float()
        # bicubic interpolate relative_position_bias_table if not match
        S1 = int(L1 ** 0.5)
        S2 = int(L2 ** 0.5)
        relative_position_bias_table_resized = F.interpolate(
            position_bias_table.permute(1, 0).view(1, nH1, S1, S1),
            size=(S2, S2),
            mode=interpolation,
            antialias=antialias)
        relative_position_bias_table_resized = \
            relative_position_bias_table_resized.view(nH2, L2).permute(1, 0)
        # .to() is not in-place; assign the result so the cast back to the original dtype takes effect
        relative_position_bias_table_resized = relative_position_bias_table_resized.to(orig_dtype)
        return relative_position_bias_table_resized
    else:
        return position_bias_table


def resize_rel_pos_bias_table(
        rel_pos_bias,
        new_window_size: Tuple[int, int],
        new_bias_shape: Tuple[int, ...],
):
    """ Resize relative position bias table using more advanced interpolation.

    Modified from code in Microsoft Unilm (https://github.com/microsoft/unilm) repo (BeiT, BeiT-v2, etc).
https://github.com/microsoft/unilm/blob/5255d52de86dad642810f5849dd357769346c1d7/beit/run_class_finetuning.py#L351 Args: rel_pos_bias: new_window_size: new_bias_shape: Returns: """ if _USE_SCIPY: from scipy import interpolate dst_size = (new_window_size[0] * 2 - 1, new_window_size[1] * 2 - 1) if rel_pos_bias.ndim == 3: # TF maxvit style (num_heads, H, W) bias shape, no extra tokens currently supported num_extra_tokens = 0 _, dst_h, dst_w = new_bias_shape assert dst_h == dst_size[0] and dst_w == dst_size[1] num_attn_heads, src_h, src_w = rel_pos_bias.shape src_size = (src_h, src_w) has_flat_shape = False else: assert rel_pos_bias.ndim == 2 # (num_pos, num_heads) (aka flat) bias shape dst_num_pos, _ = new_bias_shape src_num_pos, num_attn_heads = rel_pos_bias.shape num_extra_tokens = dst_num_pos - (dst_size[0] * dst_size[1]) src_size = int((src_num_pos - num_extra_tokens) ** 0.5) src_size = (src_size, src_size) has_flat_shape = True if src_size[0] != dst_size[0] or src_size[1] != dst_size[1]: # print("Interpolating position from %dx%d to %dx%d" % (src_size[0], src_size[1], dst_size[0], dst_size[1])) if num_extra_tokens: extra_tokens = rel_pos_bias[-num_extra_tokens:, :] rel_pos_bias = rel_pos_bias[:-num_extra_tokens, :] else: extra_tokens = None def geometric_progression(a, r, n): return a * (1.0 - r ** n) / (1.0 - r) def _calc(src, dst): left, right = 1.01, 1.5 while right - left > 1e-6: q = (left + right) / 2.0 gp = geometric_progression(1, q, src // 2) if gp > dst // 2: right = q else: left = q dis = [] cur = 1 for i in range(src // 2): dis.append(cur) cur += q ** (i + 1) r_ids = [-_ for _ in reversed(dis)] return r_ids + [0] + dis y = _calc(src_size[0], dst_size[0]) x = _calc(src_size[1], dst_size[1]) yx = [torch.tensor(y), torch.tensor(x)] # print("Original positions = %s" % str(x)) ty = dst_size[0] // 2.0 tx = dst_size[1] // 2.0 dy = torch.arange(-ty, ty + 0.1, 1.0) dx = torch.arange(-tx, tx + 0.1, 1.0) dyx = ndgrid(dy, dx) # print("Target positions = %s" % str(dx)) all_rel_pos_bias = [] for i in range(num_attn_heads): if has_flat_shape: z = rel_pos_bias[:, i].view(src_size[0], src_size[1]).float() else: z = rel_pos_bias[i, :, :].float() if _USE_SCIPY: # Original beit code uses scipy w/ cubic interpolation f = interpolate.interp2d(x, y, z.numpy(), kind='cubic') r = torch.Tensor(f(dx, dy)).contiguous().to(rel_pos_bias.device) else: # Without scipy dependency, I've found a reasonably simple impl # that supports uneven spaced interpolation pts with 'linear' interp. # Results are comparable to scipy for model accuracy in most cases. f = RegularGridInterpolator(yx, z) r = f(dyx).contiguous().to(rel_pos_bias.device) if has_flat_shape: r = r.view(-1, 1) all_rel_pos_bias.append(r) if has_flat_shape: rel_pos_bias = torch.cat(all_rel_pos_bias, dim=-1) else: rel_pos_bias = torch.cat(all_rel_pos_bias, dim=0) if extra_tokens is not None: assert has_flat_shape rel_pos_bias = torch.cat((rel_pos_bias, extra_tokens), dim=0) return rel_pos_bias class RelPosBias(nn.Module): """ Relative Position Bias Adapted from Swin-V1 relative position bias impl, modularized. 
""" def __init__(self, window_size, num_heads, prefix_tokens=0): super().__init__() assert prefix_tokens <= 1 self.window_size = window_size self.window_area = window_size[0] * window_size[1] self.bias_shape = (self.window_area + prefix_tokens,) * 2 + (num_heads,) num_relative_distance = (2 * window_size[0] - 1) * (2 * window_size[1] - 1) + 3 * prefix_tokens self.relative_position_bias_table = nn.Parameter(torch.zeros(num_relative_distance, num_heads)) self.register_buffer( "relative_position_index", gen_relative_position_index(self.window_size, class_token=prefix_tokens > 0).view(-1), persistent=False, ) self.init_weights() def init_weights(self): trunc_normal_(self.relative_position_bias_table, std=.02) def get_bias(self) -> torch.Tensor: relative_position_bias = self.relative_position_bias_table[self.relative_position_index] # win_h * win_w, win_h * win_w, num_heads relative_position_bias = relative_position_bias.view(self.bias_shape).permute(2, 0, 1) return relative_position_bias.unsqueeze(0).contiguous() def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): return attn + self.get_bias() def gen_relative_log_coords( win_size: Tuple[int, int], pretrained_win_size: Tuple[int, int] = (0, 0), mode='swin', ): assert mode in ('swin', 'cr') # as per official swin-v2 impl, supporting timm specific 'cr' log coords as well relative_coords_h = torch.arange(-(win_size[0] - 1), win_size[0]).to(torch.float32) relative_coords_w = torch.arange(-(win_size[1] - 1), win_size[1]).to(torch.float32) relative_coords_table = torch.stack(ndgrid(relative_coords_h, relative_coords_w)) relative_coords_table = relative_coords_table.permute(1, 2, 0).contiguous() # 2*Wh-1, 2*Ww-1, 2 if mode == 'swin': if pretrained_win_size[0] > 0: relative_coords_table[:, :, 0] /= (pretrained_win_size[0] - 1) relative_coords_table[:, :, 1] /= (pretrained_win_size[1] - 1) else: relative_coords_table[:, :, 0] /= (win_size[0] - 1) relative_coords_table[:, :, 1] /= (win_size[1] - 1) relative_coords_table *= 8 # normalize to -8, 8 relative_coords_table = torch.sign(relative_coords_table) * torch.log2( 1.0 + relative_coords_table.abs()) / math.log2(8) else: # mode == 'cr' relative_coords_table = torch.sign(relative_coords_table) * torch.log( 1.0 + relative_coords_table.abs()) return relative_coords_table class RelPosMlp(nn.Module): """ Log-Coordinate Relative Position MLP Based on ideas presented in Swin-V2 paper (https://arxiv.org/abs/2111.09883) This impl covers the 'swin' implementation as well as two timm specific modes ('cr', and 'rw') """ def __init__( self, window_size, num_heads=8, hidden_dim=128, prefix_tokens=0, mode='cr', pretrained_window_size=(0, 0) ): super().__init__() self.window_size = window_size self.window_area = self.window_size[0] * self.window_size[1] self.prefix_tokens = prefix_tokens self.num_heads = num_heads self.bias_shape = (self.window_area,) * 2 + (num_heads,) if mode == 'swin': self.bias_act = nn.Sigmoid() self.bias_gain = 16 mlp_bias = (True, False) else: self.bias_act = nn.Identity() self.bias_gain = None mlp_bias = True self.mlp = Mlp( 2, # x, y hidden_features=hidden_dim, out_features=num_heads, act_layer=nn.ReLU, bias=mlp_bias, drop=(0.125, 0.) 
) self.register_buffer( "relative_position_index", gen_relative_position_index(window_size).view(-1), persistent=False) # get relative_coords_table self.register_buffer( "rel_coords_log", gen_relative_log_coords(window_size, pretrained_window_size, mode=mode), persistent=False) def get_bias(self) -> torch.Tensor: relative_position_bias = self.mlp(self.rel_coords_log) if self.relative_position_index is not None: relative_position_bias = relative_position_bias.view(-1, self.num_heads)[self.relative_position_index] relative_position_bias = relative_position_bias.view(self.bias_shape) relative_position_bias = relative_position_bias.permute(2, 0, 1) relative_position_bias = self.bias_act(relative_position_bias) if self.bias_gain is not None: relative_position_bias = self.bias_gain * relative_position_bias if self.prefix_tokens: relative_position_bias = F.pad(relative_position_bias, [self.prefix_tokens, 0, self.prefix_tokens, 0]) return relative_position_bias.unsqueeze(0).contiguous() def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): return attn + self.get_bias() def generate_lookup_tensor( length: int, max_relative_position: Optional[int] = None, ): """Generate a one_hot lookup tensor to reindex embeddings along one dimension. Args: length: the length to reindex to. max_relative_position: the maximum relative position to consider. Relative position embeddings for distances above this threshold are zeroed out. Returns: a lookup Tensor of size [length, length, vocab_size] that satisfies ret[n,m,v] = 1{m - n + max_relative_position = v}. """ if max_relative_position is None: max_relative_position = length - 1 # Return the cached lookup tensor, otherwise compute it and cache it. vocab_size = 2 * max_relative_position + 1 ret = torch.zeros(length, length, vocab_size) for i in range(length): for x in range(length): v = x - i + max_relative_position if abs(x - i) > max_relative_position: continue ret[i, x, v] = 1 return ret def reindex_2d_einsum_lookup( relative_position_tensor, height: int, width: int, height_lookup: torch.Tensor, width_lookup: torch.Tensor, ) -> torch.Tensor: """Reindex 2d relative position bias with 2 independent einsum lookups. Adapted from: https://github.com/google-research/maxvit/blob/2e06a7f1f70c76e64cd3dabe5cd1b8c1a23c9fb7/maxvit/models/attention_utils.py Args: relative_position_tensor: tensor of shape [..., vocab_height, vocab_width, ...]. height: height to reindex to. width: width to reindex to. height_lookup: one-hot height lookup width_lookup: one-hot width lookup Returns: reindexed_tensor: a Tensor of shape [..., height * width, height * width, ...] 
""" reindexed_tensor = torch.einsum('nhw,ixh->nixw', relative_position_tensor, height_lookup) reindexed_tensor = torch.einsum('nixw,jyw->nijxy', reindexed_tensor, width_lookup) area = height * width return reindexed_tensor.reshape(relative_position_tensor.shape[0], area, area) class RelPosBiasTf(nn.Module): """ Relative Position Bias Impl (Compatible with Tensorflow MaxViT models) Adapted from: https://github.com/google-research/maxvit/blob/2e06a7f1f70c76e64cd3dabe5cd1b8c1a23c9fb7/maxvit/models/attention_utils.py """ def __init__(self, window_size, num_heads, prefix_tokens=0): super().__init__() assert prefix_tokens <= 1 self.window_size = window_size self.window_area = window_size[0] * window_size[1] self.num_heads = num_heads vocab_height = 2 * window_size[0] - 1 vocab_width = 2 * window_size[1] - 1 self.bias_shape = (self.num_heads, vocab_height, vocab_width) self.relative_position_bias_table = nn.Parameter(torch.zeros(self.bias_shape)) self.register_buffer('height_lookup', generate_lookup_tensor(window_size[0]), persistent=False) self.register_buffer('width_lookup', generate_lookup_tensor(window_size[1]), persistent=False) self.init_weights() def init_weights(self): nn.init.normal_(self.relative_position_bias_table, std=.02) def get_bias(self) -> torch.Tensor: # FIXME change to not use one-hot/einsum? return reindex_2d_einsum_lookup( self.relative_position_bias_table, self.window_size[0], self.window_size[1], self.height_lookup, self.width_lookup ) def forward(self, attn, shared_rel_pos: Optional[torch.Tensor] = None): return attn + self.get_bias()
pytorch-image-models/timm/layers/pos_embed_rel.py/0
{ "file_path": "pytorch-image-models/timm/layers/pos_embed_rel.py", "repo_id": "pytorch-image-models", "token_count": 9303 }
199
""" Cross Entropy w/ smoothing or soft targets Hacked together by / Copyright 2021 Ross Wightman """ import torch import torch.nn as nn import torch.nn.functional as F class LabelSmoothingCrossEntropy(nn.Module): """ NLL loss with label smoothing. """ def __init__(self, smoothing=0.1): super(LabelSmoothingCrossEntropy, self).__init__() assert smoothing < 1.0 self.smoothing = smoothing self.confidence = 1. - smoothing def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: logprobs = F.log_softmax(x, dim=-1) nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)) nll_loss = nll_loss.squeeze(1) smooth_loss = -logprobs.mean(dim=-1) loss = self.confidence * nll_loss + self.smoothing * smooth_loss return loss.mean() class SoftTargetCrossEntropy(nn.Module): def __init__(self): super(SoftTargetCrossEntropy, self).__init__() def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor: loss = torch.sum(-target * F.log_softmax(x, dim=-1), dim=-1) return loss.mean()
pytorch-image-models/timm/loss/cross_entropy.py/0
{ "file_path": "pytorch-image-models/timm/loss/cross_entropy.py", "repo_id": "pytorch-image-models", "token_count": 470 }
200
"""Pytorch Densenet implementation w/ tweaks This file is a copy of https://github.com/pytorch/vision 'densenet.py' (BSD-3-Clause) with fixed kwargs passthrough and addition of dynamic global avg/max pool. """ import re from collections import OrderedDict import torch import torch.nn as nn import torch.nn.functional as F import torch.utils.checkpoint as cp from torch.jit.annotations import List from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import BatchNormAct2d, get_norm_act_layer, BlurPool2d, create_classifier from ._builder import build_model_with_cfg from ._manipulate import MATCH_PREV_GROUP from ._registry import register_model, generate_default_cfgs, register_model_deprecations __all__ = ['DenseNet'] class DenseLayer(nn.Module): def __init__( self, num_input_features, growth_rate, bn_size, norm_layer=BatchNormAct2d, drop_rate=0., grad_checkpointing=False, ): super(DenseLayer, self).__init__() self.add_module('norm1', norm_layer(num_input_features)), self.add_module('conv1', nn.Conv2d( num_input_features, bn_size * growth_rate, kernel_size=1, stride=1, bias=False)), self.add_module('norm2', norm_layer(bn_size * growth_rate)), self.add_module('conv2', nn.Conv2d( bn_size * growth_rate, growth_rate, kernel_size=3, stride=1, padding=1, bias=False)), self.drop_rate = float(drop_rate) self.grad_checkpointing = grad_checkpointing def bottleneck_fn(self, xs): # type: (List[torch.Tensor]) -> torch.Tensor concated_features = torch.cat(xs, 1) bottleneck_output = self.conv1(self.norm1(concated_features)) # noqa: T484 return bottleneck_output # todo: rewrite when torchscript supports any def any_requires_grad(self, x): # type: (List[torch.Tensor]) -> bool for tensor in x: if tensor.requires_grad: return True return False @torch.jit.unused # noqa: T484 def call_checkpoint_bottleneck(self, x): # type: (List[torch.Tensor]) -> torch.Tensor def closure(*xs): return self.bottleneck_fn(xs) return cp.checkpoint(closure, *x) @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (List[torch.Tensor]) -> (torch.Tensor) pass @torch.jit._overload_method # noqa: F811 def forward(self, x): # type: (torch.Tensor) -> (torch.Tensor) pass # torchscript does not yet support *args, so we overload method # allowing it to take either a List[Tensor] or single Tensor def forward(self, x): # noqa: F811 if isinstance(x, torch.Tensor): prev_features = [x] else: prev_features = x if self.grad_checkpointing and self.any_requires_grad(prev_features): if torch.jit.is_scripting(): raise Exception("Memory Efficient not supported in JIT") bottleneck_output = self.call_checkpoint_bottleneck(prev_features) else: bottleneck_output = self.bottleneck_fn(prev_features) new_features = self.conv2(self.norm2(bottleneck_output)) if self.drop_rate > 0: new_features = F.dropout(new_features, p=self.drop_rate, training=self.training) return new_features class DenseBlock(nn.ModuleDict): _version = 2 def __init__( self, num_layers, num_input_features, bn_size, growth_rate, norm_layer=BatchNormAct2d, drop_rate=0., grad_checkpointing=False, ): super(DenseBlock, self).__init__() for i in range(num_layers): layer = DenseLayer( num_input_features + i * growth_rate, growth_rate=growth_rate, bn_size=bn_size, norm_layer=norm_layer, drop_rate=drop_rate, grad_checkpointing=grad_checkpointing, ) self.add_module('denselayer%d' % (i + 1), layer) def forward(self, init_features): features = [init_features] for name, layer in self.items(): new_features = layer(features) features.append(new_features) return 
torch.cat(features, 1) class DenseTransition(nn.Sequential): def __init__( self, num_input_features, num_output_features, norm_layer=BatchNormAct2d, aa_layer=None, ): super(DenseTransition, self).__init__() self.add_module('norm', norm_layer(num_input_features)) self.add_module('conv', nn.Conv2d( num_input_features, num_output_features, kernel_size=1, stride=1, bias=False)) if aa_layer is not None: self.add_module('pool', aa_layer(num_output_features, stride=2)) else: self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) class DenseNet(nn.Module): r"""Densenet-BC model class, based on `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ Args: growth_rate (int) - how many filters to add each layer (`k` in paper) block_config (list of 4 ints) - how many layers in each pooling block bn_size (int) - multiplicative factor for number of bottle neck layers (i.e. bn_size * k features in the bottleneck layer) drop_rate (float) - dropout rate before classifier layer proj_drop_rate (float) - dropout rate after each dense layer num_classes (int) - number of classification classes memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_ """ def __init__( self, growth_rate=32, block_config=(6, 12, 24, 16), num_classes=1000, in_chans=3, global_pool='avg', bn_size=4, stem_type='', act_layer='relu', norm_layer='batchnorm2d', aa_layer=None, drop_rate=0., proj_drop_rate=0., memory_efficient=False, aa_stem_only=True, ): self.num_classes = num_classes super(DenseNet, self).__init__() norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) # Stem deep_stem = 'deep' in stem_type # 3x3 deep stem num_init_features = growth_rate * 2 if aa_layer is None: stem_pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) else: stem_pool = nn.Sequential(*[ nn.MaxPool2d(kernel_size=3, stride=1, padding=1), aa_layer(channels=num_init_features, stride=2)]) if deep_stem: stem_chs_1 = stem_chs_2 = growth_rate if 'tiered' in stem_type: stem_chs_1 = 3 * (growth_rate // 4) stem_chs_2 = num_init_features if 'narrow' in stem_type else 6 * (growth_rate // 4) self.features = nn.Sequential(OrderedDict([ ('conv0', nn.Conv2d(in_chans, stem_chs_1, 3, stride=2, padding=1, bias=False)), ('norm0', norm_layer(stem_chs_1)), ('conv1', nn.Conv2d(stem_chs_1, stem_chs_2, 3, stride=1, padding=1, bias=False)), ('norm1', norm_layer(stem_chs_2)), ('conv2', nn.Conv2d(stem_chs_2, num_init_features, 3, stride=1, padding=1, bias=False)), ('norm2', norm_layer(num_init_features)), ('pool0', stem_pool), ])) else: self.features = nn.Sequential(OrderedDict([ ('conv0', nn.Conv2d(in_chans, num_init_features, kernel_size=7, stride=2, padding=3, bias=False)), ('norm0', norm_layer(num_init_features)), ('pool0', stem_pool), ])) self.feature_info = [ dict(num_chs=num_init_features, reduction=2, module=f'features.norm{2 if deep_stem else 0}')] current_stride = 4 # DenseBlocks num_features = num_init_features for i, num_layers in enumerate(block_config): block = DenseBlock( num_layers=num_layers, num_input_features=num_features, bn_size=bn_size, growth_rate=growth_rate, norm_layer=norm_layer, drop_rate=proj_drop_rate, grad_checkpointing=memory_efficient, ) module_name = f'denseblock{(i + 1)}' self.features.add_module(module_name, block) num_features = num_features + num_layers * growth_rate transition_aa_layer = None if aa_stem_only else aa_layer if i != len(block_config) - 1: self.feature_info += [ 
dict(num_chs=num_features, reduction=current_stride, module='features.' + module_name)] current_stride *= 2 trans = DenseTransition( num_input_features=num_features, num_output_features=num_features // 2, norm_layer=norm_layer, aa_layer=transition_aa_layer, ) self.features.add_module(f'transition{i + 1}', trans) num_features = num_features // 2 # Final batch norm self.features.add_module('norm5', norm_layer(num_features)) self.feature_info += [dict(num_chs=num_features, reduction=current_stride, module='features.norm5')] self.num_features = num_features # Linear layer global_pool, classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool, ) self.global_pool = global_pool self.head_drop = nn.Dropout(drop_rate) self.classifier = classifier # Official init from torch repo. for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_normal_(m.weight) elif isinstance(m, nn.BatchNorm2d): nn.init.constant_(m.weight, 1) nn.init.constant_(m.bias, 0) elif isinstance(m, nn.Linear): nn.init.constant_(m.bias, 0) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^features\.conv[012]|features\.norm[012]|features\.pool[012]', blocks=r'^features\.(?:denseblock|transition)(\d+)' if coarse else [ (r'^features\.denseblock(\d+)\.denselayer(\d+)', None), (r'^features\.transition(\d+)', MATCH_PREV_GROUP) # FIXME combine with previous denselayer ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): for b in self.features.modules(): if isinstance(b, DenseLayer): b.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.global_pool, self.classifier = create_classifier( self.num_features, self.num_classes, pool_type=global_pool) def forward_features(self, x): return self.features(x) def forward(self, x): x = self.forward_features(x) x = self.global_pool(x) x = self.head_drop(x) x = self.classifier(x) return x def _filter_torchvision_pretrained(state_dict): pattern = re.compile( r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') for key in list(state_dict.keys()): res = pattern.match(key) if res: new_key = res.group(1) + res.group(2) state_dict[new_key] = state_dict[key] del state_dict[key] return state_dict def _create_densenet(variant, growth_rate, block_config, pretrained, **kwargs): kwargs['growth_rate'] = growth_rate kwargs['block_config'] = block_config return build_model_with_cfg( DenseNet, variant, pretrained, feature_cfg=dict(flatten_sequential=True), pretrained_filter_fn=_filter_torchvision_pretrained, **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'features.conv0', 'classifier': 'classifier', **kwargs, } default_cfgs = generate_default_cfgs({ 'densenet121.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'densenetblur121d.ra_in1k': _cfg( hf_hub_id='timm/', test_input_size=(3, 288, 288), test_crop_pct=0.95), 'densenet264d.untrained': _cfg(), 'densenet121.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet169.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet201.tv_in1k': _cfg(hf_hub_id='timm/'), 'densenet161.tv_in1k': _cfg(hf_hub_id='timm/'), }) @register_model def densenet121(pretrained=False, 
**kwargs) -> DenseNet: r"""Densenet-121 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model_args = dict(growth_rate=32, block_config=(6, 12, 24, 16)) model = _create_densenet('densenet121', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenetblur121d(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-121 w/ blur-pooling & 3-layer 3x3 stem `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model_args = dict(growth_rate=32, block_config=(6, 12, 24, 16), stem_type='deep', aa_layer=BlurPool2d) model = _create_densenet('densenetblur121d', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenet169(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-169 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model_args = dict(growth_rate=32, block_config=(6, 12, 32, 32)) model = _create_densenet('densenet169', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenet201(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-201 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model_args = dict(growth_rate=32, block_config=(6, 12, 48, 32)) model = _create_densenet('densenet201', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenet161(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-161 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model_args = dict(growth_rate=48, block_config=(6, 12, 36, 24)) model = _create_densenet('densenet161', pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def densenet264d(pretrained=False, **kwargs) -> DenseNet: r"""Densenet-264 model from `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>` """ model_args = dict(growth_rate=48, block_config=(6, 12, 64, 48), stem_type='deep') model = _create_densenet('densenet264d', pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'tv_densenet121': 'densenet121.tv_in1k', })
pytorch-image-models/timm/models/densenet.py/0
{ "file_path": "pytorch-image-models/timm/models/densenet.py", "repo_id": "pytorch-image-models", "token_count": 7537 }
201
""" An implementation of GhostNet & GhostNetV2 Models as defined in: GhostNet: More Features from Cheap Operations. https://arxiv.org/abs/1911.11907 GhostNetV2: Enhance Cheap Operation with Long-Range Attention. https://proceedings.neurips.cc/paper_files/paper/2022/file/40b60852a4abdaa696b5a1a78da34635-Paper-Conference.pdf The train script & code of models at: Original model: https://github.com/huawei-noah/CV-backbones/tree/master/ghostnet_pytorch Original model: https://github.com/huawei-noah/Efficient-AI-Backbones/blob/master/ghostnetv2_pytorch/model/ghostnetv2_torch.py """ import math from functools import partial import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import SelectAdaptivePool2d, Linear, make_divisible from ._builder import build_model_with_cfg from ._efficientnet_blocks import SqueezeExcite, ConvBnAct from ._manipulate import checkpoint_seq from ._registry import register_model, generate_default_cfgs __all__ = ['GhostNet'] _SE_LAYER = partial(SqueezeExcite, gate_layer='hard_sigmoid', rd_round_fn=partial(make_divisible, divisor=4)) class GhostModule(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=1, ratio=2, dw_size=3, stride=1, use_act=True, act_layer=nn.ReLU, ): super(GhostModule, self).__init__() self.out_chs = out_chs init_chs = math.ceil(out_chs / ratio) new_chs = init_chs * (ratio - 1) self.primary_conv = nn.Sequential( nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(init_chs), act_layer(inplace=True) if use_act else nn.Identity(), ) self.cheap_operation = nn.Sequential( nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size//2, groups=init_chs, bias=False), nn.BatchNorm2d(new_chs), act_layer(inplace=True) if use_act else nn.Identity(), ) def forward(self, x): x1 = self.primary_conv(x) x2 = self.cheap_operation(x1) out = torch.cat([x1, x2], dim=1) return out[:, :self.out_chs, :, :] class GhostModuleV2(nn.Module): def __init__( self, in_chs, out_chs, kernel_size=1, ratio=2, dw_size=3, stride=1, use_act=True, act_layer=nn.ReLU, ): super().__init__() self.gate_fn = nn.Sigmoid() self.out_chs = out_chs init_chs = math.ceil(out_chs / ratio) new_chs = init_chs * (ratio - 1) self.primary_conv = nn.Sequential( nn.Conv2d(in_chs, init_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(init_chs), act_layer(inplace=True) if use_act else nn.Identity(), ) self.cheap_operation = nn.Sequential( nn.Conv2d(init_chs, new_chs, dw_size, 1, dw_size // 2, groups=init_chs, bias=False), nn.BatchNorm2d(new_chs), act_layer(inplace=True) if use_act else nn.Identity(), ) self.short_conv = nn.Sequential( nn.Conv2d(in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False), nn.BatchNorm2d(out_chs), nn.Conv2d(out_chs, out_chs, kernel_size=(1, 5), stride=1, padding=(0, 2), groups=out_chs, bias=False), nn.BatchNorm2d(out_chs), nn.Conv2d(out_chs, out_chs, kernel_size=(5, 1), stride=1, padding=(2, 0), groups=out_chs, bias=False), nn.BatchNorm2d(out_chs), ) def forward(self, x): res = self.short_conv(F.avg_pool2d(x, kernel_size=2, stride=2)) x1 = self.primary_conv(x) x2 = self.cheap_operation(x1) out = torch.cat([x1, x2], dim=1) return out[:, :self.out_chs, :, :] * F.interpolate( self.gate_fn(res), size=(out.shape[-2], out.shape[-1]), mode='nearest') class GhostBottleneck(nn.Module): """ Ghost bottleneck w/ optional SE""" def __init__( self, in_chs, mid_chs, out_chs, dw_kernel_size=3, stride=1, act_layer=nn.ReLU, se_ratio=0., 
mode='original', ): super(GhostBottleneck, self).__init__() has_se = se_ratio is not None and se_ratio > 0. self.stride = stride # Point-wise expansion if mode == 'original': self.ghost1 = GhostModule(in_chs, mid_chs, use_act=True, act_layer=act_layer) else: self.ghost1 = GhostModuleV2(in_chs, mid_chs, use_act=True, act_layer=act_layer) # Depth-wise convolution if self.stride > 1: self.conv_dw = nn.Conv2d( mid_chs, mid_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size-1)//2, groups=mid_chs, bias=False) self.bn_dw = nn.BatchNorm2d(mid_chs) else: self.conv_dw = None self.bn_dw = None # Squeeze-and-excitation self.se = _SE_LAYER(mid_chs, rd_ratio=se_ratio) if has_se else None # Point-wise linear projection self.ghost2 = GhostModule(mid_chs, out_chs, use_act=False) # shortcut if in_chs == out_chs and self.stride == 1: self.shortcut = nn.Sequential() else: self.shortcut = nn.Sequential( nn.Conv2d( in_chs, in_chs, dw_kernel_size, stride=stride, padding=(dw_kernel_size-1)//2, groups=in_chs, bias=False), nn.BatchNorm2d(in_chs), nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False), nn.BatchNorm2d(out_chs), ) def forward(self, x): shortcut = x # 1st ghost bottleneck x = self.ghost1(x) # Depth-wise convolution if self.conv_dw is not None: x = self.conv_dw(x) x = self.bn_dw(x) # Squeeze-and-excitation if self.se is not None: x = self.se(x) # 2nd ghost bottleneck x = self.ghost2(x) x += self.shortcut(shortcut) return x class GhostNet(nn.Module): def __init__( self, cfgs, num_classes=1000, width=1.0, in_chans=3, output_stride=32, global_pool='avg', drop_rate=0.2, version='v1', ): super(GhostNet, self).__init__() # setting of inverted residual blocks assert output_stride == 32, 'only output_stride==32 is valid, dilation not supported' self.cfgs = cfgs self.num_classes = num_classes self.drop_rate = drop_rate self.grad_checkpointing = False self.feature_info = [] # building first layer stem_chs = make_divisible(16 * width, 4) self.conv_stem = nn.Conv2d(in_chans, stem_chs, 3, 2, 1, bias=False) self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=f'conv_stem')) self.bn1 = nn.BatchNorm2d(stem_chs) self.act1 = nn.ReLU(inplace=True) prev_chs = stem_chs # building inverted residual blocks stages = nn.ModuleList([]) stage_idx = 0 layer_idx = 0 net_stride = 2 for cfg in self.cfgs: layers = [] s = 1 for k, exp_size, c, se_ratio, s in cfg: out_chs = make_divisible(c * width, 4) mid_chs = make_divisible(exp_size * width, 4) layer_kwargs = {} if version == 'v2' and layer_idx > 1: layer_kwargs['mode'] = 'attn' layers.append(GhostBottleneck(prev_chs, mid_chs, out_chs, k, s, se_ratio=se_ratio, **layer_kwargs)) prev_chs = out_chs layer_idx += 1 if s > 1: net_stride *= 2 self.feature_info.append(dict( num_chs=prev_chs, reduction=net_stride, module=f'blocks.{stage_idx}')) stages.append(nn.Sequential(*layers)) stage_idx += 1 out_chs = make_divisible(exp_size * width, 4) stages.append(nn.Sequential(ConvBnAct(prev_chs, out_chs, 1))) self.pool_dim = prev_chs = out_chs self.blocks = nn.Sequential(*stages) # building last several layers self.num_features = out_chs = 1280 self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.conv_head = nn.Conv2d(prev_chs, out_chs, 1, 1, 0, bias=True) self.act2 = nn.ReLU(inplace=True) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(out_chs, num_classes) if num_classes > 0 else nn.Identity() # FIXME init @torch.jit.ignore def group_matcher(self, coarse=False): matcher = 
dict( stem=r'^conv_stem|bn1', blocks=[ (r'^blocks\.(\d+)' if coarse else r'^blocks\.(\d+)\.(\d+)', None), (r'conv_head', (99999,)) ] ) return matcher @torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.classifier def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes # cannot meaningfully change pooling of efficient head after creation self.global_pool = SelectAdaptivePool2d(pool_type=global_pool) self.flatten = nn.Flatten(1) if global_pool else nn.Identity() # don't flatten if pooling disabled self.classifier = Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity() def forward_features(self, x): x = self.conv_stem(x) x = self.bn1(x) x = self.act1(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.blocks, x, flatten=True) else: x = self.blocks(x) return x def forward_head(self, x): x = self.global_pool(x) x = self.conv_head(x) x = self.act2(x) x = self.flatten(x) if self.drop_rate > 0.: x = F.dropout(x, p=self.drop_rate, training=self.training) x = self.classifier(x) return x def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def checkpoint_filter_fn(state_dict, model: nn.Module): out_dict = {} for k, v in state_dict.items(): if 'total' in k: continue out_dict[k] = v return out_dict def _create_ghostnet(variant, width=1.0, pretrained=False, **kwargs): """ Constructs a GhostNet model """ cfgs = [ # k, t, c, SE, s # stage1 [[3, 16, 16, 0, 1]], # stage2 [[3, 48, 24, 0, 2]], [[3, 72, 24, 0, 1]], # stage3 [[5, 72, 40, 0.25, 2]], [[5, 120, 40, 0.25, 1]], # stage4 [[3, 240, 80, 0, 2]], [[3, 200, 80, 0, 1], [3, 184, 80, 0, 1], [3, 184, 80, 0, 1], [3, 480, 112, 0.25, 1], [3, 672, 112, 0.25, 1] ], # stage5 [[5, 672, 160, 0.25, 2]], [[5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1], [5, 960, 160, 0, 1], [5, 960, 160, 0.25, 1] ] ] model_kwargs = dict( cfgs=cfgs, width=width, **kwargs, ) return build_model_with_cfg( GhostNet, variant, pretrained, pretrained_filter_fn=checkpoint_filter_fn, feature_cfg=dict(flatten_sequential=True), **model_kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bicubic', 'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD, 'first_conv': 'conv_stem', 'classifier': 'classifier', **kwargs } default_cfgs = generate_default_cfgs({ 'ghostnet_050.untrained': _cfg(), 'ghostnet_100.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/huawei-noah/CV-backbones/releases/download/ghostnet_pth/ghostnet_1x.pth' ), 'ghostnet_130.untrained': _cfg(), 'ghostnetv2_100.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_10.pth.tar' ), 'ghostnetv2_130.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_13.pth.tar' ), 'ghostnetv2_160.in1k': _cfg( hf_hub_id='timm/', # url='https://github.com/huawei-noah/Efficient-AI-Backbones/releases/download/GhostNetV2/ck_ghostnetv2_16.pth.tar' ), }) @register_model def ghostnet_050(pretrained=False, **kwargs) -> GhostNet: """ GhostNet-0.5x """ model = _create_ghostnet('ghostnet_050', width=0.5, pretrained=pretrained, **kwargs) return model @register_model def ghostnet_100(pretrained=False, **kwargs) -> GhostNet: """ GhostNet-1.0x """ model = 
_create_ghostnet('ghostnet_100', width=1.0, pretrained=pretrained, **kwargs) return model @register_model def ghostnet_130(pretrained=False, **kwargs) -> GhostNet: """ GhostNet-1.3x """ model = _create_ghostnet('ghostnet_130', width=1.3, pretrained=pretrained, **kwargs) return model @register_model def ghostnetv2_100(pretrained=False, **kwargs) -> GhostNet: """ GhostNetV2-1.0x """ model = _create_ghostnet('ghostnetv2_100', width=1.0, pretrained=pretrained, version='v2', **kwargs) return model @register_model def ghostnetv2_130(pretrained=False, **kwargs) -> GhostNet: """ GhostNetV2-1.3x """ model = _create_ghostnet('ghostnetv2_130', width=1.3, pretrained=pretrained, version='v2', **kwargs) return model @register_model def ghostnetv2_160(pretrained=False, **kwargs) -> GhostNet: """ GhostNetV2-1.6x """ model = _create_ghostnet('ghostnetv2_160', width=1.6, pretrained=pretrained, version='v2', **kwargs) return model
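# A minimal usage sketch, assuming the GhostModule and ghostnet_100 definitions above
# are importable; shapes below follow from the code as written, not from any extra claim.
# GhostModule emits ceil(out_chs / ratio) channels from the primary conv and the rest
# from the depthwise "cheap operation", then concatenates and slices to out_chs.
import torch

ghost = GhostModule(in_chs=16, out_chs=32, ratio=2, dw_size=3)
y = ghost(torch.randn(1, 16, 56, 56))
assert y.shape == (1, 32, 56, 56)  # channels = out_chs, spatial dims preserved (stride 1)

# Building a registered variant through its entrypoint (weights optional):
model = ghostnet_100(pretrained=False)
logits = model(torch.randn(1, 3, 224, 224))
assert logits.shape == (1, 1000)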
pytorch-image-models/timm/models/ghostnet.py/0
{ "file_path": "pytorch-image-models/timm/models/ghostnet.py", "repo_id": "pytorch-image-models", "token_count": 7430 }
202
""" MobileViT Paper: V1: `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer` - https://arxiv.org/abs/2110.02178 V2: `Separable Self-attention for Mobile Vision Transformers` - https://arxiv.org/abs/2206.02680 MobileVitBlock and checkpoints adapted from https://github.com/apple/ml-cvnets (original copyright below) License: https://github.com/apple/ml-cvnets/blob/main/LICENSE (Apple open source) Rest of code, ByobNet, and Transformer block hacked together by / Copyright 2022, Ross Wightman """ # # For licensing see accompanying LICENSE file. # Copyright (C) 2020 Apple Inc. All Rights Reserved. # import math from typing import Callable, Tuple, Optional import torch import torch.nn.functional as F from torch import nn from timm.layers import to_2tuple, make_divisible, GroupNorm1, ConvMlp, DropPath, is_exportable from ._builder import build_model_with_cfg from ._features_fx import register_notrace_module from ._registry import register_model, generate_default_cfgs, register_model_deprecations from .byobnet import register_block, ByoBlockCfg, ByoModelCfg, ByobNet, LayerFn, num_groups from .vision_transformer import Block as TransformerBlock __all__ = [] def _inverted_residual_block(d, c, s, br=4.0): # inverted residual is a bottleneck block with bottle_ratio > 1 applied to in_chs, linear output, gs=1 (depthwise) return ByoBlockCfg( type='bottle', d=d, c=c, s=s, gs=1, br=br, block_kwargs=dict(bottle_in=True, linear_out=True)) def _mobilevit_block(d, c, s, transformer_dim, transformer_depth, patch_size=4, br=4.0): # inverted residual + mobilevit blocks as per MobileViT network return ( _inverted_residual_block(d=d, c=c, s=s, br=br), ByoBlockCfg( type='mobilevit', d=1, c=c, s=1, block_kwargs=dict( transformer_dim=transformer_dim, transformer_depth=transformer_depth, patch_size=patch_size) ) ) def _mobilevitv2_block(d, c, s, transformer_depth, patch_size=2, br=2.0, transformer_br=0.5): # inverted residual + mobilevit blocks as per MobileViT network return ( _inverted_residual_block(d=d, c=c, s=s, br=br), ByoBlockCfg( type='mobilevit2', d=1, c=c, s=1, br=transformer_br, gs=1, block_kwargs=dict( transformer_depth=transformer_depth, patch_size=patch_size) ) ) def _mobilevitv2_cfg(multiplier=1.0): chs = (64, 128, 256, 384, 512) if multiplier != 1.0: chs = tuple([int(c * multiplier) for c in chs]) cfg = ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=chs[0], s=1, br=2.0), _inverted_residual_block(d=2, c=chs[1], s=2, br=2.0), _mobilevitv2_block(d=1, c=chs[2], s=2, transformer_depth=2), _mobilevitv2_block(d=1, c=chs[3], s=2, transformer_depth=4), _mobilevitv2_block(d=1, c=chs[4], s=2, transformer_depth=3), ), stem_chs=int(32 * multiplier), stem_type='3x3', stem_pool='', downsample='', act_layer='silu', ) return cfg model_cfgs = dict( mobilevit_xxs=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=16, s=1, br=2.0), _inverted_residual_block(d=3, c=24, s=2, br=2.0), _mobilevit_block(d=1, c=48, s=2, transformer_dim=64, transformer_depth=2, patch_size=2, br=2.0), _mobilevit_block(d=1, c=64, s=2, transformer_dim=80, transformer_depth=4, patch_size=2, br=2.0), _mobilevit_block(d=1, c=80, s=2, transformer_dim=96, transformer_depth=3, patch_size=2, br=2.0), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=320, ), mobilevit_xs=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=48, s=2), _mobilevit_block(d=1, c=64, s=2, transformer_dim=96, transformer_depth=2, patch_size=2), 
_mobilevit_block(d=1, c=80, s=2, transformer_dim=120, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=384, ), mobilevit_s=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=64, s=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', act_layer='silu', num_features=640, ), semobilevit_s=ByoModelCfg( blocks=( _inverted_residual_block(d=1, c=32, s=1), _inverted_residual_block(d=3, c=64, s=2), _mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2), _mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2), _mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2), ), stem_chs=16, stem_type='3x3', stem_pool='', downsample='', attn_layer='se', attn_kwargs=dict(rd_ratio=1/8), num_features=640, ), mobilevitv2_050=_mobilevitv2_cfg(.50), mobilevitv2_075=_mobilevitv2_cfg(.75), mobilevitv2_125=_mobilevitv2_cfg(1.25), mobilevitv2_100=_mobilevitv2_cfg(1.0), mobilevitv2_150=_mobilevitv2_cfg(1.5), mobilevitv2_175=_mobilevitv2_cfg(1.75), mobilevitv2_200=_mobilevitv2_cfg(2.0), ) @register_notrace_module class MobileVitBlock(nn.Module): """ MobileViT block Paper: https://arxiv.org/abs/2110.02178?context=cs.LG """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 3, stride: int = 1, bottle_ratio: float = 1.0, group_size: Optional[int] = None, dilation: Tuple[int, int] = (1, 1), mlp_ratio: float = 2.0, transformer_dim: Optional[int] = None, transformer_depth: int = 2, patch_size: int = 8, num_heads: int = 4, attn_drop: float = 0., drop: int = 0., no_fusion: bool = False, drop_path_rate: float = 0., layers: LayerFn = None, transformer_norm_layer: Callable = nn.LayerNorm, **kwargs, # eat unused args ): super(MobileVitBlock, self).__init__() layers = layers or LayerFn() groups = num_groups(group_size, in_chs) out_chs = out_chs or in_chs transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) self.conv_kxk = layers.conv_norm_act( in_chs, in_chs, kernel_size=kernel_size, stride=stride, groups=groups, dilation=dilation[0]) self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) self.transformer = nn.Sequential(*[ TransformerBlock( transformer_dim, mlp_ratio=mlp_ratio, num_heads=num_heads, qkv_bias=True, attn_drop=attn_drop, proj_drop=drop, drop_path=drop_path_rate, act_layer=layers.act, norm_layer=transformer_norm_layer, ) for _ in range(transformer_depth) ]) self.norm = transformer_norm_layer(transformer_dim) self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1) if no_fusion: self.conv_fusion = None else: self.conv_fusion = layers.conv_norm_act(in_chs + out_chs, out_chs, kernel_size=kernel_size, stride=1) self.patch_size = to_2tuple(patch_size) self.patch_area = self.patch_size[0] * self.patch_size[1] def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x # Local representation x = self.conv_kxk(x) x = self.conv_1x1(x) # Unfold (feature map -> patches) patch_h, patch_w = self.patch_size B, C, H, W = x.shape 
new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w num_patches = num_patch_h * num_patch_w # N interpolate = False if new_h != H or new_w != W: # Note: Padding can be done, but then it needs to be handled in attention function. x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False) interpolate = True # [B, C, H, W] --> [B * C * n_h, n_w, p_h, p_w] x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, patch_w).transpose(1, 2) # [B * C * n_h, n_w, p_h, p_w] --> [BP, N, C] where P = p_h * p_w and N = n_h * n_w x = x.reshape(B, C, num_patches, self.patch_area).transpose(1, 3).reshape(B * self.patch_area, num_patches, -1) # Global representations x = self.transformer(x) x = self.norm(x) # Fold (patch -> feature map) # [B, P, N, C] --> [B*C*n_h, n_w, p_h, p_w] x = x.contiguous().view(B, self.patch_area, num_patches, -1) x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, patch_h, patch_w) # [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W] x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) if interpolate: x = F.interpolate(x, size=(H, W), mode="bilinear", align_corners=False) x = self.conv_proj(x) if self.conv_fusion is not None: x = self.conv_fusion(torch.cat((shortcut, x), dim=1)) return x class LinearSelfAttention(nn.Module): """ This layer applies a self-attention with linear complexity, as described in `https://arxiv.org/abs/2206.02680` This layer can be used for self- as well as cross-attention. Args: embed_dim (int): :math:`C` from an expected input of size :math:`(N, C, H, W)` attn_drop (float): Dropout value for context scores. Default: 0.0 bias (bool): Use bias in learnable layers. Default: True Shape: - Input: :math:`(N, C, P, N)` where :math:`N` is the batch size, :math:`C` is the input channels, :math:`P` is the number of pixels in the patch, and :math:`N` is the number of patches - Output: same as the input .. note:: For MobileViTv2, we unfold the feature map [B, C, H, W] into [B, C, P, N] where P is the number of pixels in a patch and N is the number of patches. Because channel is the first dimension in this unfolded tensor, we use point-wise convolution (instead of a linear layer). This avoids a transpose operation (which may be expensive on resource-constrained devices) that may be required to convert the unfolded tensor from channel-first to channel-last format in case of a linear layer. 
""" def __init__( self, embed_dim: int, attn_drop: float = 0.0, proj_drop: float = 0.0, bias: bool = True, ) -> None: super().__init__() self.embed_dim = embed_dim self.qkv_proj = nn.Conv2d( in_channels=embed_dim, out_channels=1 + (2 * embed_dim), bias=bias, kernel_size=1, ) self.attn_drop = nn.Dropout(attn_drop) self.out_proj = nn.Conv2d( in_channels=embed_dim, out_channels=embed_dim, bias=bias, kernel_size=1, ) self.out_drop = nn.Dropout(proj_drop) def _forward_self_attn(self, x: torch.Tensor) -> torch.Tensor: # [B, C, P, N] --> [B, h + 2d, P, N] qkv = self.qkv_proj(x) # Project x into query, key and value # Query --> [B, 1, P, N] # value, key --> [B, d, P, N] query, key, value = qkv.split([1, self.embed_dim, self.embed_dim], dim=1) # apply softmax along N dimension context_scores = F.softmax(query, dim=-1) context_scores = self.attn_drop(context_scores) # Compute context vector # [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] --> [B, d, P, 1] context_vector = (key * context_scores).sum(dim=-1, keepdim=True) # combine context vector with values # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] out = F.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) out = self.out_drop(out) return out @torch.jit.ignore() def _forward_cross_attn(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: # x --> [B, C, P, N] # x_prev = [B, C, P, M] batch_size, in_dim, kv_patch_area, kv_num_patches = x.shape q_patch_area, q_num_patches = x.shape[-2:] assert ( kv_patch_area == q_patch_area ), "The number of pixels in a patch for query and key_value should be the same" # compute query, key, and value # [B, C, P, M] --> [B, 1 + d, P, M] qk = F.conv2d( x_prev, weight=self.qkv_proj.weight[:self.embed_dim + 1], bias=self.qkv_proj.bias[:self.embed_dim + 1], ) # [B, 1 + d, P, M] --> [B, 1, P, M], [B, d, P, M] query, key = qk.split([1, self.embed_dim], dim=1) # [B, C, P, N] --> [B, d, P, N] value = F.conv2d( x, weight=self.qkv_proj.weight[self.embed_dim + 1], bias=self.qkv_proj.bias[self.embed_dim + 1] if self.qkv_proj.bias is not None else None, ) # apply softmax along M dimension context_scores = F.softmax(query, dim=-1) context_scores = self.attn_drop(context_scores) # compute context vector # [B, d, P, M] * [B, 1, P, M] -> [B, d, P, M] --> [B, d, P, 1] context_vector = (key * context_scores).sum(dim=-1, keepdim=True) # combine context vector with values # [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N] out = F.relu(value) * context_vector.expand_as(value) out = self.out_proj(out) out = self.out_drop(out) return out def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: if x_prev is None: return self._forward_self_attn(x) else: return self._forward_cross_attn(x, x_prev=x_prev) class LinearTransformerBlock(nn.Module): """ This class defines the pre-norm transformer encoder with linear self-attention in `MobileViTv2 paper <>`_ Args: embed_dim (int): :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, P, N)` mlp_ratio (float): Inner dimension ratio of the FFN relative to embed_dim drop (float): Dropout rate. Default: 0.0 attn_drop (float): Dropout rate for attention in multi-head attention. Default: 0.0 drop_path (float): Stochastic depth rate Default: 0.0 norm_layer (Callable): Normalization layer. 
Default: layer_norm_2d Shape: - Input: :math:`(B, C_{in}, P, N)` where :math:`B` is batch size, :math:`C_{in}` is input embedding dim, :math:`P` is number of pixels in a patch, and :math:`N` is number of patches, - Output: same shape as the input """ def __init__( self, embed_dim: int, mlp_ratio: float = 2.0, drop: float = 0.0, attn_drop: float = 0.0, drop_path: float = 0.0, act_layer=None, norm_layer=None, ) -> None: super().__init__() act_layer = act_layer or nn.SiLU norm_layer = norm_layer or GroupNorm1 self.norm1 = norm_layer(embed_dim) self.attn = LinearSelfAttention(embed_dim=embed_dim, attn_drop=attn_drop, proj_drop=drop) self.drop_path1 = DropPath(drop_path) self.norm2 = norm_layer(embed_dim) self.mlp = ConvMlp( in_features=embed_dim, hidden_features=int(embed_dim * mlp_ratio), act_layer=act_layer, drop=drop) self.drop_path2 = DropPath(drop_path) def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor: if x_prev is None: # self-attention x = x + self.drop_path1(self.attn(self.norm1(x))) else: # cross-attention res = x x = self.norm1(x) # norm x = self.attn(x, x_prev) # attn x = self.drop_path1(x) + res # residual # Feed forward network x = x + self.drop_path2(self.mlp(self.norm2(x))) return x @register_notrace_module class MobileVitV2Block(nn.Module): """ This class defines the `MobileViTv2 block <>`_ """ def __init__( self, in_chs: int, out_chs: Optional[int] = None, kernel_size: int = 3, bottle_ratio: float = 1.0, group_size: Optional[int] = 1, dilation: Tuple[int, int] = (1, 1), mlp_ratio: float = 2.0, transformer_dim: Optional[int] = None, transformer_depth: int = 2, patch_size: int = 8, attn_drop: float = 0., drop: int = 0., drop_path_rate: float = 0., layers: LayerFn = None, transformer_norm_layer: Callable = GroupNorm1, **kwargs, # eat unused args ): super(MobileVitV2Block, self).__init__() layers = layers or LayerFn() groups = num_groups(group_size, in_chs) out_chs = out_chs or in_chs transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs) self.conv_kxk = layers.conv_norm_act( in_chs, in_chs, kernel_size=kernel_size, stride=1, groups=groups, dilation=dilation[0]) self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False) self.transformer = nn.Sequential(*[ LinearTransformerBlock( transformer_dim, mlp_ratio=mlp_ratio, attn_drop=attn_drop, drop=drop, drop_path=drop_path_rate, act_layer=layers.act, norm_layer=transformer_norm_layer ) for _ in range(transformer_depth) ]) self.norm = transformer_norm_layer(transformer_dim) self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1, apply_act=False) self.patch_size = to_2tuple(patch_size) self.patch_area = self.patch_size[0] * self.patch_size[1] self.coreml_exportable = is_exportable() def forward(self, x: torch.Tensor) -> torch.Tensor: B, C, H, W = x.shape patch_h, patch_w = self.patch_size new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w num_patches = num_patch_h * num_patch_w # N if new_h != H or new_w != W: x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=True) # Local representation x = self.conv_kxk(x) x = self.conv_1x1(x) # Unfold (feature map -> patches), [B, C, H, W] -> [B, C, P, N] C = x.shape[1] if self.coreml_exportable: x = F.unfold(x, kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w)) else: x = x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4) x 
= x.reshape(B, C, -1, num_patches) # Global representations x = self.transformer(x) x = self.norm(x) # Fold (patches -> feature map), [B, C, P, N] --> [B, C, H, W] if self.coreml_exportable: # adopted from https://github.com/apple/ml-cvnets/blob/main/cvnets/modules/mobilevit_block.py#L609-L624 x = x.reshape(B, C * patch_h * patch_w, num_patch_h, num_patch_w) x = F.pixel_shuffle(x, upscale_factor=patch_h) else: x = x.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3) x = x.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w) x = self.conv_proj(x) return x register_block('mobilevit', MobileVitBlock) register_block('mobilevit2', MobileVitV2Block) def _create_mobilevit(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _create_mobilevit2(variant, cfg_variant=None, pretrained=False, **kwargs): return build_model_with_cfg( ByobNet, variant, pretrained, model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant], feature_cfg=dict(flatten_sequential=True), **kwargs) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8), 'crop_pct': 0.9, 'interpolation': 'bicubic', 'mean': (0., 0., 0.), 'std': (1., 1., 1.), 'first_conv': 'stem.conv', 'classifier': 'head.fc', 'fixed_input_size': False, **kwargs } default_cfgs = generate_default_cfgs({ 'mobilevit_xxs.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevit_xs.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevit_s.cvnets_in1k': _cfg(hf_hub_id='timm/'), 'mobilevitv2_050.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_075.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_100.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_125.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_175.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_200.cvnets_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_175.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_200.cvnets_in22k_ft_in1k': _cfg( hf_hub_id='timm/', crop_pct=0.888), 'mobilevitv2_150.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'mobilevitv2_175.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), 'mobilevitv2_200.cvnets_in22k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0), }) @register_model def mobilevit_xxs(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs) @register_model def mobilevit_xs(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_xs', pretrained=pretrained, **kwargs) @register_model def mobilevit_s(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevit_s', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_050(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_050', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_075(pretrained=False, **kwargs) -> ByobNet: return 
_create_mobilevit('mobilevitv2_075', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_100(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_100', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_125(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_125', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_150(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_150', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_175(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_175', pretrained=pretrained, **kwargs) @register_model def mobilevitv2_200(pretrained=False, **kwargs) -> ByobNet: return _create_mobilevit('mobilevitv2_200', pretrained=pretrained, **kwargs) register_model_deprecations(__name__, { 'mobilevitv2_150_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k', 'mobilevitv2_175_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k', 'mobilevitv2_200_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k', 'mobilevitv2_150_384_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k_384', 'mobilevitv2_175_384_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k_384', 'mobilevitv2_200_384_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k_384', })
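# A minimal sketch of the patch bookkeeping used above, assuming only torch: MobileVitV2Block
# unfolds [B, C, H, W] into [B, C, P, N] (P = pixels per patch, N = number of patches) before
# the transformer and folds it back afterwards. The round trip below mirrors the same
# reshape/permute pairs and recovers the input exactly.
import torch

B, C, H, W = 2, 8, 32, 32
patch_h = patch_w = 2
num_patch_h, num_patch_w = H // patch_h, W // patch_w
num_patches = num_patch_h * num_patch_w

x = torch.randn(B, C, H, W)
# unfold: [B, C, H, W] -> [B, C, P, N]
u = x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4)
u = u.reshape(B, C, patch_h * patch_w, num_patches)
# fold: [B, C, P, N] -> [B, C, H, W]
f = u.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3)
f = f.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w)
assert torch.equal(x, f)  # unfold/fold are exact inverses when H, W are patch multiples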
pytorch-image-models/timm/models/mobilevit.py/0
{ "file_path": "pytorch-image-models/timm/models/mobilevit.py", "repo_id": "pytorch-image-models", "token_count": 12812 }
203
"""Pre-Activation ResNet v2 with GroupNorm and Weight Standardization. A PyTorch implementation of ResNetV2 adapted from the Google Big-Transfer (BiT) source code at https://github.com/google-research/big_transfer to match timm interfaces. The BiT weights have been included here as pretrained models from their original .NPZ checkpoints. Additionally, supports non pre-activation bottleneck for use as a backbone for Vision Transfomers (ViT) and extra padding support to allow porting of official Hybrid ResNet pretrained weights from https://github.com/google-research/vision_transformer Thanks to the Google team for the above two repositories and associated papers: * Big Transfer (BiT): General Visual Representation Learning - https://arxiv.org/abs/1912.11370 * An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale - https://arxiv.org/abs/2010.11929 * Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 Original copyright of Google code below, modifications by Ross Wightman, Copyright 2020. """ # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import OrderedDict # pylint: disable=g-importing-member from functools import partial import torch import torch.nn as nn from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD from timm.layers import GroupNormAct, BatchNormAct2d, EvoNorm2dS0, FilterResponseNormTlu2d, ClassifierHead, \ DropPath, AvgPool2dSame, create_pool2d, StdConv2d, create_conv2d, get_act_layer, get_norm_act_layer, make_divisible from ._builder import build_model_with_cfg from ._manipulate import checkpoint_seq, named_apply, adapt_input_conv from ._registry import generate_default_cfgs, register_model, register_model_deprecations __all__ = ['ResNetV2'] # model_registry will add each entrypoint fn to this class PreActBottleneck(nn.Module): """Pre-activation (v2) bottleneck block. Follows the implementation of "Identity Mappings in Deep Residual Networks": https://github.com/KaimingHe/resnet-1k-layers/blob/master/resnet-pre-act.lua Except it puts the stride on 3x3 conv when available. 
""" def __init__( self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0., ): super().__init__() first_dilation = first_dilation or dilation conv_layer = conv_layer or StdConv2d norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) out_chs = out_chs or in_chs mid_chs = make_divisible(out_chs * bottle_ratio) if proj_layer is not None: self.downsample = proj_layer( in_chs, out_chs, stride=stride, dilation=dilation, first_dilation=first_dilation, preact=True, conv_layer=conv_layer, norm_layer=norm_layer) else: self.downsample = None self.norm1 = norm_layer(in_chs) self.conv1 = conv_layer(in_chs, mid_chs, 1) self.norm2 = norm_layer(mid_chs) self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) self.norm3 = norm_layer(mid_chs) self.conv3 = conv_layer(mid_chs, out_chs, 1) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() def zero_init_last(self): nn.init.zeros_(self.conv3.weight) def forward(self, x): x_preact = self.norm1(x) # shortcut branch shortcut = x if self.downsample is not None: shortcut = self.downsample(x_preact) # residual branch x = self.conv1(x_preact) x = self.conv2(self.norm2(x)) x = self.conv3(self.norm3(x)) x = self.drop_path(x) return x + shortcut class Bottleneck(nn.Module): """Non Pre-activation bottleneck block, equiv to V1.5/V1b Bottleneck. Used for ViT. """ def __init__( self, in_chs, out_chs=None, bottle_ratio=0.25, stride=1, dilation=1, first_dilation=None, groups=1, act_layer=None, conv_layer=None, norm_layer=None, proj_layer=None, drop_path_rate=0., ): super().__init__() first_dilation = first_dilation or dilation act_layer = act_layer or nn.ReLU conv_layer = conv_layer or StdConv2d norm_layer = norm_layer or partial(GroupNormAct, num_groups=32) out_chs = out_chs or in_chs mid_chs = make_divisible(out_chs * bottle_ratio) if proj_layer is not None: self.downsample = proj_layer( in_chs, out_chs, stride=stride, dilation=dilation, preact=False, conv_layer=conv_layer, norm_layer=norm_layer) else: self.downsample = None self.conv1 = conv_layer(in_chs, mid_chs, 1) self.norm1 = norm_layer(mid_chs) self.conv2 = conv_layer(mid_chs, mid_chs, 3, stride=stride, dilation=first_dilation, groups=groups) self.norm2 = norm_layer(mid_chs) self.conv3 = conv_layer(mid_chs, out_chs, 1) self.norm3 = norm_layer(out_chs, apply_act=False) self.drop_path = DropPath(drop_path_rate) if drop_path_rate > 0 else nn.Identity() self.act3 = act_layer(inplace=True) def zero_init_last(self): if getattr(self.norm3, 'weight', None) is not None: nn.init.zeros_(self.norm3.weight) def forward(self, x): # shortcut branch shortcut = x if self.downsample is not None: shortcut = self.downsample(x) # residual x = self.conv1(x) x = self.norm1(x) x = self.conv2(x) x = self.norm2(x) x = self.conv3(x) x = self.norm3(x) x = self.drop_path(x) x = self.act3(x + shortcut) return x class DownsampleConv(nn.Module): def __init__( self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, conv_layer=None, norm_layer=None, ): super(DownsampleConv, self).__init__() self.conv = conv_layer(in_chs, out_chs, 1, stride=stride) self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) def forward(self, x): return self.norm(self.conv(x)) class DownsampleAvg(nn.Module): def __init__( self, in_chs, out_chs, stride=1, dilation=1, first_dilation=None, preact=True, conv_layer=None, 
norm_layer=None, ): """ AvgPool Downsampling as in 'D' ResNet variants. This is not in RegNet space but I might experiment.""" super(DownsampleAvg, self).__init__() avg_stride = stride if dilation == 1 else 1 if stride > 1 or dilation > 1: avg_pool_fn = AvgPool2dSame if avg_stride == 1 and dilation > 1 else nn.AvgPool2d self.pool = avg_pool_fn(2, avg_stride, ceil_mode=True, count_include_pad=False) else: self.pool = nn.Identity() self.conv = conv_layer(in_chs, out_chs, 1, stride=1) self.norm = nn.Identity() if preact else norm_layer(out_chs, apply_act=False) def forward(self, x): return self.norm(self.conv(self.pool(x))) class ResNetStage(nn.Module): """ResNet Stage.""" def __init__( self, in_chs, out_chs, stride, dilation, depth, bottle_ratio=0.25, groups=1, avg_down=False, block_dpr=None, block_fn=PreActBottleneck, act_layer=None, conv_layer=None, norm_layer=None, **block_kwargs, ): super(ResNetStage, self).__init__() first_dilation = 1 if dilation in (1, 2) else 2 layer_kwargs = dict(act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer) proj_layer = DownsampleAvg if avg_down else DownsampleConv prev_chs = in_chs self.blocks = nn.Sequential() for block_idx in range(depth): drop_path_rate = block_dpr[block_idx] if block_dpr else 0. stride = stride if block_idx == 0 else 1 self.blocks.add_module(str(block_idx), block_fn( prev_chs, out_chs, stride=stride, dilation=dilation, bottle_ratio=bottle_ratio, groups=groups, first_dilation=first_dilation, proj_layer=proj_layer, drop_path_rate=drop_path_rate, **layer_kwargs, **block_kwargs, )) prev_chs = out_chs first_dilation = dilation proj_layer = None def forward(self, x): x = self.blocks(x) return x def is_stem_deep(stem_type): return any([s in stem_type for s in ('deep', 'tiered')]) def create_resnetv2_stem( in_chs, out_chs=64, stem_type='', preact=True, conv_layer=StdConv2d, norm_layer=partial(GroupNormAct, num_groups=32), ): stem = OrderedDict() assert stem_type in ('', 'fixed', 'same', 'deep', 'deep_fixed', 'deep_same', 'tiered') # NOTE conv padding mode can be changed by overriding the conv_layer def if is_stem_deep(stem_type): # A 3 deep 3x3 conv stack as in ResNet V1D models if 'tiered' in stem_type: stem_chs = (3 * out_chs // 8, out_chs // 2) # 'T' resnets in resnet.py else: stem_chs = (out_chs // 2, out_chs // 2) # 'D' ResNets stem['conv1'] = conv_layer(in_chs, stem_chs[0], kernel_size=3, stride=2) stem['norm1'] = norm_layer(stem_chs[0]) stem['conv2'] = conv_layer(stem_chs[0], stem_chs[1], kernel_size=3, stride=1) stem['norm2'] = norm_layer(stem_chs[1]) stem['conv3'] = conv_layer(stem_chs[1], out_chs, kernel_size=3, stride=1) if not preact: stem['norm3'] = norm_layer(out_chs) else: # The usual 7x7 stem conv stem['conv'] = conv_layer(in_chs, out_chs, kernel_size=7, stride=2) if not preact: stem['norm'] = norm_layer(out_chs) if 'fixed' in stem_type: # 'fixed' SAME padding approximation that is used in BiT models stem['pad'] = nn.ConstantPad2d(1, 0.) stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=0) elif 'same' in stem_type: # full, input size based 'SAME' padding, used in ViT Hybrid model stem['pool'] = create_pool2d('max', kernel_size=3, stride=2, padding='same') else: # the usual PyTorch symmetric padding stem['pool'] = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) return nn.Sequential(stem) class ResNetV2(nn.Module): """Implementation of Pre-activation (v2) ResNet mode. 
""" def __init__( self, layers, channels=(256, 512, 1024, 2048), num_classes=1000, in_chans=3, global_pool='avg', output_stride=32, width_factor=1, stem_chs=64, stem_type='', avg_down=False, preact=True, act_layer=nn.ReLU, norm_layer=partial(GroupNormAct, num_groups=32), conv_layer=StdConv2d, drop_rate=0., drop_path_rate=0., zero_init_last=False, ): """ Args: layers (List[int]) : number of layers in each block channels (List[int]) : number of channels in each block: num_classes (int): number of classification classes (default 1000) in_chans (int): number of input (color) channels. (default 3) global_pool (str): Global pooling type. One of 'avg', 'max', 'avgmax', 'catavgmax' (default 'avg') output_stride (int): output stride of the network, 32, 16, or 8. (default 32) width_factor (int): channel (width) multiplication factor stem_chs (int): stem width (default: 64) stem_type (str): stem type (default: '' == 7x7) avg_down (bool): average pooling in residual downsampling (default: False) preact (bool): pre-activiation (default: True) act_layer (Union[str, nn.Module]): activation layer norm_layer (Union[str, nn.Module]): normalization layer conv_layer (nn.Module): convolution module drop_rate: classifier dropout rate (default: 0.) drop_path_rate: stochastic depth rate (default: 0.) zero_init_last: zero-init last weight in residual path (default: False) """ super().__init__() self.num_classes = num_classes self.drop_rate = drop_rate wf = width_factor norm_layer = get_norm_act_layer(norm_layer, act_layer=act_layer) act_layer = get_act_layer(act_layer) self.feature_info = [] stem_chs = make_divisible(stem_chs * wf) self.stem = create_resnetv2_stem( in_chans, stem_chs, stem_type, preact, conv_layer=conv_layer, norm_layer=norm_layer, ) stem_feat = ('stem.conv3' if is_stem_deep(stem_type) else 'stem.conv') if preact else 'stem.norm' self.feature_info.append(dict(num_chs=stem_chs, reduction=2, module=stem_feat)) prev_chs = stem_chs curr_stride = 4 dilation = 1 block_dprs = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(layers)).split(layers)] block_fn = PreActBottleneck if preact else Bottleneck self.stages = nn.Sequential() for stage_idx, (d, c, bdpr) in enumerate(zip(layers, channels, block_dprs)): out_chs = make_divisible(c * wf) stride = 1 if stage_idx == 0 else 2 if curr_stride >= output_stride: dilation *= stride stride = 1 stage = ResNetStage( prev_chs, out_chs, stride=stride, dilation=dilation, depth=d, avg_down=avg_down, act_layer=act_layer, conv_layer=conv_layer, norm_layer=norm_layer, block_dpr=bdpr, block_fn=block_fn, ) prev_chs = out_chs curr_stride *= stride self.feature_info += [dict(num_chs=prev_chs, reduction=curr_stride, module=f'stages.{stage_idx}')] self.stages.add_module(str(stage_idx), stage) self.num_features = prev_chs self.norm = norm_layer(self.num_features) if preact else nn.Identity() self.head = ClassifierHead( self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate, use_conv=True, ) self.init_weights(zero_init_last=zero_init_last) self.grad_checkpointing = False @torch.jit.ignore def init_weights(self, zero_init_last=True): named_apply(partial(_init_weights, zero_init_last=zero_init_last), self) @torch.jit.ignore() def load_pretrained(self, checkpoint_path, prefix='resnet/'): _load_weights(self, checkpoint_path, prefix) @torch.jit.ignore def group_matcher(self, coarse=False): matcher = dict( stem=r'^stem', blocks=r'^stages\.(\d+)' if coarse else [ (r'^stages\.(\d+)\.blocks\.(\d+)', None), (r'^norm', (99999,)) ] ) return matcher 
@torch.jit.ignore def set_grad_checkpointing(self, enable=True): self.grad_checkpointing = enable @torch.jit.ignore def get_classifier(self): return self.head.fc def reset_classifier(self, num_classes, global_pool='avg'): self.num_classes = num_classes self.head.reset(num_classes, global_pool) def forward_features(self, x): x = self.stem(x) if self.grad_checkpointing and not torch.jit.is_scripting(): x = checkpoint_seq(self.stages, x, flatten=True) else: x = self.stages(x) x = self.norm(x) return x def forward_head(self, x, pre_logits: bool = False): return self.head(x, pre_logits=pre_logits) def forward(self, x): x = self.forward_features(x) x = self.forward_head(x) return x def _init_weights(module: nn.Module, name: str = '', zero_init_last=True): if isinstance(module, nn.Linear) or ('head.fc' in name and isinstance(module, nn.Conv2d)): nn.init.normal_(module.weight, mean=0.0, std=0.01) nn.init.zeros_(module.bias) elif isinstance(module, nn.Conv2d): nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu') if module.bias is not None: nn.init.zeros_(module.bias) elif isinstance(module, (nn.BatchNorm2d, nn.LayerNorm, nn.GroupNorm)): nn.init.ones_(module.weight) nn.init.zeros_(module.bias) elif zero_init_last and hasattr(module, 'zero_init_last'): module.zero_init_last() @torch.no_grad() def _load_weights(model: nn.Module, checkpoint_path: str, prefix: str = 'resnet/'): import numpy as np def t2p(conv_weights): """Possibly convert HWIO to OIHW.""" if conv_weights.ndim == 4: conv_weights = conv_weights.transpose([3, 2, 0, 1]) return torch.from_numpy(conv_weights) weights = np.load(checkpoint_path) stem_conv_w = adapt_input_conv( model.stem.conv.weight.shape[1], t2p(weights[f'{prefix}root_block/standardized_conv2d/kernel'])) model.stem.conv.weight.copy_(stem_conv_w) model.norm.weight.copy_(t2p(weights[f'{prefix}group_norm/gamma'])) model.norm.bias.copy_(t2p(weights[f'{prefix}group_norm/beta'])) if isinstance(getattr(model.head, 'fc', None), nn.Conv2d) and \ model.head.fc.weight.shape[0] == weights[f'{prefix}head/conv2d/kernel'].shape[-1]: model.head.fc.weight.copy_(t2p(weights[f'{prefix}head/conv2d/kernel'])) model.head.fc.bias.copy_(t2p(weights[f'{prefix}head/conv2d/bias'])) for i, (sname, stage) in enumerate(model.stages.named_children()): for j, (bname, block) in enumerate(stage.blocks.named_children()): cname = 'standardized_conv2d' block_prefix = f'{prefix}block{i + 1}/unit{j + 1:02d}/' block.conv1.weight.copy_(t2p(weights[f'{block_prefix}a/{cname}/kernel'])) block.conv2.weight.copy_(t2p(weights[f'{block_prefix}b/{cname}/kernel'])) block.conv3.weight.copy_(t2p(weights[f'{block_prefix}c/{cname}/kernel'])) block.norm1.weight.copy_(t2p(weights[f'{block_prefix}a/group_norm/gamma'])) block.norm2.weight.copy_(t2p(weights[f'{block_prefix}b/group_norm/gamma'])) block.norm3.weight.copy_(t2p(weights[f'{block_prefix}c/group_norm/gamma'])) block.norm1.bias.copy_(t2p(weights[f'{block_prefix}a/group_norm/beta'])) block.norm2.bias.copy_(t2p(weights[f'{block_prefix}b/group_norm/beta'])) block.norm3.bias.copy_(t2p(weights[f'{block_prefix}c/group_norm/beta'])) if block.downsample is not None: w = weights[f'{block_prefix}a/proj/{cname}/kernel'] block.downsample.conv.weight.copy_(t2p(w)) def _create_resnetv2(variant, pretrained=False, **kwargs): feature_cfg = dict(flatten_sequential=True) return build_model_with_cfg( ResNetV2, variant, pretrained, feature_cfg=feature_cfg, **kwargs, ) def _create_resnetv2_bit(variant, pretrained=False, **kwargs): return _create_resnetv2( variant, 
pretrained=pretrained, stem_type='fixed', conv_layer=partial(StdConv2d, eps=1e-8), **kwargs, ) def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7), 'crop_pct': 0.875, 'interpolation': 'bilinear', 'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD, 'first_conv': 'stem.conv', 'classifier': 'head.fc', **kwargs } default_cfgs = generate_default_cfgs({ # Paper: Knowledge distillation: A good teacher is patient and consistent - https://arxiv.org/abs/2106.05237 'resnetv2_50x1_bit.goog_distilled_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', custom_load=True), 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', custom_load=True), 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384': _cfg( hf_hub_id='timm/', input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0, interpolation='bicubic', custom_load=True), # pretrained on imagenet21k, finetuned on imagenet1k 'resnetv2_50x1_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_50x3_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_101x1_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_101x3_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_152x2_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 448, 448), pool_size=(14, 14), crop_pct=1.0, custom_load=True), 'resnetv2_152x4_bit.goog_in21k_ft_in1k': _cfg( hf_hub_id='timm/', input_size=(3, 480, 480), pool_size=(15, 15), crop_pct=1.0, custom_load=True), # only one at 480x480? 
# trained on imagenet-21k 'resnetv2_50x1_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_50x3_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_101x1_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_101x3_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_152x2_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_152x4_bit.goog_in21k': _cfg( hf_hub_id='timm/', num_classes=21843, custom_load=True), 'resnetv2_50.a1h_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnetv2_50d.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), 'resnetv2_50t.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), 'resnetv2_101.a1h_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnetv2_101d.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), 'resnetv2_152.untrained': _cfg( interpolation='bicubic'), 'resnetv2_152d.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), 'resnetv2_50d_gn.ah_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', first_conv='stem.conv1', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnetv2_50d_evos.ah_in1k': _cfg( hf_hub_id='timm/', interpolation='bicubic', first_conv='stem.conv1', crop_pct=0.95, test_input_size=(3, 288, 288), test_crop_pct=1.0), 'resnetv2_50d_frn.untrained': _cfg( interpolation='bicubic', first_conv='stem.conv1'), }) @register_model def resnetv2_50x1_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_50x1_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=1, **kwargs) @register_model def resnetv2_50x3_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_50x3_bit', pretrained=pretrained, layers=[3, 4, 6, 3], width_factor=3, **kwargs) @register_model def resnetv2_101x1_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_101x1_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=1, **kwargs) @register_model def resnetv2_101x3_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_101x3_bit', pretrained=pretrained, layers=[3, 4, 23, 3], width_factor=3, **kwargs) @register_model def resnetv2_152x2_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_152x2_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=2, **kwargs) @register_model def resnetv2_152x4_bit(pretrained=False, **kwargs) -> ResNetV2: return _create_resnetv2_bit( 'resnetv2_152x4_bit', pretrained=pretrained, layers=[3, 8, 36, 3], width_factor=4, **kwargs) @register_model def resnetv2_50(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_50', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50t(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( 
layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='tiered', avg_down=True) return _create_resnetv2('resnetv2_50t', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_101(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_101', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_101d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 23, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_101d', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_152(pretrained=False, **kwargs) -> ResNetV2: model_args = dict(layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d) return _create_resnetv2('resnetv2_152', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_152d(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 8, 36, 3], conv_layer=create_conv2d, norm_layer=BatchNormAct2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_152d', pretrained=pretrained, **dict(model_args, **kwargs)) # Experimental configs (may change / be removed) @register_model def resnetv2_50d_gn(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=GroupNormAct, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_gn', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d_evos(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=EvoNorm2dS0, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_evos', pretrained=pretrained, **dict(model_args, **kwargs)) @register_model def resnetv2_50d_frn(pretrained=False, **kwargs) -> ResNetV2: model_args = dict( layers=[3, 4, 6, 3], conv_layer=create_conv2d, norm_layer=FilterResponseNormTlu2d, stem_type='deep', avg_down=True) return _create_resnetv2('resnetv2_50d_frn', pretrained=pretrained, **dict(model_args, **kwargs)) register_model_deprecations(__name__, { 'resnetv2_50x1_bitm': 'resnetv2_50x1_bit.goog_in21k_ft_in1k', 'resnetv2_50x3_bitm': 'resnetv2_50x3_bit.goog_in21k_ft_in1k', 'resnetv2_101x1_bitm': 'resnetv2_101x1_bit.goog_in21k_ft_in1k', 'resnetv2_101x3_bitm': 'resnetv2_101x3_bit.goog_in21k_ft_in1k', 'resnetv2_152x2_bitm': 'resnetv2_152x2_bit.goog_in21k_ft_in1k', 'resnetv2_152x4_bitm': 'resnetv2_152x4_bit.goog_in21k_ft_in1k', 'resnetv2_50x1_bitm_in21k': 'resnetv2_50x1_bit.goog_in21k', 'resnetv2_50x3_bitm_in21k': 'resnetv2_50x3_bit.goog_in21k', 'resnetv2_101x1_bitm_in21k': 'resnetv2_101x1_bit.goog_in21k', 'resnetv2_101x3_bitm_in21k': 'resnetv2_101x3_bit.goog_in21k', 'resnetv2_152x2_bitm_in21k': 'resnetv2_152x2_bit.goog_in21k', 'resnetv2_152x4_bitm_in21k': 'resnetv2_152x4_bit.goog_in21k', 'resnetv2_50x1_bit_distilled': 'resnetv2_50x1_bit.goog_distilled_in1k', 'resnetv2_152x2_bit_teacher': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k', 'resnetv2_152x2_bit_teacher_384': 'resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384', })
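# A minimal usage sketch, assuming the resnetv2_50x1_bit entrypoint defined above is
# importable. BiT-style variants pair weight-standardized convs (StdConv2d) with GroupNorm
# in a pre-activation layout, so there are no BatchNorm running stats to worry about at
# small batch sizes; the shapes below follow from layers=[3, 4, 6, 3] and width_factor=1.
import torch

model = resnetv2_50x1_bit(pretrained=False, num_classes=10)
x = torch.randn(2, 3, 224, 224)
feats = model.forward_features(x)   # pre-pool feature map at stride 32
logits = model(x)
assert feats.shape == (2, 2048, 7, 7)
assert logits.shape == (2, 10)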
pytorch-image-models/timm/models/resnetv2.py/0
{ "file_path": "pytorch-image-models/timm/models/resnetv2.py", "repo_id": "pytorch-image-models", "token_count": 14679 }
204
""" Hybrid Vision Transformer (ViT) in PyTorch A PyTorch implement of the Hybrid Vision Transformers as described in: 'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale' - https://arxiv.org/abs/2010.11929 `How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers` - https://arxiv.org/abs/2106.10270 NOTE These hybrid model definitions depend on code in vision_transformer.py. They were moved here to keep file sizes sane. Hacked together by / Copyright 2020, Ross Wightman """ from functools import partial from typing import List, Optional, Tuple import torch import torch.nn as nn import torch.nn.functional as F from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD from timm.layers import StdConv2dSame, StdConv2d, to_2tuple, Format, nchw_to from ._registry import generate_default_cfgs, register_model, register_model_deprecations from .resnet import resnet26d, resnet50d from .resnetv2 import ResNetV2, create_resnetv2_stem from .vision_transformer import _create_vision_transformer, VisionTransformer class HybridEmbed(nn.Module): """ CNN Feature Map Embedding Extract feature map from CNN, flatten, project to embedding dim. """ output_fmt: Format dynamic_img_pad: torch.jit.Final[bool] def __init__( self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768, bias=True, flatten: bool = True, output_fmt: Optional[str] = None, strict_img_size: bool = True, dynamic_img_pad: bool = False, ): super().__init__() assert isinstance(backbone, nn.Module) img_size = to_2tuple(img_size) patch_size = to_2tuple(patch_size) self.img_size = img_size self.patch_size = patch_size self.backbone = backbone if feature_size is None: with torch.no_grad(): # NOTE Most reliable way of determining output dims is to run forward pass training = backbone.training if training: backbone.eval() o = self.backbone(torch.zeros(1, in_chans, img_size[0], img_size[1])) if isinstance(o, (list, tuple)): o = o[-1] # last feature if backbone outputs list/tuple of features feature_size = o.shape[-2:] feature_dim = o.shape[1] backbone.train(training) else: feature_size = to_2tuple(feature_size) if hasattr(self.backbone, 'feature_info'): feature_dim = self.backbone.feature_info.channels()[-1] else: feature_dim = self.backbone.num_features if not dynamic_img_pad: assert feature_size[0] % patch_size[0] == 0 and feature_size[1] % patch_size[1] == 0 self.grid_size = (feature_size[0] // patch_size[0], feature_size[1] // patch_size[1]) self.num_patches = self.grid_size[0] * self.grid_size[1] if output_fmt is not None: self.flatten = False self.output_fmt = Format(output_fmt) else: # flatten spatial dim and transpose to channels last, kept for bwd compat self.flatten = flatten self.output_fmt = Format.NCHW self.strict_img_size = strict_img_size self.dynamic_img_pad = dynamic_img_pad self.proj = nn.Conv2d(feature_dim, embed_dim, kernel_size=patch_size, stride=patch_size, bias=bias) def forward(self, x): x = self.backbone(x) if isinstance(x, (list, tuple)): x = x[-1] # last feature if backbone outputs list/tuple of features _, _, H, W = x.shape if self.dynamic_img_pad: pad_h = (self.patch_size[0] - H % self.patch_size[0]) % self.patch_size[0] pad_w = (self.patch_size[1] - W % self.patch_size[1]) % self.patch_size[1] x = F.pad(x, (0, pad_w, 0, pad_h)) x = self.proj(x) if self.flatten: x = x.flatten(2).transpose(1, 2) # NCHW -> NLC elif self.output_fmt != Format.NCHW: x = nchw_to(x, self.output_fmt) return x class HybridEmbedWithSize(nn.Module): """ CNN 
Feature Map Embedding Extract feature map from CNN, flatten, project to embedding dim. """ def __init__( self, backbone, img_size=224, patch_size=1, feature_size=None, in_chans=3, embed_dim=768, bias=True, ): super().__init__( backbone=backbone, img_size=img_size, patch_size=patch_size, feature_size=feature_size, in_chans=in_chans, embed_dim=embed_dim, bias=bias, ) def forward(self, x) -> Tuple[torch.Tensor, List[int]]: x = self.backbone(x) if isinstance(x, (list, tuple)): x = x[-1] # last feature if backbone outputs list/tuple of features x = self.proj(x) return x.flatten(2).transpose(1, 2), x.shape[-2:] def _create_vision_transformer_hybrid(variant, backbone, pretrained=False, **kwargs): embed_layer = partial(HybridEmbed, backbone=backbone) kwargs.setdefault('patch_size', 1) # default patch size for hybrid models if not set return _create_vision_transformer(variant, pretrained=pretrained, embed_layer=embed_layer, **kwargs) def _resnetv2(layers=(3, 4, 9), **kwargs): """ ResNet-V2 backbone helper""" padding_same = kwargs.get('padding_same', True) stem_type = 'same' if padding_same else '' conv_layer = partial(StdConv2dSame, eps=1e-8) if padding_same else partial(StdConv2d, eps=1e-8) if len(layers): backbone = ResNetV2( layers=layers, num_classes=0, global_pool='', in_chans=kwargs.get('in_chans', 3), preact=False, stem_type=stem_type, conv_layer=conv_layer) else: backbone = create_resnetv2_stem( kwargs.get('in_chans', 3), stem_type=stem_type, preact=False, conv_layer=conv_layer) return backbone def _cfg(url='', **kwargs): return { 'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None, 'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True, 'mean': (0.5, 0.5, 0.5), 'std': (0.5, 0.5, 0.5), 'first_conv': 'patch_embed.backbone.stem.conv', 'classifier': 'head', **kwargs } default_cfgs = generate_default_cfgs({ # hybrid in-1k models (weights from official JAX impl where they exist) 'vit_tiny_r_s16_p8_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True, first_conv='patch_embed.backbone.conv'), 'vit_tiny_r_s16_p8_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', first_conv='patch_embed.backbone.conv', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_small_r26_s32_224.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_light0-wd_0.03-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.03-res_224.npz', hf_hub_id='timm/', custom_load=True, ), 'vit_small_r26_s32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True), 'vit_base_r26_s32_224.untrained': _cfg(), 'vit_base_r50_s16_384.orig_in21k_ft_in1k': _cfg( url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_384-9fd3c705.pth', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0), 'vit_large_r50_s32_224.augreg_in21k_ft_in1k': _cfg( 
url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_224.npz', hf_hub_id='timm/', custom_load=True, ), 'vit_large_r50_s32_384.augreg_in21k_ft_in1k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz', hf_hub_id='timm/', input_size=(3, 384, 384), crop_pct=1.0, custom_load=True, ), # hybrid in-21k models (weights from official Google JAX impl where they exist) 'vit_tiny_r_s16_p8_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R_Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, first_conv='patch_embed.backbone.conv', custom_load=True), 'vit_small_r26_s32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R26_S_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.03-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, custom_load=True), 'vit_base_r50_s16_224.orig_in21k': _cfg( #url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_resnet50_224_in21k-6f7c7740.pth', hf_hub_id='timm/', num_classes=0, crop_pct=0.9), 'vit_large_r50_s32_224.augreg_in21k': _cfg( url='https://storage.googleapis.com/vit_models/augreg/R50_L_32-i21k-300ep-lr_0.001-aug_medium2-wd_0.1-do_0.0-sd_0.0.npz', hf_hub_id='timm/', num_classes=21843, crop_pct=0.9, custom_load=True), # hybrid models (using timm resnet backbones) 'vit_small_resnet26d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_small_resnet50d_s16_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_resnet26d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), 'vit_base_resnet50d_224.untrained': _cfg( mean=IMAGENET_DEFAULT_MEAN, std=IMAGENET_DEFAULT_STD, first_conv='patch_embed.backbone.conv1.0'), }) @register_model def vit_tiny_r_s16_p8_224(pretrained=False, **kwargs) -> VisionTransformer: """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 224 x 224. """ backbone = _resnetv2(layers=(), **kwargs) model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer_hybrid( 'vit_tiny_r_s16_p8_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_tiny_r_s16_p8_384(pretrained=False, **kwargs) -> VisionTransformer: """ R+ViT-Ti/S16 w/ 8x8 patch hybrid @ 384 x 384. """ backbone = _resnetv2(layers=(), **kwargs) model_args = dict(patch_size=8, embed_dim=192, depth=12, num_heads=3) model = _create_vision_transformer_hybrid( 'vit_tiny_r_s16_p8_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-S/S32 hybrid. """ backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer_hybrid( 'vit_small_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_r26_s32_384(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-S/S32 hybrid. 
""" backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=384, depth=12, num_heads=6) model = _create_vision_transformer_hybrid( 'vit_small_r26_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r26_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R26+ViT-B/S32 hybrid. """ backbone = _resnetv2((2, 2, 2, 2), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r26_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r50_s16_224(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-B/S16 hybrid from original paper (https://arxiv.org/abs/2010.11929). """ backbone = _resnetv2((3, 4, 9), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r50_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_r50_s16_384(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-B/16 hybrid from original paper (https://arxiv.org/abs/2010.11929). ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer. """ backbone = _resnetv2((3, 4, 9), **kwargs) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_r50_s16_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_r50_s32_224(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-L/S32 hybrid. """ backbone = _resnetv2((3, 4, 6, 3), **kwargs) model_args = dict(embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer_hybrid( 'vit_large_r50_s32_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_large_r50_s32_384(pretrained=False, **kwargs) -> VisionTransformer: """ R50+ViT-L/S32 hybrid. """ backbone = _resnetv2((3, 4, 6, 3), **kwargs) model_args = dict(embed_dim=1024, depth=24, num_heads=16) model = _create_vision_transformer_hybrid( 'vit_large_r50_s32_384', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT small hybrid w/ ResNet26D stride 32. No pretrained weights. """ backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) model = _create_vision_transformer_hybrid( 'vit_small_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_small_resnet50d_s16_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT small hybrid w/ ResNet50D 3-stages, stride 16. No pretrained weights. """ backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[3]) model_args = dict(embed_dim=768, depth=8, num_heads=8, mlp_ratio=3) model = _create_vision_transformer_hybrid( 'vit_small_resnet50d_s16_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_resnet26d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT base hybrid w/ ResNet26D stride 32. 
No pretrained weights. """ backbone = resnet26d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_resnet26d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model @register_model def vit_base_resnet50d_224(pretrained=False, **kwargs) -> VisionTransformer: """ Custom ViT base hybrid w/ ResNet50D stride 32. No pretrained weights. """ backbone = resnet50d(pretrained=pretrained, in_chans=kwargs.get('in_chans', 3), features_only=True, out_indices=[4]) model_args = dict(embed_dim=768, depth=12, num_heads=12) model = _create_vision_transformer_hybrid( 'vit_base_resnet50d_224', backbone=backbone, pretrained=pretrained, **dict(model_args, **kwargs)) return model register_model_deprecations(__name__, { 'vit_tiny_r_s16_p8_224_in21k': 'vit_tiny_r_s16_p8_224.augreg_in21k', 'vit_small_r26_s32_224_in21k': 'vit_small_r26_s32_224.augreg_in21k', 'vit_base_r50_s16_224_in21k': 'vit_base_r50_s16_224.orig_in21k', 'vit_base_resnet50_224_in21k': 'vit_base_r50_s16_224.orig_in21k', 'vit_large_r50_s32_224_in21k': 'vit_large_r50_s32_224.augreg_in21k', 'vit_base_resnet50_384': 'vit_base_r50_s16_384.orig_in21k_ft_in1k' })
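The builder functions above are registered with timm's model factory, so any of these hybrid variants can be created by name. Below is a minimal sketch of instantiating one of them; it assumes a working timm install, and the chosen variant, `num_classes`, and input size are illustrative only.

```python
import torch
import timm

# Build the R26+ViT-S/32 hybrid registered above; pretrained=False avoids any weight download.
model = timm.create_model('vit_small_r26_s32_224', pretrained=False, num_classes=10)
model.eval()

x = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    logits = model(x)
print(logits.shape)  # expected: torch.Size([1, 10])
```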
pytorch-image-models/timm/models/vision_transformer_hybrid.py/0
{ "file_path": "pytorch-image-models/timm/models/vision_transformer_hybrid.py", "repo_id": "pytorch-image-models", "token_count": 8049 }
205
""" PyTorch LARS / LARC Optimizer An implementation of LARS (SGD) + LARC in PyTorch Based on: * PyTorch SGD: https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 * NVIDIA APEX LARC: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py Additional cleanup and modifications to properly support PyTorch XLA. Copyright 2021 Ross Wightman """ import torch from torch.optim.optimizer import Optimizer class Lars(Optimizer): """ LARS for PyTorch Paper: `Large batch training of Convolutional Networks` - https://arxiv.org/pdf/1708.03888.pdf Args: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate (default: 1.0). momentum (float, optional): momentum factor (default: 0) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) dampening (float, optional): dampening for momentum (default: 0) nesterov (bool, optional): enables Nesterov momentum (default: False) trust_coeff (float): trust coefficient for computing adaptive lr / trust_ratio (default: 0.001) eps (float): eps for division denominator (default: 1e-8) trust_clip (bool): enable LARC trust ratio clipping (default: False) always_adapt (bool): always apply LARS LR adapt, otherwise only when group weight_decay != 0 (default: False) """ def __init__( self, params, lr=1.0, momentum=0, dampening=0, weight_decay=0, nesterov=False, trust_coeff=0.001, eps=1e-8, trust_clip=False, always_adapt=False, ): if lr < 0.0: raise ValueError(f"Invalid learning rate: {lr}") if momentum < 0.0: raise ValueError(f"Invalid momentum value: {momentum}") if weight_decay < 0.0: raise ValueError(f"Invalid weight_decay value: {weight_decay}") if nesterov and (momentum <= 0 or dampening != 0): raise ValueError("Nesterov momentum requires a momentum and zero dampening") defaults = dict( lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov, trust_coeff=trust_coeff, eps=eps, trust_clip=trust_clip, always_adapt=always_adapt, ) super().__init__(params, defaults) def __setstate__(self, state): super().__setstate__(state) for group in self.param_groups: group.setdefault("nesterov", False) @torch.no_grad() def step(self, closure=None): """Performs a single optimization step. Args: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: with torch.enable_grad(): loss = closure() device = self.param_groups[0]['params'][0].device one_tensor = torch.tensor(1.0, device=device) # because torch.where doesn't handle scalars correctly for group in self.param_groups: weight_decay = group['weight_decay'] momentum = group['momentum'] dampening = group['dampening'] nesterov = group['nesterov'] trust_coeff = group['trust_coeff'] eps = group['eps'] for p in group['params']: if p.grad is None: continue grad = p.grad # apply LARS LR adaptation, LARC clipping, weight decay # ref: https://github.com/NVIDIA/apex/blob/master/apex/parallel/LARC.py if weight_decay != 0 or group['always_adapt']: w_norm = p.norm(2.0) g_norm = grad.norm(2.0) trust_ratio = trust_coeff * w_norm / (g_norm + w_norm * weight_decay + eps) # FIXME nested where required since logical and/or not working in PT XLA trust_ratio = torch.where( w_norm > 0, torch.where(g_norm > 0, trust_ratio, one_tensor), one_tensor, ) if group['trust_clip']: trust_ratio = torch.minimum(trust_ratio / group['lr'], one_tensor) grad.add_(p, alpha=weight_decay) grad.mul_(trust_ratio) # apply SGD update https://github.com/pytorch/pytorch/blob/1.7/torch/optim/sgd.py#L100 if momentum != 0: param_state = self.state[p] if 'momentum_buffer' not in param_state: buf = param_state['momentum_buffer'] = torch.clone(grad).detach() else: buf = param_state['momentum_buffer'] buf.mul_(momentum).add_(grad, alpha=1. - dampening) if nesterov: grad = grad.add(buf, alpha=momentum) else: grad = buf p.add_(grad, alpha=-group['lr']) return loss
pytorch-image-models/timm/optim/lars.py/0
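A minimal usage sketch for the `Lars` optimizer defined in the file above, on a toy model. The import path follows the file location, and the hyperparameter values are illustrative rather than recommendations.

```python
import torch
import torch.nn as nn
from timm.optim.lars import Lars

model = nn.Linear(16, 4)
optimizer = Lars(model.parameters(), lr=0.5, momentum=0.9, weight_decay=1e-4, trust_coeff=0.001)

x, y = torch.randn(8, 16), torch.randn(8, 4)
loss = nn.functional.mse_loss(model(x), y)
loss.backward()
optimizer.step()       # applies the LARS trust-ratio scaling before the SGD-style update
optimizer.zero_grad()
```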
{ "file_path": "pytorch-image-models/timm/optim/lars.py", "repo_id": "pytorch-image-models", "token_count": 2571 }
206
""" Polynomial Scheduler Polynomial LR schedule with warmup, noise. Hacked together by / Copyright 2021 Ross Wightman """ import math import logging import torch from .scheduler import Scheduler _logger = logging.getLogger(__name__) class PolyLRScheduler(Scheduler): """ Polynomial LR Scheduler w/ warmup, noise, and k-decay k-decay option based on `k-decay: A New Method For Learning Rate Schedule` - https://arxiv.org/abs/2004.05909 """ def __init__( self, optimizer: torch.optim.Optimizer, t_initial: int, power: float = 0.5, lr_min: float = 0., cycle_mul: float = 1., cycle_decay: float = 1., cycle_limit: int = 1, warmup_t=0, warmup_lr_init=0, warmup_prefix=False, t_in_epochs=True, noise_range_t=None, noise_pct=0.67, noise_std=1.0, noise_seed=42, k_decay=1.0, initialize=True, ) -> None: super().__init__( optimizer, param_group_field="lr", t_in_epochs=t_in_epochs, noise_range_t=noise_range_t, noise_pct=noise_pct, noise_std=noise_std, noise_seed=noise_seed, initialize=initialize ) assert t_initial > 0 assert lr_min >= 0 if t_initial == 1 and cycle_mul == 1 and cycle_decay == 1: _logger.warning("Cosine annealing scheduler will have no effect on the learning " "rate since t_initial = t_mul = eta_mul = 1.") self.t_initial = t_initial self.power = power self.lr_min = lr_min self.cycle_mul = cycle_mul self.cycle_decay = cycle_decay self.cycle_limit = cycle_limit self.warmup_t = warmup_t self.warmup_lr_init = warmup_lr_init self.warmup_prefix = warmup_prefix self.k_decay = k_decay if self.warmup_t: self.warmup_steps = [(v - warmup_lr_init) / self.warmup_t for v in self.base_values] super().update_groups(self.warmup_lr_init) else: self.warmup_steps = [1 for _ in self.base_values] def _get_lr(self, t): if t < self.warmup_t: lrs = [self.warmup_lr_init + t * s for s in self.warmup_steps] else: if self.warmup_prefix: t = t - self.warmup_t if self.cycle_mul != 1: i = math.floor(math.log(1 - t / self.t_initial * (1 - self.cycle_mul), self.cycle_mul)) t_i = self.cycle_mul ** i * self.t_initial t_curr = t - (1 - self.cycle_mul ** i) / (1 - self.cycle_mul) * self.t_initial else: i = t // self.t_initial t_i = self.t_initial t_curr = t - (self.t_initial * i) gamma = self.cycle_decay ** i lr_max_values = [v * gamma for v in self.base_values] k = self.k_decay if i < self.cycle_limit: lrs = [ self.lr_min + (lr_max - self.lr_min) * (1 - t_curr ** k / t_i ** k) ** self.power for lr_max in lr_max_values ] else: lrs = [self.lr_min for _ in self.base_values] return lrs def get_cycle_length(self, cycles=0): cycles = max(1, cycles or self.cycle_limit) if self.cycle_mul == 1.0: return self.t_initial * cycles else: return int(math.floor(-self.t_initial * (self.cycle_mul ** cycles - 1) / (1 - self.cycle_mul)))
pytorch-image-models/timm/scheduler/poly_lr.py/0
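A minimal sketch of stepping the `PolyLRScheduler` from the file above. timm schedulers are driven explicitly with `step(epoch)` (or `step_update(num_updates)`) rather than through the built-in `torch.optim.lr_scheduler` interface; the numbers here are illustrative.

```python
import torch
from timm.scheduler.poly_lr import PolyLRScheduler

model = torch.nn.Linear(8, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
scheduler = PolyLRScheduler(
    optimizer,
    t_initial=100,        # schedule length in epochs
    power=2.0,            # polynomial decay exponent
    lr_min=1e-5,
    warmup_t=5,           # linear warmup epochs
    warmup_lr_init=1e-4,
)

for epoch in range(100):
    # ... run one training epoch here ...
    scheduler.step(epoch + 1)   # timm schedulers take the epoch index explicitly
```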
{ "file_path": "pytorch-image-models/timm/scheduler/poly_lr.py", "repo_id": "pytorch-image-models", "token_count": 1967 }
207
""" Model / state_dict utils Hacked together by / Copyright 2020 Ross Wightman """ import fnmatch from copy import deepcopy import torch from torchvision.ops.misc import FrozenBatchNorm2d from timm.layers import BatchNormAct2d, SyncBatchNormAct, FrozenBatchNormAct2d,\ freeze_batch_norm_2d, unfreeze_batch_norm_2d from .model_ema import ModelEma def unwrap_model(model): if isinstance(model, ModelEma): return unwrap_model(model.ema) else: return model.module if hasattr(model, 'module') else model def get_state_dict(model, unwrap_fn=unwrap_model): return unwrap_fn(model).state_dict() def avg_sq_ch_mean(model, input, output): """ calculate average channel square mean of output activations """ return torch.mean(output.mean(axis=[0, 2, 3]) ** 2).item() def avg_ch_var(model, input, output): """ calculate average channel variance of output activations """ return torch.mean(output.var(axis=[0, 2, 3])).item() def avg_ch_var_residual(model, input, output): """ calculate average channel variance of output activations """ return torch.mean(output.var(axis=[0, 2, 3])).item() class ActivationStatsHook: """Iterates through each of `model`'s modules and matches modules using unix pattern matching based on `hook_fn_locs` and registers `hook_fn` to the module if there is a match. Arguments: model (nn.Module): model from which we will extract the activation stats hook_fn_locs (List[str]): List of `hook_fn` locations based on Unix type string matching with the name of model's modules. hook_fns (List[Callable]): List of hook functions to be registered at every module in `layer_names`. Inspiration from https://docs.fast.ai/callback.hook.html. Refer to https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 for an example on how to plot Signal Propogation Plots using `ActivationStatsHook`. """ def __init__(self, model, hook_fn_locs, hook_fns): self.model = model self.hook_fn_locs = hook_fn_locs self.hook_fns = hook_fns if len(hook_fn_locs) != len(hook_fns): raise ValueError("Please provide `hook_fns` for each `hook_fn_locs`, \ their lengths are different.") self.stats = dict((hook_fn.__name__, []) for hook_fn in hook_fns) for hook_fn_loc, hook_fn in zip(hook_fn_locs, hook_fns): self.register_hook(hook_fn_loc, hook_fn) def _create_hook(self, hook_fn): def append_activation_stats(module, input, output): out = hook_fn(module, input, output) self.stats[hook_fn.__name__].append(out) return append_activation_stats def register_hook(self, hook_fn_loc, hook_fn): for name, module in self.model.named_modules(): if not fnmatch.fnmatch(name, hook_fn_loc): continue module.register_forward_hook(self._create_hook(hook_fn)) def extract_spp_stats( model, hook_fn_locs, hook_fns, input_shape=[8, 3, 224, 224]): """Extract average square channel mean and variance of activations during forward pass to plot Signal Propogation Plots (SPP). Paper: https://arxiv.org/abs/2101.08692 Example Usage: https://gist.github.com/amaarora/6e56942fcb46e67ba203f3009b30d950 """ x = torch.normal(0., 1., input_shape) hook = ActivationStatsHook(model, hook_fn_locs=hook_fn_locs, hook_fns=hook_fns) _ = model(x) return hook.stats def _freeze_unfreeze(root_module, submodules=[], include_bn_running_stats=True, mode='freeze'): """ Freeze or unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module, optional): Root module relative to which the `submodules` are referenced. submodules (list[str]): List of modules for which the parameters will be (un)frozen. 
They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be (un)frozen. Defaults to [] include_bn_running_stats (bool): Whether to also (un)freeze the running statistics of batch norm 2d layers. Defaults to `True`. mode (bool): Whether to freeze ("freeze") or unfreeze ("unfreeze"). Defaults to `"freeze"`. """ assert mode in ["freeze", "unfreeze"], '`mode` must be one of "freeze" or "unfreeze"' if isinstance(root_module, ( torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm, BatchNormAct2d, SyncBatchNormAct, )): # Raise assertion here because we can't convert it in place raise AssertionError( "You have provided a batch norm layer as the `root module`. Please use " "`timm.utils.model.freeze_batch_norm_2d` or `timm.utils.model.unfreeze_batch_norm_2d` instead.") if isinstance(submodules, str): submodules = [submodules] named_modules = submodules submodules = [root_module.get_submodule(m) for m in submodules] if not len(submodules): named_modules, submodules = list(zip(*root_module.named_children())) for n, m in zip(named_modules, submodules): # (Un)freeze parameters for p in m.parameters(): p.requires_grad = False if mode == 'freeze' else True if include_bn_running_stats: # Helper to add submodule specified as a named_module def _add_submodule(module, name, submodule): split = name.rsplit('.', 1) if len(split) > 1: module.get_submodule(split[0]).add_module(split[1], submodule) else: module.add_module(name, submodule) # Freeze batch norm if mode == 'freeze': res = freeze_batch_norm_2d(m) # It's possible that `m` is a type of BatchNorm in itself, in which case `unfreeze_batch_norm_2d` won't # convert it in place, but will return the converted result. In this case `res` holds the converted # result and we may try to re-assign the named module if isinstance(m, ( torch.nn.modules.batchnorm.BatchNorm2d, torch.nn.modules.batchnorm.SyncBatchNorm, BatchNormAct2d, SyncBatchNormAct, )): _add_submodule(root_module, n, res) # Unfreeze batch norm else: res = unfreeze_batch_norm_2d(m) # Ditto. See note above in mode == 'freeze' branch if isinstance(m, (FrozenBatchNorm2d, FrozenBatchNormAct2d)): _add_submodule(root_module, n, res) def freeze(root_module, submodules=[], include_bn_running_stats=True): """ Freeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module): Root module relative to which `submodules` are referenced. submodules (list[str]): List of modules for which the parameters will be frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be frozen. Defaults to `[]`. include_bn_running_stats (bool): Whether to also freeze the running statistics of `BatchNorm2d` and `SyncBatchNorm` layers. These will be converted to `FrozenBatchNorm2d` in place. Hint: During fine tuning, it's good practice to freeze batch norm stats. And note that these are different to the affine parameters which are just normal PyTorch parameters. Defaults to `True`. Hint: If you want to freeze batch norm ONLY, use `timm.utils.model.freeze_batch_norm_2d`. 
Examples:: >>> model = timm.create_model('resnet18') >>> # Freeze up to and including layer2 >>> submodules = [n for n, _ in model.named_children()] >>> print(submodules) ['conv1', 'bn1', 'act1', 'maxpool', 'layer1', 'layer2', 'layer3', 'layer4', 'global_pool', 'fc'] >>> freeze(model, submodules[:submodules.index('layer2') + 1]) >>> # Check for yourself that it works as expected >>> print(model.layer2[0].conv1.weight.requires_grad) False >>> print(model.layer3[0].conv1.weight.requires_grad) True >>> # Unfreeze >>> unfreeze(model) """ _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="freeze") def unfreeze(root_module, submodules=[], include_bn_running_stats=True): """ Unfreeze parameters of the specified modules and those of all their hierarchical descendants. This is done in place. Args: root_module (nn.Module): Root module relative to which `submodules` are referenced. submodules (list[str]): List of submodules for which the parameters will be (un)frozen. They are to be provided as named modules relative to the root module (accessible via `root_module.named_modules()`). An empty list means that the whole root module will be unfrozen. Defaults to `[]`. include_bn_running_stats (bool): Whether to also unfreeze the running statistics of `FrozenBatchNorm2d` layers. These will be converted to `BatchNorm2d` in place. Defaults to `True`. See example in docstring for `freeze`. """ _freeze_unfreeze(root_module, submodules, include_bn_running_stats=include_bn_running_stats, mode="unfreeze") def reparameterize_model(model: torch.nn.Module, inplace=False) -> torch.nn.Module: if not inplace: model = deepcopy(model) def _fuse(m): for child_name, child in m.named_children(): if hasattr(child, 'fuse'): setattr(m, child_name, child.fuse()) elif hasattr(child, "reparameterize"): child.reparameterize() elif hasattr(child, "switch_to_deploy"): child.switch_to_deploy() _fuse(child) _fuse(model) return model
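A small sketch of the activation-stats helpers above. The hook location pattern is an assumption chosen to match the residual block names of a ResNet in timm; adjust it to whatever your own model reports from `named_modules()`.

```python
import timm
from timm.utils.model import extract_spp_stats, avg_sq_ch_mean, avg_ch_var

model = timm.create_model('resnet18', pretrained=False)
stats = extract_spp_stats(
    model,
    hook_fn_locs=['layer?.?', 'layer?.?'],   # unix-style patterns matched against named_modules()
    hook_fns=[avg_sq_ch_mean, avg_ch_var],
    input_shape=[2, 3, 224, 224],
)
print(list(stats.keys()))  # ['avg_sq_ch_mean', 'avg_ch_var']
```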
pytorch-image-models/timm/utils/model.py/0
{ "file_path": "pytorch-image-models/timm/utils/model.py", "repo_id": "pytorch-image-models", "token_count": 4233 }
208
use std::time::{Duration, Instant}; use text_generation_client::{ Batch, CachedBatch, ClientError, NextTokenChooserParameters, Request, ShardedClient, StoppingCriteriaParameters, }; use tokenizers::{Tokenizer, TruncationDirection}; use tokio::sync::{broadcast, mpsc}; const LOREM_IPSUM: &str = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."; #[derive(Debug, Clone)] pub(crate) struct Prefill { pub(crate) latency: Duration, pub(crate) throughput: f64, } #[derive(Debug, Clone)] pub(crate) struct Decode { pub(crate) latency: Duration, pub(crate) token_latency: Duration, pub(crate) throughput: f64, } #[derive(Debug)] pub(crate) enum Message { Warmup, Prefill(Prefill), Decode(Decode), EndRun, EndBatch, } /// Benchmarking task #[allow(clippy::too_many_arguments)] pub(crate) async fn generation_task( tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, parameters: NextTokenChooserParameters, client: ShardedClient, run_sender: mpsc::Sender<Result<Message, ClientError>>, mut shutdown_receiver: broadcast::Receiver<()>, _shutdown_guard_sender: mpsc::Sender<()>, ) { // End task if a message is received on shutdown_receiver // _shutdown_guard_sender will be dropped once the task is finished tokio::select! { res = generate_runs(tokenizer, batch_size, sequence_length, decode_length, top_n_tokens, n_runs, warmups, parameters, client, run_sender.clone()) => { if let Err(err) = res { run_sender.send(Err(err)).await.unwrap_or(()); } }, _ = shutdown_receiver.recv() => {} } } /// Benchmark prefill/decode #[allow(clippy::too_many_arguments)] async fn generate_runs( tokenizer: Tokenizer, batch_size: Vec<u32>, sequence_length: u32, decode_length: u32, top_n_tokens: Option<u32>, n_runs: usize, warmups: usize, parameters: NextTokenChooserParameters, mut client: ShardedClient, run_sender: mpsc::Sender<Result<Message, ClientError>>, ) -> Result<(), ClientError> { // Create a dummy sequence let sequence = create_sequence(sequence_length, tokenizer); for b in batch_size { // Warmups on batch size for _ in 0..warmups { let (_, decode_batch) = prefill( sequence.clone(), sequence_length, b, decode_length, parameters.clone(), top_n_tokens, &mut client, ) .await?; let _ = decode(decode_batch, &mut client).await?; // Send warmup message run_sender.send(Ok(Message::Warmup)).await.unwrap_or(()); } for _ in 0..n_runs { let (prefill, decode_batch) = prefill( sequence.clone(), sequence_length, b, decode_length, parameters.clone(), top_n_tokens, &mut client, ) .await?; // Send prefill message run_sender .send(Ok(Message::Prefill(prefill))) .await .unwrap_or(()); let decode = decode(decode_batch, &mut client).await?; // Send decode message run_sender .send(Ok(Message::Decode(decode))) .await .unwrap_or(()); // Send run ended message run_sender.send(Ok(Message::EndRun)).await.unwrap_or(()); } // Batch ended run_sender.send(Ok(Message::EndBatch)).await.unwrap_or(()); } Ok(()) } // Run a prefill step async fn prefill( sequence: String, sequence_length: u32, batch_size: u32, decode_length: u32, parameters: NextTokenChooserParameters, top_n_tokens: 
Option<u32>, client: &mut ShardedClient, ) -> Result<(Prefill, CachedBatch), ClientError> { // Create requests let requests = (0..batch_size) .map(|id| Request { id: id.into(), prefill_logprobs: false, inputs: sequence.clone(), truncate: sequence_length, parameters: Some(parameters.clone()), stopping_parameters: Some(StoppingCriteriaParameters { max_new_tokens: decode_length, stop_sequences: vec![], ignore_eos_token: true, // Will not stop even if a eos token is generated }), top_n_tokens: top_n_tokens.unwrap_or(0), }) .collect(); let batch = Batch { id: 0, requests, size: batch_size, max_tokens: batch_size * (sequence_length + decode_length), }; // Run prefill let start_time = Instant::now(); let (_, decode_batch, _) = client.prefill(batch.clone()).await?; // Get latency let latency = start_time.elapsed(); // Compute throughput from latency and batch size let throughput = batch_size as f64 / latency.as_secs_f64(); // Decode batch cannot be empty let decode_batch = decode_batch.expect("decode_batch is None. This is a bug."); let step = Prefill { latency, throughput, }; Ok((step, decode_batch)) } /// Run a full decode async fn decode(batch: CachedBatch, client: &mut ShardedClient) -> Result<Decode, ClientError> { let mut decode_length = 0; let batch_size = batch.size; let start_time = Instant::now(); // Full decode over decode length let mut next_batch = Some(batch); while let Some(batch) = next_batch { let result = client.decode(vec![batch]).await?; next_batch = result.1; decode_length += 1; } // Get latency let latency = start_time.elapsed(); let token_latency = latency / decode_length; // Compute throughput from latency, batch size and decode length let throughput = (batch_size * decode_length) as f64 / latency.as_secs_f64(); let step = Decode { latency, token_latency, throughput, }; Ok(step) } /// Create a dummy sequence of the correct length fn create_sequence(sequence_length: u32, tokenizer: Tokenizer) -> String { let lorem_ipsum_length = tokenizer.encode(LOREM_IPSUM, true).unwrap().len(); // Repeat lorem ipsum to cover sequence length let string_sequence = LOREM_IPSUM.repeat((0..sequence_length).step_by(lorem_ipsum_length).len()); // Encode sequence let mut encoding = tokenizer.encode(string_sequence, true).unwrap(); // Truncate to sequence_length encoding.truncate(sequence_length as usize, 0, TruncationDirection::Left); // Decode tokenizer.decode(encoding.get_ids(), false).unwrap() }
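For readers skimming the Rust above, the latency and throughput metrics it reports reduce to the following arithmetic, restated here as a small Python sketch with purely illustrative numbers.

```python
# Illustrative values only; the benchmark measures the latencies with Instant::now().
batch_size, decode_length = 4, 16
prefill_latency_s = 0.25
decode_latency_s = 1.60

prefill_throughput = batch_size / prefill_latency_s                   # sequences per second
decode_token_latency = decode_latency_s / decode_length               # seconds per decode step
decode_throughput = (batch_size * decode_length) / decode_latency_s   # tokens per second
print(prefill_throughput, decode_token_latency, decode_throughput)
```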
text-generation-inference/benchmark/src/generation.rs/0
{ "file_path": "text-generation-inference/benchmark/src/generation.rs", "repo_id": "text-generation-inference", "token_count": 3201 }
209
import json import requests from aiohttp import ClientSession, ClientTimeout from pydantic import ValidationError from typing import Dict, Optional, List, AsyncIterator, Iterator, Union from text_generation.types import ( StreamResponse, Response, Request, Parameters, Grammar, ChatRequest, ChatCompletionChunk, ChatComplete, Message, Tool, ) from text_generation.errors import parse_error class Client: """Client to make calls to a text-generation-inference instance Example: ```python >>> from text_generation import Client >>> client = Client("https://api-inference.huggingface.co/models/bigscience/bloomz") >>> client.generate("Why is the sky blue?").generated_text ' Rayleigh scattering' >>> result = "" >>> for response in client.generate_stream("Why is the sky blue?"): >>> if not response.token.special: >>> result += response.token.text >>> result ' Rayleigh scattering' ``` """ def __init__( self, base_url: str, headers: Optional[Dict[str, str]] = None, cookies: Optional[Dict[str, str]] = None, timeout: int = 10, ): """ Args: base_url (`str`): text-generation-inference instance base url headers (`Optional[Dict[str, str]]`): Additional headers cookies (`Optional[Dict[str, str]]`): Cookies to include in the requests timeout (`int`): Timeout in seconds """ self.base_url = base_url self.headers = headers self.cookies = cookies self.timeout = timeout def chat( self, messages: List[Message], frequency_penalty: Optional[float] = None, logit_bias: Optional[List[float]] = None, logprobs: Optional[bool] = None, top_logprobs: Optional[int] = None, max_tokens: Optional[int] = None, n: Optional[int] = None, presence_penalty: Optional[float] = None, stream: bool = False, seed: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, tools: Optional[List[Tool]] = None, tool_choice: Optional[str] = None, ): """ Given a list of messages, generate a response asynchronously Args: messages (`List[Message]`): List of messages frequency_penalty (`float`): The parameter for frequency penalty. 0.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. logit_bias (`List[float]`): Adjust the likelihood of specified tokens logprobs (`bool`): Include log probabilities in the response top_logprobs (`int`): Include the `n` most likely tokens at each step max_tokens (`int`): Maximum number of generated tokens n (`int`): Generate `n` completions presence_penalty (`float`): The parameter for presence penalty. 0.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. stream (`bool`): Stream the response seed (`int`): Random sampling seed temperature (`float`): The value used to module the logits distribution. 
top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation tools (`List[Tool]`): List of tools to use tool_choice (`str`): The tool to use """ request = ChatRequest( model="tgi", messages=messages, frequency_penalty=frequency_penalty, logit_bias=logit_bias, logprobs=logprobs, top_logprobs=top_logprobs, max_tokens=max_tokens, n=n, presence_penalty=presence_penalty, stream=stream, seed=seed, temperature=temperature, top_p=top_p, tools=tools, tool_choice=tool_choice, ) if not stream: resp = requests.post( f"{self.base_url}/v1/chat/completions", json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, ) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) return ChatComplete(**payload) else: return self._chat_stream_response(request) def _chat_stream_response(self, request): resp = requests.post( f"{self.base_url}/v1/chat/completions", json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, stream=True, ) # iterate and print stream for byte_payload in resp.iter_lines(): if byte_payload == b"\n": continue payload = byte_payload.decode("utf-8") if payload.startswith("data:"): json_payload = json.loads(payload.lstrip("data:").rstrip("\n")) try: response = ChatCompletionChunk(**json_payload) yield response except ValidationError: raise parse_error(resp.status, json_payload) def generate( self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, decoder_input_details: bool = False, top_n_tokens: Optional[int] = None, grammar: Optional[Grammar] = None, ) -> Response: """ Given a prompt, generate the following text Args: prompt (`str`): Input text do_sample (`bool`): Activate logits sampling max_new_tokens (`int`): Maximum number of generated tokens best_of (`int`): Generate best_of sequences and return the one if the highest token logprobs repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. return_full_text (`bool`): Whether to prepend the prompt to the generated text seed (`int`): Random sampling seed stop_sequences (`List[str]`): Stop generating tokens if a member of `stop_sequences` is generated temperature (`float`): The value used to module the logits distribution. top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. 
truncate (`int`): Truncate inputs tokens to the given size typical_p (`float`): Typical Decoding mass See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information watermark (`bool`): Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) decoder_input_details (`bool`): Return the decoder input token logprobs and ids top_n_tokens (`int`): Return the `n` most likely tokens at each step grammar (`Grammar`): Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation of the text to match a regular expression or JSON schema. Returns: Response: generated response """ # Validate parameters parameters = Parameters( best_of=best_of, details=True, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, decoder_input_details=decoder_input_details, top_n_tokens=top_n_tokens, grammar=grammar, ) request = Request(inputs=prompt, stream=False, parameters=parameters) resp = requests.post( self.base_url, json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, ) payload = resp.json() if resp.status_code != 200: raise parse_error(resp.status_code, payload) return Response(**payload[0]) def generate_stream( self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, top_n_tokens: Optional[int] = None, grammar: Optional[Grammar] = None, ) -> Iterator[StreamResponse]: """ Given a prompt, generate the following stream of tokens Args: prompt (`str`): Input text do_sample (`bool`): Activate logits sampling max_new_tokens (`int`): Maximum number of generated tokens repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. return_full_text (`bool`): Whether to prepend the prompt to the generated text seed (`int`): Random sampling seed stop_sequences (`List[str]`): Stop generating tokens if a member of `stop_sequences` is generated temperature (`float`): The value used to module the logits distribution. top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. truncate (`int`): Truncate inputs tokens to the given size typical_p (`float`): Typical Decoding mass See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information watermark (`bool`): Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) top_n_tokens (`int`): Return the `n` most likely tokens at each step grammar (`Grammar`): Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation of the text to match a regular expression or JSON schema. 
Returns: Iterator[StreamResponse]: stream of generated tokens """ # Validate parameters parameters = Parameters( best_of=None, details=True, decoder_input_details=False, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, grammar=grammar, ) request = Request(inputs=prompt, stream=True, parameters=parameters) resp = requests.post( self.base_url, json=request.dict(), headers=self.headers, cookies=self.cookies, timeout=self.timeout, stream=True, ) if resp.status_code != 200: raise parse_error(resp.status_code, resp.json()) # Parse ServerSentEvents for byte_payload in resp.iter_lines(): # Skip line if byte_payload == b"\n": continue payload = byte_payload.decode("utf-8") # Event data if payload.startswith("data:"): # Decode payload json_payload = json.loads(payload.lstrip("data:").rstrip("/n")) # Parse payload try: response = StreamResponse(**json_payload) except ValidationError: # If we failed to parse the payload, then it is an error payload raise parse_error(resp.status_code, json_payload) yield response class AsyncClient: """Asynchronous Client to make calls to a text-generation-inference instance Example: ```python >>> from text_generation import AsyncClient >>> client = AsyncClient("https://api-inference.huggingface.co/models/bigscience/bloomz") >>> response = await client.generate("Why is the sky blue?") >>> response.generated_text ' Rayleigh scattering' >>> result = "" >>> async for response in client.generate_stream("Why is the sky blue?"): >>> if not response.token.special: >>> result += response.token.text >>> result ' Rayleigh scattering' ``` """ def __init__( self, base_url: str, headers: Optional[Dict[str, str]] = None, cookies: Optional[Dict[str, str]] = None, timeout: int = 10, ): """ Args: base_url (`str`): text-generation-inference instance base url headers (`Optional[Dict[str, str]]`): Additional headers cookies (`Optional[Dict[str, str]]`): Cookies to include in the requests timeout (`int`): Timeout in seconds """ self.base_url = base_url self.headers = headers self.cookies = cookies self.timeout = ClientTimeout(timeout) async def chat( self, messages: List[Message], frequency_penalty: Optional[float] = None, logit_bias: Optional[List[float]] = None, logprobs: Optional[bool] = None, top_logprobs: Optional[int] = None, max_tokens: Optional[int] = None, n: Optional[int] = None, presence_penalty: Optional[float] = None, stream: bool = False, seed: Optional[int] = None, temperature: Optional[float] = None, top_p: Optional[float] = None, tools: Optional[List[Tool]] = None, tool_choice: Optional[str] = None, ) -> Union[ChatComplete, AsyncIterator[ChatCompletionChunk]]: """ Given a list of messages, generate a response asynchronously Args: messages (`List[Message]`): List of messages frequency_penalty (`float`): The parameter for frequency penalty. 0.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. logit_bias (`List[float]`): Adjust the likelihood of specified tokens logprobs (`bool`): Include log probabilities in the response top_logprobs (`int`): Include the `n` most likely tokens at each step max_tokens (`int`): Maximum number of generated tokens n (`int`): Generate `n` completions presence_penalty (`float`): The parameter for presence penalty. 
0.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. stream (`bool`): Stream the response seed (`int`): Random sampling seed temperature (`float`): The value used to module the logits distribution. top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation tools (`List[Tool]`): List of tools to use tool_choice (`str`): The tool to use """ request = ChatRequest( model="tgi", messages=messages, frequency_penalty=frequency_penalty, logit_bias=logit_bias, logprobs=logprobs, top_logprobs=top_logprobs, max_tokens=max_tokens, n=n, presence_penalty=presence_penalty, stream=stream, seed=seed, temperature=temperature, top_p=top_p, tools=tools, tool_choice=tool_choice, ) if not stream: return await self._chat_single_response(request) else: return self._chat_stream_response(request) async def _chat_single_response(self, request): async with ClientSession( headers=self.headers, cookies=self.cookies, timeout=self.timeout ) as session: async with session.post( f"{self.base_url}/v1/chat/completions", json=request.dict() ) as resp: payload = await resp.json() if resp.status != 200: raise parse_error(resp.status, payload) return ChatComplete(**payload) async def _chat_stream_response(self, request): async with ClientSession( headers=self.headers, cookies=self.cookies, timeout=self.timeout ) as session: async with session.post( f"{self.base_url}/v1/chat/completions", json=request.dict() ) as resp: async for byte_payload in resp.content: if byte_payload == b"\n": continue payload = byte_payload.decode("utf-8") if payload.startswith("data:"): json_payload = json.loads(payload.lstrip("data:").rstrip("\n")) try: response = ChatCompletionChunk(**json_payload) yield response except ValidationError: raise parse_error(resp.status, json_payload) async def generate( self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, best_of: Optional[int] = None, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, decoder_input_details: bool = False, top_n_tokens: Optional[int] = None, grammar: Optional[Grammar] = None, ) -> Response: """ Given a prompt, generate the following text asynchronously Args: prompt (`str`): Input text do_sample (`bool`): Activate logits sampling max_new_tokens (`int`): Maximum number of generated tokens best_of (`int`): Generate best_of sequences and return the one if the highest token logprobs repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. return_full_text (`bool`): Whether to prepend the prompt to the generated text seed (`int`): Random sampling seed stop_sequences (`List[str]`): Stop generating tokens if a member of `stop_sequences` is generated temperature (`float`): The value used to module the logits distribution. top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. 
truncate (`int`): Truncate inputs tokens to the given size typical_p (`float`): Typical Decoding mass See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information watermark (`bool`): Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) decoder_input_details (`bool`): Return the decoder input token logprobs and ids top_n_tokens (`int`): Return the `n` most likely tokens at each step grammar (`Grammar`): Whether to use a grammar for the generation and the grammar to use. Grammars will constrain the generation of the text to match a regular expression or JSON schema. Returns: Response: generated response """ # Validate parameters parameters = Parameters( best_of=best_of, details=True, decoder_input_details=decoder_input_details, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, grammar=grammar, ) request = Request(inputs=prompt, stream=False, parameters=parameters) async with ClientSession( headers=self.headers, cookies=self.cookies, timeout=self.timeout ) as session: async with session.post(self.base_url, json=request.dict()) as resp: payload = await resp.json() if resp.status != 200: raise parse_error(resp.status, payload) return Response(**payload[0]) async def generate_stream( self, prompt: str, do_sample: bool = False, max_new_tokens: int = 20, repetition_penalty: Optional[float] = None, return_full_text: bool = False, seed: Optional[int] = None, stop_sequences: Optional[List[str]] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, truncate: Optional[int] = None, typical_p: Optional[float] = None, watermark: bool = False, top_n_tokens: Optional[int] = None, grammar: Optional[Grammar] = None, ) -> AsyncIterator[StreamResponse]: """ Given a prompt, generate the following stream of tokens asynchronously Args: prompt (`str`): Input text do_sample (`bool`): Activate logits sampling max_new_tokens (`int`): Maximum number of generated tokens repetition_penalty (`float`): The parameter for repetition penalty. 1.0 means no penalty. See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. return_full_text (`bool`): Whether to prepend the prompt to the generated text seed (`int`): Random sampling seed stop_sequences (`List[str]`): Stop generating tokens if a member of `stop_sequences` is generated temperature (`float`): The value used to module the logits distribution. top_k (`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (`float`): If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or higher are kept for generation. truncate (`int`): Truncate inputs tokens to the given size typical_p (`float`): Typical Decoding mass See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information watermark (`bool`): Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226) top_n_tokens (`int`): Return the `n` most likely tokens at each step grammar (`Grammar`): Whether to use a grammar for the generation and the grammar to use. 
Grammars will constrain the generation of the text to match a regular expression or JSON schema. Returns: AsyncIterator[StreamResponse]: stream of generated tokens """ # Validate parameters parameters = Parameters( best_of=None, details=True, decoder_input_details=False, do_sample=do_sample, max_new_tokens=max_new_tokens, repetition_penalty=repetition_penalty, return_full_text=return_full_text, seed=seed, stop=stop_sequences if stop_sequences is not None else [], temperature=temperature, top_k=top_k, top_p=top_p, truncate=truncate, typical_p=typical_p, watermark=watermark, top_n_tokens=top_n_tokens, grammar=grammar, ) request = Request(inputs=prompt, stream=True, parameters=parameters) async with ClientSession( headers=self.headers, cookies=self.cookies, timeout=self.timeout ) as session: async with session.post(self.base_url, json=request.dict()) as resp: if resp.status != 200: raise parse_error(resp.status, await resp.json()) # Parse ServerSentEvents async for byte_payload in resp.content: # Skip line if byte_payload == b"\n": continue payload = byte_payload.decode("utf-8") # Event data if payload.startswith("data:"): # Decode payload json_payload = json.loads(payload.lstrip("data:").rstrip("/n")) # Parse payload try: response = StreamResponse(**json_payload) except ValidationError: # If we failed to parse the payload, then it is an error payload raise parse_error(resp.status, json_payload) yield response
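The `generate` and `generate_stream` docstrings above include usage examples, but `chat` does not, so here is a minimal sketch. The server URL is a placeholder, and the response field access assumes the OpenAI-style layout of the `ChatComplete` and `ChatCompletionChunk` types imported from `text_generation.types`.

```python
from text_generation import Client
from text_generation.types import Message

client = Client("http://127.0.0.1:8080")  # placeholder: a running TGI instance

# Non-streaming chat completion
complete = client.chat(
    messages=[Message(role="user", content="What is deep learning?")],
    max_tokens=64,
    temperature=0.7,
)
print(complete.choices[0].message.content)

# Streaming chat completion: stream=True returns an iterator of ChatCompletionChunk
for chunk in client.chat(
    messages=[Message(role="user", content="What is deep learning?")],
    max_tokens=64,
    stream=True,
):
    print(chunk.choices[0].delta.content or "", end="")
```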
text-generation-inference/clients/python/text_generation/client.py/0
{ "file_path": "text-generation-inference/clients/python/text_generation/client.py", "repo_id": "text-generation-inference", "token_count": 14202 }
210
# Quantization

TGI offers GPTQ and bits-and-bytes quantization to quantize large language models.

## Quantization with GPTQ

GPTQ is a post-training quantization method to make the model smaller. It quantizes each layer by finding a compressed version of its weight that yields a minimum mean squared error, like below 👇

Given a layer \\(l\\) with weight matrix \\(W_{l}\\) and layer input \\(X_{l}\\), find the quantized weight \\(\\hat{W}_{l}\\):

$$\hat{W}_{l}^{*} = \underset{\hat{W}_{l}}{\mathrm{argmin}} \; \lVert W_{l}X_{l} - \hat{W}_{l}X_{l} \rVert_{2}^{2}$$

TGI allows you to either run an already GPTQ-quantized model (see available models [here](https://huggingface.co/models?search=gptq)) or quantize a model of your choice using the quantization script. You can run a quantized model by simply passing `--quantize` like below 👇

```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize gptq
```

Note that TGI's GPTQ implementation doesn't use [AutoGPTQ](https://github.com/PanQiWei/AutoGPTQ) under the hood. However, models quantized using AutoGPTQ or Optimum can still be served by TGI.

To quantize a given model using GPTQ with a calibration dataset, simply run

```bash
text-generation-server quantize tiiuae/falcon-40b /data/falcon-40b-gptq
# Add --upload-to-model-id MYUSERNAME/falcon-40b to push the created model to the hub directly
```

This will create a new directory with the quantized files, which you can use with:

```bash
text-generation-launcher --model-id /data/falcon-40b-gptq/ --sharded true --num-shard 2 --quantize gptq
```

You can learn more about the quantization options by running `text-generation-server quantize --help`.

If you wish to do more with GPTQ models (e.g. train an adapter on top), you can read about the Transformers GPTQ integration [here](https://huggingface.co/blog/gptq-integration). You can learn more about GPTQ from the [paper](https://arxiv.org/pdf/2210.17323.pdf).

## Quantization with bitsandbytes

bitsandbytes is a library used to apply 8-bit and 4-bit quantization to models. Unlike GPTQ quantization, bitsandbytes doesn't require a calibration dataset or any post-processing – weights are automatically quantized on load. However, inference with bitsandbytes is slower than GPTQ or FP16 precision.

8-bit quantization enables multi-billion parameter scale models to fit in smaller hardware without degrading performance too much. In TGI, you can use 8-bit quantization by adding `--quantize bitsandbytes` like below 👇

```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize bitsandbytes
```

4-bit quantization is also possible with bitsandbytes. You can choose one of the following 4-bit data types: 4-bit float (`fp4`), or 4-bit `NormalFloat` (`nf4`). These data types were introduced in the context of parameter-efficient fine-tuning, but you can apply them for inference by automatically converting the model weights on load.

In TGI, you can use 4-bit quantization by adding `--quantize bitsandbytes-nf4` or `--quantize bitsandbytes-fp4` like below 👇

```bash
docker run --gpus all --shm-size 1g -p 8080:80 -v $volume:/data ghcr.io/huggingface/text-generation-inference:latest --model-id $model --quantize bitsandbytes-nf4
```

You can get more information about 8-bit quantization by reading this [blog post](https://huggingface.co/blog/hf-bitsandbytes-integration), and 4-bit quantization by reading [this blog post](https://huggingface.co/blog/4bit-transformers-bitsandbytes).
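Once one of the servers above is running, a quick sanity check is to query it with the `text-generation` Python client; the URL below assumes the default `-p 8080:80` port mapping from the docker commands in this document.

```python
from text_generation import Client

client = Client("http://127.0.0.1:8080")
response = client.generate("What is quantization?", max_new_tokens=32)
print(response.generated_text)
```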
text-generation-inference/docs/source/conceptual/quantization.md/0
{ "file_path": "text-generation-inference/docs/source/conceptual/quantization.md", "repo_id": "text-generation-inference", "token_count": 1109 }
211
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 1724, "logprob": -7.703125, "text": "What" }, { "id": 338, "logprob": -1.4765625, "text": "is" }, { "id": 21784, "logprob": -9.390625, "text": "Deep" }, { "id": 29257, "logprob": -1.8583984, "text": "Learning" }, { "id": 29973, "logprob": -0.7548828, "text": "?" } ], "seed": null, "tokens": [ { "id": 13, "logprob": -1.9306641, "special": false, "text": "\n" }, { "id": 5618, "logprob": -2.4550781, "special": false, "text": "What" }, { "id": 338, "logprob": -0.5732422, "special": false, "text": " is" }, { "id": 278, "logprob": -1.5761719, "special": false, "text": " the" }, { "id": 4328, "logprob": -1.5888672, "special": false, "text": " difference" }, { "id": 1546, "logprob": -0.026504517, "special": false, "text": " between" }, { "id": 21784, "logprob": -1.4287109, "special": false, "text": " Deep" }, { "id": 29257, "logprob": -0.15856934, "special": false, "text": " Learning" }, { "id": 322, "logprob": -0.17456055, "special": false, "text": " and" }, { "id": 6189, "logprob": -0.62646484, "special": false, "text": " Machine" } ], "top_tokens": null }, "generated_text": "\nWhat is the difference between Deep Learning and Machine" }
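These integration-test snapshots record the prefill tokens, every generated token with its logprob, and the final `generated_text`. Below is a small sketch of reading one such file (the filename is a placeholder) and checking that the non-special token texts reconstruct the generated text; snapshots for batched tests are lists of such objects.

```python
import json

with open("test_flash_llama_awq.json") as f:  # placeholder path
    snapshot = json.load(f)

tokens = snapshot["details"]["tokens"]
text = "".join(t["text"] for t in tokens if not t["special"])
assert text == snapshot["generated_text"]

total_logprob = sum(t["logprob"] for t in tokens)
print(f"{len(tokens)} tokens, total logprob = {total_logprob:.3f}")
```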
text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_awq/test_flash_llama_awq.json", "repo_id": "text-generation-inference", "token_count": 1236 }
212
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 1, "logprob": null, "text": "<s>" }, { "id": 4321, "logprob": -8.6875, "text": "Test" }, { "id": 2009, "logprob": -11.546875, "text": "request" } ], "seed": null, "tokens": [ { "id": 363, "logprob": -1.5351562, "special": false, "text": " for" }, { "id": 847, "logprob": -2.5722656, "special": false, "text": " /" }, { "id": 2754, "logprob": -2.2714844, "special": false, "text": "api" }, { "id": 29914, "logprob": -0.03414917, "special": false, "text": "/" }, { "id": 29894, "logprob": -0.95996094, "special": false, "text": "v" }, { "id": 29896, "logprob": -0.3635254, "special": false, "text": "1" }, { "id": 29914, "logprob": -0.013031006, "special": false, "text": "/" }, { "id": 16418, "logprob": -3.1523438, "special": false, "text": "projects" }, { "id": 29914, "logprob": -0.43701172, "special": false, "text": "/" }, { "id": 29896, "logprob": -1.9394531, "special": false, "text": "1" } ], "top_tokens": null }, "generated_text": " for /api/v1/projects/1" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_llama/test_flash_llama.json", "repo_id": "text-generation-inference", "token_count": 1050 }
213
{ "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 14402, "logprob": null, "text": "Test" }, { "id": 2581, "logprob": -11.6171875, "text": " request" } ], "seed": null, "tokens": [ { "id": 25, "logprob": -2.3203125, "special": false, "text": ":" }, { "id": 1391, "logprob": -0.98779297, "special": false, "text": " {" }, { "id": 25927, "logprob": -0.76660156, "special": false, "text": "request" }, { "id": 92, "logprob": -0.7246094, "special": false, "text": "}" }, { "id": 4943, "logprob": -0.41333008, "special": false, "text": "\")" }, { "id": 198, "logprob": -0.11785889, "special": false, "text": "\n" }, { "id": 50280, "logprob": -0.97265625, "special": false, "text": " " }, { "id": 26209, "logprob": -1.4414062, "special": false, "text": "response" }, { "id": 796, "logprob": -0.0569458, "special": false, "text": " =" }, { "id": 2116, "logprob": -1.1533203, "special": false, "text": " self" } ], "top_tokens": null }, "generated_text": ": {request}\")\n response = self" }
text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_phi/test_flash_phi.json", "repo_id": "text-generation-inference", "token_count": 1003 }
214
[ { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -8.5859375, "text": " ge" }, { "id": 21017, "logprob": -7.5820312, "text": "ometric" }, { "id": 81, "logprob": -0.26708984, "text": "_" }, { "id": 6009, "logprob": -1.6386719, "text": "mean" }, { "id": 26, "logprob": -0.22717285, "text": "(" }, { "id": 62, "logprob": -5.234375, "text": "L" }, { "id": 44, "logprob": -3.1015625, "text": ":" }, { "id": 1682, "logprob": -1.1083984, "text": " List" }, { "id": 77, "logprob": -0.14294434, "text": "[" }, { "id": 1808, "logprob": -0.32592773, "text": "float" }, { "id": 10794, "logprob": -2.8164062, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -0.12817383, "special": false, "text": "\n " }, { "id": 1524, "logprob": -0.9863281, "special": false, "text": " \"\"\"" }, { "id": 284, "logprob": -0.7011719, "special": false, "text": "\n " }, { "id": 14883, "logprob": -2.2050781, "special": false, "text": " Calculate" }, { "id": 322, "logprob": -0.2668457, "special": false, "text": " the" }, { "id": 3226, "logprob": -0.08465576, "special": false, "text": " ge" }, { "id": 21017, "logprob": -0.019012451, "special": false, "text": "ometric" }, { "id": 5651, "logprob": -0.028625488, "special": false, "text": " mean" }, { "id": 432, "logprob": -0.29418945, "special": false, "text": " of" }, { "id": 312, "logprob": -0.3161621, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n \"\"\"\n Calculate the geometric mean of a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -8.5859375, "text": " ge" }, { "id": 21017, "logprob": -7.59375, "text": "ometric" }, { "id": 81, "logprob": -0.26953125, "text": "_" }, { "id": 6009, "logprob": -1.640625, "text": "mean" }, { "id": 26, "logprob": -0.22705078, "text": "(" }, { "id": 62, "logprob": -5.234375, "text": "L" }, { "id": 44, "logprob": -3.1132812, "text": ":" }, { "id": 1682, "logprob": -1.1123047, "text": " List" }, { "id": 77, "logprob": -0.14294434, "text": "[" }, { "id": 1808, "logprob": -0.32299805, "text": "float" }, { "id": 10794, "logprob": -2.8164062, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -0.12854004, "special": false, "text": "\n " }, { "id": 1524, "logprob": -0.9897461, "special": false, "text": " \"\"\"" }, { "id": 284, "logprob": -0.69970703, "special": false, "text": "\n " }, { "id": 14883, "logprob": -2.2050781, "special": false, "text": " Calculate" }, { "id": 322, "logprob": -0.2668457, "special": false, "text": " the" }, { "id": 3226, "logprob": -0.08496094, "special": false, "text": " ge" }, { "id": 21017, "logprob": -0.019012451, "special": false, "text": "ometric" }, { "id": 5651, "logprob": -0.029037476, "special": false, "text": " mean" }, { "id": 432, "logprob": -0.2939453, "special": false, "text": " of" }, { "id": 312, "logprob": -0.31591797, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n \"\"\"\n Calculate the geometric mean of a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -8.5859375, "text": " ge" }, { "id": 21017, "logprob": -7.5859375, "text": "ometric" }, { "id": 81, "logprob": -0.26586914, "text": "_" }, { "id": 6009, "logprob": 
-1.6347656, "text": "mean" }, { "id": 26, "logprob": -0.22766113, "text": "(" }, { "id": 62, "logprob": -5.2265625, "text": "L" }, { "id": 44, "logprob": -3.0976562, "text": ":" }, { "id": 1682, "logprob": -1.1025391, "text": " List" }, { "id": 77, "logprob": -0.1427002, "text": "[" }, { "id": 1808, "logprob": -0.32592773, "text": "float" }, { "id": 10794, "logprob": -2.8164062, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -0.13012695, "special": false, "text": "\n " }, { "id": 1524, "logprob": -0.98046875, "special": false, "text": " \"\"\"" }, { "id": 284, "logprob": -0.69921875, "special": false, "text": "\n " }, { "id": 14883, "logprob": -2.1992188, "special": false, "text": " Calculate" }, { "id": 322, "logprob": -0.2668457, "special": false, "text": " the" }, { "id": 3226, "logprob": -0.083496094, "special": false, "text": " ge" }, { "id": 21017, "logprob": -0.01902771, "special": false, "text": "ometric" }, { "id": 5651, "logprob": -0.029006958, "special": false, "text": " mean" }, { "id": 432, "logprob": -0.29248047, "special": false, "text": " of" }, { "id": 312, "logprob": -0.3161621, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n \"\"\"\n Calculate the geometric mean of a" }, { "details": { "best_of_sequences": null, "finish_reason": "length", "generated_tokens": 10, "prefill": [ { "id": 589, "logprob": null, "text": "def" }, { "id": 3226, "logprob": -8.5859375, "text": " ge" }, { "id": 21017, "logprob": -7.5859375, "text": "ometric" }, { "id": 81, "logprob": -0.26904297, "text": "_" }, { "id": 6009, "logprob": -1.6386719, "text": "mean" }, { "id": 26, "logprob": -0.22705078, "text": "(" }, { "id": 62, "logprob": -5.234375, "text": "L" }, { "id": 44, "logprob": -3.1132812, "text": ":" }, { "id": 1682, "logprob": -1.1074219, "text": " List" }, { "id": 77, "logprob": -0.14477539, "text": "[" }, { "id": 1808, "logprob": -0.3256836, "text": "float" }, { "id": 10794, "logprob": -2.8027344, "text": "]):" } ], "seed": null, "tokens": [ { "id": 284, "logprob": -0.12915039, "special": false, "text": "\n " }, { "id": 1524, "logprob": -0.98535156, "special": false, "text": " \"\"\"" }, { "id": 284, "logprob": -0.69921875, "special": false, "text": "\n " }, { "id": 14883, "logprob": -2.2011719, "special": false, "text": " Calculate" }, { "id": 322, "logprob": -0.26708984, "special": false, "text": " the" }, { "id": 3226, "logprob": -0.08502197, "special": false, "text": " ge" }, { "id": 21017, "logprob": -0.019012451, "special": false, "text": "ometric" }, { "id": 5651, "logprob": -0.028625488, "special": false, "text": " mean" }, { "id": 432, "logprob": -0.29589844, "special": false, "text": " of" }, { "id": 312, "logprob": -0.31591797, "special": false, "text": " a" } ], "top_tokens": null }, "generated_text": "\n \"\"\"\n Calculate the geometric mean of a" } ]
text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_starcoder_gptq/test_flash_starcoder_gptq_load.json", "repo_id": "text-generation-inference", "token_count": 7553 }
215
{ "details": { "best_of_sequences": null, "finish_reason": "eos_token", "generated_tokens": 7, "prefill": [ { "id": 0, "logprob": null, "text": "<pad>" } ], "seed": null, "tokens": [ { "id": 3, "logprob": -0.7001953, "special": false, "text": " " }, { "id": 18, "logprob": -1.1943359, "special": false, "text": "-" }, { "id": 26937, "logprob": -1.2099609, "special": false, "text": "196" }, { "id": 3, "logprob": -1.2451172, "special": false, "text": " " }, { "id": 1956, "logprob": -0.3322754, "special": false, "text": "°" }, { "id": 254, "logprob": -0.19213867, "special": false, "text": "C" }, { "id": 1, "logprob": -0.030151367, "special": true, "text": "</s>" } ] }, "generated_text": "-196 °C" }
text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json/0
{ "file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_t5_sharded/test_t5_sharded.json", "repo_id": "text-generation-inference", "token_count": 680 }
216
import pytest @pytest.fixture(scope="module") def flash_medusa_handle(launcher): with launcher( "FasterDecoding/medusa-vicuna-7b-v1.3", num_shard=2, revision="refs/pr/1" ) as handle: yield handle @pytest.fixture(scope="module") async def flash_medusa(flash_medusa_handle): await flash_medusa_handle.health(300) return flash_medusa_handle.client @pytest.mark.asyncio async def test_flash_medusa_simple(flash_medusa, response_snapshot): response = await flash_medusa.generate( "What is Deep Learning?", max_new_tokens=10, decoder_input_details=True ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_medusa_all_params(flash_medusa, response_snapshot): response = await flash_medusa.generate( "What is Deep Learning?", max_new_tokens=10, repetition_penalty=1.2, return_full_text=True, stop_sequences=["test"], temperature=0.5, top_p=0.9, top_k=10, truncate=5, typical_p=0.9, watermark=True, decoder_input_details=True, seed=0, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.asyncio async def test_flash_medusa_load(flash_medusa, generate_load, response_snapshot): responses = await generate_load( flash_medusa, "What is Deep Learning?", max_new_tokens=10, n=4 ) assert len(responses) == 4 assert all( [r.generated_text == responses[0].generated_text for r in responses] ), f"{[r.generated_text for r in responses]}" assert ( responses[0].generated_text == "\nDeep learning is a subset of machine learning" ) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_flash_medusa.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_flash_medusa.py", "repo_id": "text-generation-inference", "token_count": 749 }
217
import pytest @pytest.fixture(scope="module") def neox_sharded_handle(launcher): with launcher( "OpenAssistant/oasst-sft-1-pythia-12b", num_shard=2, use_flash_attention=False ) as handle: yield handle @pytest.fixture(scope="module") async def neox_sharded(neox_sharded_handle): await neox_sharded_handle.health(300) return neox_sharded_handle.client @pytest.mark.skip @pytest.mark.asyncio async def test_neox(neox_sharded, response_snapshot): response = await neox_sharded.generate( "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, decoder_input_details=True, ) assert response.details.generated_tokens == 10 assert response == response_snapshot @pytest.mark.skip @pytest.mark.asyncio async def test_neox_load(neox_sharded, generate_load, response_snapshot): responses = await generate_load( neox_sharded, "<|prompter|>What is a meme, and what's the history behind this word?<|endoftext|><|assistant|>", max_new_tokens=10, n=4, ) assert len(responses) == 4 assert all([r.generated_text == responses[0].generated_text for r in responses]) assert responses == response_snapshot
text-generation-inference/integration-tests/models/test_neox_sharded.py/0
{ "file_path": "text-generation-inference/integration-tests/models/test_neox_sharded.py", "repo_id": "text-generation-inference", "token_count": 507 }
218
[package] name = "text-generation-router" description = "Text Generation Webserver" build = "build.rs" version.workspace = true edition.workspace = true authors.workspace = true homepage.workspace = true [lib] path = "src/lib.rs" [[bin]] name = "text-generation-router" path = "src/main.rs" [dependencies] async-stream = "0.3.5" axum = { version = "0.6.20", features = ["json"] } axum-tracing-opentelemetry = "0.14.1" text-generation-client = { path = "client" } clap = { version = "4.4.5", features = ["derive", "env"] } futures = "0.3.28" hf-hub = { version = "0.3.0", features = ["tokio"] } jsonschema = { version = "0.17.1", features = ["draft202012"] } metrics = "0.21.1" metrics-exporter-prometheus = { version = "0.12.1", features = [] } nohash-hasher = "0.2.0" opentelemetry = { version = "0.20.0", features = ["rt-tokio"] } opentelemetry-otlp = "0.13.0" rand = "0.8.5" reqwest = { version = "0.11.20", features = [] } serde = "1.0.188" serde_json = "1.0.107" thiserror = "1.0.48" tokenizers = { version = "0.15.1", features = ["http"] } tokio = { version = "1.32.0", features = ["rt", "rt-multi-thread", "parking_lot", "signal", "sync"] } tokio-stream = "0.1.14" tower-http = { version = "0.4.4", features = ["cors"] } tracing = "0.1.37" tracing-opentelemetry = "0.21.0" tracing-subscriber = { version = "0.3.17", features = ["json", "env-filter"] } utoipa = { version = "3.5.0", features = ["axum_extras"] } utoipa-swagger-ui = { version = "3.1.5", features = ["axum"] } ngrok = { version = "0.13.1", features = ["axum"], optional = true } init-tracing-opentelemetry = { version = "0.14.1", features = ["opentelemetry-otlp"] } minijinja = { git = "https://github.com/mitsuhiko/minijinja.git", branch = "main", commit = "5cd4efb" } futures-util = "0.3.30" [build-dependencies] vergen = { version = "8.2.5", features = ["build", "git", "gitcl"] } [features] default = ["ngrok"] ngrok = ["dep:ngrok"] google = []
text-generation-inference/router/Cargo.toml/0
{ "file_path": "text-generation-inference/router/Cargo.toml", "repo_id": "text-generation-inference", "token_count": 796 }
219
/// HTTP Server logic use crate::health::Health; use crate::infer::{InferError, InferResponse, InferStreamResponse}; use crate::validation::ValidationError; use crate::{ BestOfSequence, Details, ErrorResponse, FinishReason, GenerateParameters, GenerateRequest, GenerateResponse, GrammarType, HubModelInfo, HubTokenizerConfig, Infer, Info, Message, PrefillToken, SimpleToken, StreamDetails, StreamResponse, Token, TokenizeResponse, Usage, Validation, }; use crate::{ ChatCompletion, ChatCompletionChoice, ChatCompletionChunk, ChatCompletionComplete, ChatCompletionDelta, ChatCompletionLogprob, ChatCompletionLogprobs, ChatCompletionTopLogprob, ChatRequest, CompatGenerateRequest, Completion, CompletionComplete, CompletionCompleteChunk, CompletionRequest, VertexRequest, VertexResponse, }; use crate::{FunctionDefinition, FunctionRef, FunctionsMap, Properties, ToolCall, ToolType, Tools}; use axum::extract::Extension; use axum::http::{HeaderMap, Method, StatusCode}; use axum::response::sse::{Event, KeepAlive, Sse}; use axum::response::{IntoResponse, Response}; use axum::routing::{get, post}; use axum::{http, Json, Router}; use axum_tracing_opentelemetry::middleware::OtelAxumLayer; use futures::stream::FuturesUnordered; use futures::stream::StreamExt; use futures::Stream; use futures::TryStreamExt; use metrics_exporter_prometheus::{Matcher, PrometheusBuilder, PrometheusHandle}; use serde_json::Value; use std::collections::HashMap; use std::convert::Infallible; use std::net::SocketAddr; use std::sync::atomic::AtomicBool; use std::sync::Arc; use text_generation_client::{ShardInfo, ShardedClient}; use tokenizers::Tokenizer; use tokio::signal; use tokio::time::Instant; use tower_http::cors::{AllowOrigin, CorsLayer}; use tracing::{info_span, instrument, Instrument}; use utoipa::OpenApi; use utoipa_swagger_ui::SwaggerUi; /// Generate tokens if `stream == false` or a stream of token if `stream == true` #[utoipa::path( post, tag = "Text Generation Inference", path = "/", request_body = CompatGenerateRequest, responses( (status = 200, description = "Generated Text", content( ("application/json" = GenerateResponse), ("text/event-stream" = StreamResponse), )), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"})), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"})), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! 
({"error": "Incomplete generation"})), ) )] #[instrument(skip(infer, req))] async fn compat_generate( Extension(default_return_full_text): Extension<bool>, infer: Extension<Infer>, compute_type: Extension<ComputeType>, Json(mut req): Json<CompatGenerateRequest>, ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> { // default return_full_text given the pipeline_tag if req.parameters.return_full_text.is_none() { req.parameters.return_full_text = Some(default_return_full_text) } // switch on stream if req.stream { Ok(generate_stream(infer, compute_type, Json(req.into())) .await .into_response()) } else { let (headers, Json(generation)) = generate(infer, compute_type, Json(req.into())).await?; // wrap generation inside a Vec to match api-inference Ok((headers, Json(vec![generation])).into_response()) } } /// Text Generation Inference endpoint info #[utoipa::path( get, tag = "Text Generation Inference", path = "/info", responses((status = 200, description = "Served model info", body = Info)) )] #[instrument] async fn get_model_info(info: Extension<Info>) -> Json<Info> { Json(info.0) } #[utoipa::path( get, tag = "Text Generation Inference", path = "/health", responses( (status = 200, description = "Everything is working fine"), (status = 503, description = "Text generation inference is down", body = ErrorResponse, example = json ! ({"error": "unhealthy", "error_type": "healthcheck"})), ) )] #[instrument(skip(health))] /// Health check method async fn health(mut health: Extension<Health>) -> Result<(), (StatusCode, Json<ErrorResponse>)> { match health.check().await { true => Ok(()), false => Err(( StatusCode::SERVICE_UNAVAILABLE, Json(ErrorResponse { error: "unhealthy".to_string(), error_type: "healthcheck".to_string(), }), )), } } /// Generate tokens #[utoipa::path( post, tag = "Text Generation Inference", path = "/generate", request_body = GenerateRequest, responses( (status = 200, description = "Generated Text", body = GenerateResponse), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"})), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"})), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! ({"error": "Incomplete generation"})), ) )] #[instrument( skip_all, fields( parameters = ? 
req.parameters, total_time, validation_time, queue_time, inference_time, time_per_token, seed, ) )] async fn generate( infer: Extension<Infer>, Extension(ComputeType(compute_type)): Extension<ComputeType>, Json(req): Json<GenerateRequest>, ) -> Result<(HeaderMap, Json<GenerateResponse>), (StatusCode, Json<ErrorResponse>)> { let span = tracing::Span::current(); let start_time = Instant::now(); metrics::increment_counter!("tgi_request_count"); tracing::debug!("Input: {}", req.inputs); let compute_characters = req.inputs.chars().count(); let mut add_prompt = None; if req.parameters.return_full_text.unwrap_or(false) { add_prompt = Some(req.inputs.clone()); } let details: bool = req.parameters.details || req.parameters.decoder_input_details; // Inference let (response, best_of_responses) = match req.parameters.best_of { Some(best_of) if best_of > 1 => { let (response, best_of_responses) = infer.generate_best_of(req, best_of).await?; (response, Some(best_of_responses)) } _ => (infer.generate(req).await?, None), }; // Token details let input_length = response._input_length; let details = match details { true => { // convert best_of_responses let best_of_sequences = best_of_responses.map(|responses: Vec<InferResponse>| { responses .into_iter() .map(|response: InferResponse| { // Add prompt if return_full_text let mut output_text = response.generated_text.text; if let Some(prompt) = &add_prompt { output_text = prompt.clone() + &output_text; } BestOfSequence { generated_text: output_text, finish_reason: FinishReason::from( response.generated_text.finish_reason, ), generated_tokens: response.generated_text.generated_tokens, prefill: response.prefill, tokens: response.tokens, top_tokens: response.top_tokens, seed: response.generated_text.seed, } }) .collect() }); Some(Details { finish_reason: FinishReason::from(response.generated_text.finish_reason), generated_tokens: response.generated_text.generated_tokens, prefill: response.prefill, tokens: response.tokens, seed: response.generated_text.seed, best_of_sequences, top_tokens: response.top_tokens, }) } false => None, }; // Timings let total_time = start_time.elapsed(); let validation_time = response.queued - start_time; let queue_time = response.start - response.queued; let inference_time = Instant::now() - response.start; let time_per_token = inference_time / response.generated_text.generated_tokens; // Tracing metadata span.record("total_time", format!("{total_time:?}")); span.record("validation_time", format!("{validation_time:?}")); span.record("queue_time", format!("{queue_time:?}")); span.record("inference_time", format!("{inference_time:?}")); span.record("time_per_token", format!("{time_per_token:?}")); span.record("seed", format!("{:?}", response.generated_text.seed)); // Headers let mut headers = HeaderMap::new(); headers.insert("x-compute-type", compute_type.parse().unwrap()); headers.insert( "x-compute-time", total_time.as_secs_f64().to_string().parse().unwrap(), ); headers.insert( "x-compute-characters", compute_characters.to_string().parse().unwrap(), ); headers.insert( "x-total-time", total_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-validation-time", validation_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-queue-time", queue_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-inference-time", inference_time.as_millis().to_string().parse().unwrap(), ); headers.insert( "x-time-per-token", time_per_token.as_millis().to_string().parse().unwrap(), ); 
headers.insert("x-prompt-tokens", input_length.into()); headers.insert( "x-generated-tokens", response.generated_text.generated_tokens.into(), ); // Metrics metrics::increment_counter!("tgi_request_success"); metrics::histogram!("tgi_request_duration", total_time.as_secs_f64()); metrics::histogram!( "tgi_request_validation_duration", validation_time.as_secs_f64() ); metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64()); metrics::histogram!( "tgi_request_inference_duration", inference_time.as_secs_f64() ); metrics::histogram!( "tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64() ); metrics::histogram!( "tgi_request_generated_tokens", response.generated_text.generated_tokens as f64 ); // Send response let mut output_text = response.generated_text.text; if let Some(prompt) = add_prompt { output_text = prompt + &output_text; } tracing::debug!("Output: {}", output_text); tracing::info!("Success"); let response = GenerateResponse { generated_text: output_text, details, }; Ok((headers, Json(response))) } /// Generate a stream of token using Server-Sent Events #[utoipa::path( post, tag = "Text Generation Inference", path = "/generate_stream", request_body = GenerateRequest, responses( (status = 200, description = "Generated Text", body = StreamResponse, content_type = "text/event-stream"), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"}), content_type = "text/event-stream"), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"}), content_type = "text/event-stream"), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"}), content_type = "text/event-stream"), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! ({"error": "Incomplete generation"}), content_type = "text/event-stream"), ) )] #[instrument( skip_all, fields( parameters = ? req.parameters, total_time, validation_time, queue_time, inference_time, time_per_token, seed, ) )] async fn generate_stream( Extension(infer): Extension<Infer>, Extension(compute_type): Extension<ComputeType>, Json(req): Json<GenerateRequest>, ) -> ( HeaderMap, Sse<impl Stream<Item = Result<Event, Infallible>>>, ) { let on_message_callback = |stream_token: StreamResponse| { let event = Event::default(); event.json_data(stream_token).unwrap() }; let (headers, response_stream) = generate_stream_internal(infer, compute_type, Json(req), on_message_callback).await; let sse = Sse::new(response_stream).keep_alive(KeepAlive::default()); (headers, sse) } async fn generate_stream_internal( infer: Infer, ComputeType(compute_type): ComputeType, Json(req): Json<GenerateRequest>, on_message_callback: impl Fn(StreamResponse) -> Event, ) -> (HeaderMap, impl Stream<Item = Result<Event, Infallible>>) { let span = tracing::Span::current(); let start_time = Instant::now(); metrics::increment_counter!("tgi_request_count"); tracing::debug!("Input: {}", req.inputs); let compute_characters = req.inputs.chars().count(); let mut headers = HeaderMap::new(); headers.insert("x-compute-type", compute_type.parse().unwrap()); headers.insert( "x-compute-characters", compute_characters.to_string().parse().unwrap(), ); headers.insert("X-Accel-Buffering", "no".parse().unwrap()); let stream = async_stream::stream! 
{ // Inference let mut end_reached = false; let mut error = false; let mut add_prompt = None; if req.parameters.return_full_text.unwrap_or(false) { add_prompt = Some(req.inputs.clone()); } let details = req.parameters.details; let best_of = req.parameters.best_of.unwrap_or(1); if best_of != 1 { let err = InferError::from(ValidationError::BestOfStream); metrics::increment_counter!("tgi_request_failure", "err" => "validation"); tracing::error!("{err}"); yield Ok(Event::from(err)); } else if req.parameters.decoder_input_details { let err = InferError::from(ValidationError::PrefillDetailsStream); metrics::increment_counter!("tgi_request_failure", "err" => "validation"); tracing::error!("{err}"); yield Ok(Event::from(err)); } else { match infer.generate_stream(req).instrument(info_span!(parent: &span, "async_stream")).await { // Keep permit as long as generate_stream lives Ok((_permit, _input_length, mut response_stream)) => { let mut index = 0; // Server-Sent Event stream while let Some(response) = response_stream.next().await { index += 1; match response { Ok(response) => { match response { // Prefill is ignored InferStreamResponse::Prefill(_) => {} // Yield event for every new token InferStreamResponse::Intermediate{ token, top_tokens, } => { tracing::debug!(parent: &span, "Token: {:?}", token); // StreamResponse let stream_token = StreamResponse { index, token, top_tokens, generated_text: None, details: None, }; let event = on_message_callback(stream_token); yield Ok(event); } // Yield event for last token and compute timings InferStreamResponse::End { token, generated_text, start, queued, top_tokens, } => { // Token details let details = match details { true => Some(StreamDetails { finish_reason: FinishReason::from(generated_text.finish_reason), generated_tokens: generated_text.generated_tokens, seed: generated_text.seed, }), false => None, }; // Timings let total_time = start_time.elapsed(); let validation_time = queued - start_time; let queue_time = start - queued; let inference_time = Instant::now() - start; let time_per_token = inference_time / generated_text.generated_tokens; // Tracing metadata span.record("total_time", format!("{total_time:?}")); span.record("validation_time", format!("{validation_time:?}")); span.record("queue_time", format!("{queue_time:?}")); span.record("inference_time", format!("{inference_time:?}")); span.record("time_per_token", format!("{time_per_token:?}")); span.record("seed", format!("{:?}", generated_text.seed)); // Metrics metrics::increment_counter!("tgi_request_success"); metrics::histogram!("tgi_request_duration", total_time.as_secs_f64()); metrics::histogram!("tgi_request_validation_duration", validation_time.as_secs_f64()); metrics::histogram!("tgi_request_queue_duration", queue_time.as_secs_f64()); metrics::histogram!("tgi_request_inference_duration", inference_time.as_secs_f64()); metrics::histogram!("tgi_request_mean_time_per_token_duration", time_per_token.as_secs_f64()); metrics::histogram!("tgi_request_generated_tokens", generated_text.generated_tokens as f64); // StreamResponse end_reached = true; let mut output_text = generated_text.text; if let Some(prompt) = add_prompt { output_text = prompt + &output_text; } tracing::debug!(parent: &span, "Output: {}", output_text); tracing::info!(parent: &span, "Success"); let stream_token = StreamResponse { index, token, top_tokens, generated_text: Some(output_text), details }; let event = on_message_callback(stream_token); yield Ok(event); break; } } } // yield error Err(err) => { error = true; yield 
Ok(Event::from(err)); break; } } } }, // yield error Err(err) => { error = true; yield Ok(Event::from(err)); } } // Check if generation reached the end // Skip if we already sent an error if !end_reached && !error { let err = InferError::IncompleteGeneration; metrics::increment_counter!("tgi_request_failure", "err" => "incomplete"); tracing::error!("{err}"); yield Ok(Event::from(err)); } } }; (headers, stream) } /// Generate tokens #[utoipa::path( post, tag = "Text Generation Inference", path = "/v1/completions", request_body = CompletionRequest, responses( (status = 200, description = "Generated Text", body = ChatCompletionChunk), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"})), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"})), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! ({"error": "Incomplete generation"})), ) )] #[instrument( skip_all, fields( // parameters = ? req.parameters, total_time, validation_time, queue_time, inference_time, time_per_token, seed, ) )] async fn completions( Extension(infer): Extension<Infer>, Extension(compute_type): Extension<ComputeType>, Extension(info): Extension<Info>, Json(req): Json<CompletionRequest>, ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> { metrics::increment_counter!("tgi_request_count"); let stream = req.stream; let max_new_tokens = req.max_tokens.or(Some(100)); let seed = req.seed; // if suffix is present throw an error if req.suffix.is_some() { metrics::increment_counter!("tgi_request_failure", "err" => "validation"); return Err(( StatusCode::UNPROCESSABLE_ENTITY, Json(ErrorResponse { error: "Suffix is not supported and can be achieved by preprocessing the prompt." 
.to_string(), error_type: "suffix not supported".to_string(), }), )); } // build the request passing some parameters let generate_request = GenerateRequest { inputs: req.prompt.to_string(), parameters: GenerateParameters { best_of: None, temperature: req.temperature, repetition_penalty: req.repetition_penalty, frequency_penalty: req.frequency_penalty, top_k: None, top_p: req.top_p, typical_p: None, do_sample: true, max_new_tokens, return_full_text: None, stop: Vec::new(), truncate: None, watermark: false, details: true, decoder_input_details: !stream, seed, top_n_tokens: None, grammar: None, }, }; if stream { let on_message_callback = move |stream_token: StreamResponse| { let event = Event::default(); let current_time = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_else(|_| std::time::Duration::from_secs(0)) .as_secs(); event .json_data(CompletionCompleteChunk { id: "".to_string(), object: "text_completion".to_string(), created: current_time, choices: vec![CompletionComplete { finish_reason: "".to_string(), index: 0, logprobs: None, text: stream_token.token.text, }], model: info.model_id.clone(), system_fingerprint: format!( "{}-{}", info.version, info.docker_label.unwrap_or("native") ), }) .map_or_else( |e| { println!("Failed to serialize ChatCompletionChunk: {:?}", e); Event::default() }, |data| data, ) }; let (headers, response_stream) = generate_stream_internal( infer, compute_type, Json(generate_request), on_message_callback, ) .await; let sse = Sse::new(response_stream).keep_alive(KeepAlive::default()); Ok((headers, sse).into_response()) } else { let (headers, Json(generation)) = generate( Extension(infer), Extension(compute_type), Json(generate_request), ) .await?; let current_time = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_else(|_| std::time::Duration::from_secs(0)) .as_secs(); let details = generation.details.ok_or(( // this should never happen but handle if details are missing unexpectedly StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: "No details in generation".to_string(), error_type: "no details".to_string(), }), ))?; let response = Completion { id: "".to_string(), object: "text_completion".to_string(), created: current_time, model: info.model_id.clone(), system_fingerprint: format!( "{}-{}", info.version, info.docker_label.unwrap_or("native") ), choices: vec![CompletionComplete { finish_reason: details.finish_reason.to_string(), index: 0, logprobs: None, text: generation.generated_text, }], usage: Usage { prompt_tokens: details.prefill.len() as u32, completion_tokens: details.generated_tokens, total_tokens: details.prefill.len() as u32 + details.generated_tokens, }, }; Ok((headers, Json(response)).into_response()) } } /// Generate tokens #[utoipa::path( post, tag = "Text Generation Inference", path = "/v1/chat/completions", request_body = ChatRequest, responses( (status = 200, description = "Generated Text", body = ChatCompletionChunk), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"})), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"})), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! 
({"error": "Incomplete generation"})), ) )] #[instrument( skip_all, fields( // parameters = ? req.parameters, total_time, validation_time, queue_time, inference_time, time_per_token, seed, ) )] async fn chat_completions( Extension(infer): Extension<Infer>, Extension(compute_type): Extension<ComputeType>, Extension(info): Extension<Info>, Json(req): Json<ChatRequest>, ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> { metrics::increment_counter!("tgi_request_count"); let stream = req.stream; let max_new_tokens = req.max_tokens.or(Some(100)); let repetition_penalty = req .presence_penalty // rescale repetition_penalty from (-2.0, 2.0) to (0.0, 4.0) .map(|x| x + 2.0); let logprobs = req.logprobs.unwrap_or(false); let seed = req.seed; let stop = req.stop.unwrap_or_default(); // apply chat template to flatten the request into a single input let mut inputs = match infer.apply_chat_template(req.messages) { Ok(inputs) => inputs, Err(err) => { metrics::increment_counter!("tgi_request_failure", "err" => "validation"); tracing::error!("{err}"); return Err(( StatusCode::UNPROCESSABLE_ENTITY, Json(ErrorResponse { error: err.to_string(), error_type: err.error_type().to_string(), }), )); } }; let tool_grammar = if let Some((req_tools, tool_choice)) = req.tools.zip(req.tool_choice) { let tool_prompt = req.tool_prompt.unwrap_or_default(); let tools_to_use = match tool_choice { ToolType::FunctionName(name) => { vec![req_tools .iter() .find(|tool| tool.function.name == *name) .ok_or_else(|| { ( StatusCode::UNPROCESSABLE_ENTITY, Json(ErrorResponse { error: "Tool choice not found in tool names".to_string(), error_type: "Tool not found".to_string(), }), ) })? .clone()] } ToolType::OneOf => req_tools.to_owned(), }; let functions: HashMap<String, Value> = tools_to_use .iter() .map(|tool| { let func = tool.function.clone(); (func.name, func.parameters) }) .collect(); let tools = Tools { functions_map: FunctionsMap { functions }, properties: Properties { function: tools_to_use .iter() .map(|tool| FunctionRef { ref_path: format!("#/$functions/{}", tool.function.name.clone()), }) .collect(), }, }; let tools_str = serde_json::to_string(&tools).map_err(|e| { ( StatusCode::UNPROCESSABLE_ENTITY, Json(ErrorResponse { error: e.to_string(), error_type: "Input validation error".to_string(), }), ) })?; inputs = format!("{inputs}{tool_prompt}{tools_str}"); Some(GrammarType::Json(serde_json::json!(tools))) } else { None }; // build the request passing some parameters let generate_request = GenerateRequest { inputs: inputs.to_string(), parameters: GenerateParameters { best_of: None, temperature: req.temperature, repetition_penalty, frequency_penalty: req.frequency_penalty, top_k: None, top_p: req.top_p, typical_p: None, do_sample: true, max_new_tokens, return_full_text: None, stop, truncate: None, watermark: false, details: true, decoder_input_details: !stream, seed, top_n_tokens: None, grammar: tool_grammar.clone(), }, }; // static values that will be returned in all cases let model_id = info.model_id.clone(); let system_fingerprint = format!("{}-{}", info.version, info.docker_label.unwrap_or("native")); // switch on stream if stream { // pass this callback to the stream generation and build the required event structure let on_message_callback = move |stream_token: StreamResponse| { let event = Event::default(); let current_time = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_else(|_| std::time::Duration::from_secs(0)) .as_secs(); let logprobs = logprobs.then(|| { 
ChatCompletionLogprobs::from((stream_token.token.clone(), stream_token.top_tokens)) }); // replace the content with the tool calls if grammar is present let (content, tool_calls) = if tool_grammar.is_some() { (None, Some(vec![stream_token.token.text])) } else { (Some(stream_token.token.text), None) }; event .json_data(ChatCompletionChunk::new( model_id.clone(), system_fingerprint.clone(), content, tool_calls, current_time, logprobs, stream_token.details.map(|d| d.finish_reason.to_string()), )) .map_or_else( |e| { println!("Failed to serialize ChatCompletionChunk: {:?}", e); Event::default() }, |data| data, ) }; let (headers, response_stream) = generate_stream_internal( infer, compute_type, Json(generate_request), on_message_callback, ) .await; let sse = Sse::new(response_stream).keep_alive(KeepAlive::default()); Ok((headers, sse).into_response()) } else { let (headers, Json(generation)) = generate( Extension(infer), Extension(compute_type), Json(generate_request), ) .await?; let current_time = std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_else(|_| std::time::Duration::from_secs(0)) .as_secs(); let (tool_calls, output) = if tool_grammar.is_some() { // gen_text should be valid json let gen_text_value: Value = serde_json::from_str(&generation.generated_text).map_err(|e| { ( StatusCode::UNPROCESSABLE_ENTITY, Json(ErrorResponse { error: e.to_string(), error_type: "Input validation error".to_string(), }), ) })?; let tool_call = Some(ToolCall { id: 0, r#type: "function".to_string(), function: FunctionDefinition { description: None, name: "tools".to_string(), parameters: gen_text_value.get("function").map_or_else( || { serde_json::from_str(&generation.generated_text).map_err(|e| { ( StatusCode::UNPROCESSABLE_ENTITY, Json(ErrorResponse { error: e.to_string(), error_type: "Input validation error".to_string(), }), ) }) }, |f| Ok(f.clone()), )?, }, }); (tool_call, None) } else { (None, Some(generation.generated_text)) }; // build the complete response object with the full text let response = ChatCompletion::new( model_id, system_fingerprint, output, current_time, generation.details.unwrap(), logprobs, tool_calls, ); // wrap generation inside a Vec to match api-inference Ok((headers, Json(response)).into_response()) } } /// Generate tokens from Vertex request #[utoipa::path( post, tag = "Text Generation Inference", path = "/vertex", request_body = VertexRequest, responses( (status = 200, description = "Generated Text", body = VertexResponse), (status = 424, description = "Generation Error", body = ErrorResponse, example = json ! ({"error": "Request failed during generation"})), (status = 429, description = "Model is overloaded", body = ErrorResponse, example = json ! ({"error": "Model is overloaded"})), (status = 422, description = "Input validation error", body = ErrorResponse, example = json ! ({"error": "Input validation error"})), (status = 500, description = "Incomplete generation", body = ErrorResponse, example = json ! 
({"error": "Incomplete generation"})), ) )] #[instrument( skip_all, fields( total_time, validation_time, queue_time, inference_time, time_per_token, seed, ) )] async fn vertex_compatibility( Extension(infer): Extension<Infer>, Extension(compute_type): Extension<ComputeType>, Json(req): Json<VertexRequest>, ) -> Result<Response, (StatusCode, Json<ErrorResponse>)> { metrics::increment_counter!("tgi_request_count"); // check that theres at least one instance if req.instances.is_empty() { return Err(( StatusCode::UNPROCESSABLE_ENTITY, Json(ErrorResponse { error: "Input validation error".to_string(), error_type: "Input validation error".to_string(), }), )); } // Process all instances let predictions = req .instances .iter() .map(|instance| { let generate_request = GenerateRequest { inputs: instance.inputs.clone(), parameters: GenerateParameters { do_sample: true, max_new_tokens: instance.parameters.as_ref().and_then(|p| p.max_new_tokens), seed: instance.parameters.as_ref().and_then(|p| p.seed), details: true, decoder_input_details: true, ..Default::default() }, }; async { generate( Extension(infer.clone()), Extension(compute_type.clone()), Json(generate_request), ) .await .map(|(_, Json(generation))| generation.generated_text) .map_err(|_| { ( StatusCode::INTERNAL_SERVER_ERROR, Json(ErrorResponse { error: "Incomplete generation".into(), error_type: "Incomplete generation".into(), }), ) }) } }) .collect::<FuturesUnordered<_>>() .try_collect::<Vec<_>>() .await?; let response = VertexResponse { predictions }; Ok((HeaderMap::new(), Json(response)).into_response()) } /// Tokenize inputs #[utoipa::path( post, tag = "Text Generation Inference", path = "/tokenize", request_body = GenerateRequest, responses( (status = 200, description = "Tokenized ids", body = TokenizeResponse), (status = 404, description = "No tokenizer found", body = ErrorResponse, example = json ! 
({"error": "No fast tokenizer available"})), ) )] #[instrument(skip_all)] async fn tokenize( Extension(infer): Extension<Infer>, Json(req): Json<GenerateRequest>, ) -> Result<Json<TokenizeResponse>, (StatusCode, Json<ErrorResponse>)> { let input = req.inputs.clone(); let encoding = infer.tokenize(req).await?; if let Some(encoding) = encoding { let tokens: Vec<SimpleToken> = encoding .get_ids() .iter() .zip(encoding.get_offsets()) .map(|(&id, &(start, stop))| { let text: String = input.chars().skip(start).take(stop - start).collect(); SimpleToken { id, text, start, stop, } }) .collect(); Ok(Json(TokenizeResponse(tokens))) } else { Err(( StatusCode::NOT_FOUND, Json(ErrorResponse { error: "No fast tokenizer or tokenizer.json for this model".to_string(), error_type: "no fast tokenizer".to_string(), }), )) } } /// Prometheus metrics scrape endpoint #[utoipa::path( get, tag = "Text Generation Inference", path = "/metrics", responses((status = 200, description = "Prometheus Metrics", body = String)) )] async fn metrics(prom_handle: Extension<PrometheusHandle>) -> String { prom_handle.render() } #[derive(Clone, Debug)] pub(crate) struct ComputeType(String); /// Serving method #[allow(clippy::too_many_arguments)] pub async fn run( model_info: HubModelInfo, shard_info: ShardInfo, compat_return_full_text: bool, max_concurrent_requests: usize, max_best_of: usize, max_stop_sequences: usize, max_top_n_tokens: u32, max_input_length: usize, max_total_tokens: usize, waiting_served_ratio: f32, max_batch_prefill_tokens: u32, max_batch_total_tokens: u32, max_waiting_tokens: usize, max_batch_size: Option<usize>, client: ShardedClient, tokenizer: Option<Tokenizer>, validation_workers: usize, addr: SocketAddr, allow_origin: Option<AllowOrigin>, ngrok: bool, ngrok_authtoken: Option<String>, ngrok_edge: Option<String>, tokenizer_config: HubTokenizerConfig, messages_api_enabled: bool, grammar_support: bool, ) -> Result<(), axum::BoxError> { // OpenAPI documentation #[derive(OpenApi)] #[openapi( paths( health, get_model_info, compat_generate, generate, generate_stream, chat_completions, completions, tokenize, metrics, ), components( schemas( Info, CompatGenerateRequest, GenerateRequest, GrammarType, ChatRequest, Message, ChatCompletionComplete, ChatCompletionChoice, ChatCompletionDelta, ChatCompletionChunk, ChatCompletionLogprob, ChatCompletionLogprobs, ChatCompletionTopLogprob, ChatCompletion, CompletionRequest, CompletionComplete, CompletionCompleteChunk, GenerateParameters, PrefillToken, Token, GenerateResponse, TokenizeResponse, SimpleToken, BestOfSequence, Details, FinishReason, StreamResponse, StreamDetails, ErrorResponse, GrammarType, Usage, ) ), tags( (name = "Text Generation Inference", description = "Hugging Face Text Generation Inference API") ), info( title = "Text Generation Inference", license( name = "Apache 2.0", url = "https://www.apache.org/licenses/LICENSE-2.0" ) ) )] struct ApiDoc; // Create state let validation = Validation::new( validation_workers, tokenizer, max_best_of, max_stop_sequences, max_top_n_tokens, max_input_length, max_total_tokens, grammar_support, ); let generation_health = Arc::new(AtomicBool::new(false)); let health_ext = Health::new(client.clone(), generation_health.clone()); let infer = Infer::new( client, validation, waiting_served_ratio, max_batch_prefill_tokens, max_batch_total_tokens, max_waiting_tokens, max_batch_size, max_concurrent_requests, shard_info.requires_padding, shard_info.window_size, shard_info.speculate, generation_health, tokenizer_config, ); // Duration 
buckets let duration_matcher = Matcher::Suffix(String::from("duration")); let n_duration_buckets = 35; let mut duration_buckets = Vec::with_capacity(n_duration_buckets); // Minimum duration in seconds let mut value = 0.0001; for _ in 0..n_duration_buckets { // geometric sequence value *= 1.5; duration_buckets.push(value); } // Input Length buckets let input_length_matcher = Matcher::Full(String::from("tgi_request_input_length")); let input_length_buckets: Vec<f64> = (0..100) .map(|x| (max_input_length as f64 / 100.0) * (x + 1) as f64) .collect(); // Generated tokens buckets let generated_tokens_matcher = Matcher::Full(String::from("tgi_request_generated_tokens")); let generated_tokens_buckets: Vec<f64> = (0..100) .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64) .collect(); // Input Length buckets let max_new_tokens_matcher = Matcher::Full(String::from("tgi_request_max_new_tokens")); let max_new_tokens_buckets: Vec<f64> = (0..100) .map(|x| (max_total_tokens as f64 / 100.0) * (x + 1) as f64) .collect(); // Batch size buckets let batch_size_matcher = Matcher::Full(String::from("tgi_batch_next_size")); let batch_size_buckets: Vec<f64> = (0..1024).map(|x| (x + 1) as f64).collect(); // Speculated tokens buckets let skipped_matcher = Matcher::Full(String::from("tgi_request_skipped_tokens")); let skipped_buckets: Vec<f64> = (0..shard_info.speculate + 1).map(|x| x as f64).collect(); // Prometheus handler let builder = PrometheusBuilder::new() .set_buckets_for_metric(duration_matcher, &duration_buckets) .unwrap() .set_buckets_for_metric(input_length_matcher, &input_length_buckets) .unwrap() .set_buckets_for_metric(generated_tokens_matcher, &generated_tokens_buckets) .unwrap() .set_buckets_for_metric(max_new_tokens_matcher, &max_new_tokens_buckets) .unwrap() .set_buckets_for_metric(batch_size_matcher, &batch_size_buckets) .unwrap() .set_buckets_for_metric(skipped_matcher, &skipped_buckets) .unwrap(); let prom_handle = builder .install_recorder() .expect("failed to install metrics recorder"); // CORS layer let allow_origin = allow_origin.unwrap_or(AllowOrigin::any()); let cors_layer = CorsLayer::new() .allow_methods([Method::GET, Method::POST]) .allow_headers([http::header::CONTENT_TYPE]) .allow_origin(allow_origin); // Endpoint info let info = Info { model_id: model_info.model_id, model_sha: model_info.sha, model_dtype: shard_info.dtype, model_device_type: shard_info.device_type, model_pipeline_tag: model_info.pipeline_tag, max_concurrent_requests, max_best_of, max_stop_sequences, max_input_length, max_total_tokens, waiting_served_ratio, max_batch_total_tokens, max_waiting_tokens, max_batch_size, validation_workers, version: env!("CARGO_PKG_VERSION"), sha: option_env!("VERGEN_GIT_SHA"), docker_label: option_env!("DOCKER_LABEL"), }; // Define VertextApiDoc conditionally only if the "google" feature is enabled let doc = { // avoid `mut` if possible #[cfg(feature = "google")] { use crate::VertexInstance; #[derive(OpenApi)] #[openapi( paths(vertex_compatibility), components(schemas(VertexInstance, VertexRequest, VertexResponse)) )] struct VertextApiDoc; // limiting mutability to the smallest scope necessary let mut doc = ApiDoc::openapi(); doc.merge(VertextApiDoc::openapi()); doc } #[cfg(not(feature = "google"))] ApiDoc::openapi() }; // Configure Swagger UI let swagger_ui = SwaggerUi::new("/docs").url("/api-doc/openapi.json", doc); // Define base and health routes let base_routes = Router::new() .route("/", post(compat_generate)) .route("/", get(health)) .route("/info", get(get_model_info)) 
.route("/generate", post(generate)) .route("/generate_stream", post(generate_stream)) .route("/v1/chat/completions", post(chat_completions)) .route("/v1/completions", post(completions)) .route("/vertex", post(vertex_compatibility)) .route("/tokenize", post(tokenize)) .route("/health", get(health)) .route("/ping", get(health)) .route("/metrics", get(metrics)); // Conditional AWS Sagemaker route let aws_sagemaker_route = if messages_api_enabled { Router::new().route("/invocations", post(chat_completions)) // Use 'chat_completions' for OAI_ENABLED } else { Router::new().route("/invocations", post(compat_generate)) // Use 'compat_generate' otherwise }; let compute_type = ComputeType(std::env::var("COMPUTE_TYPE").unwrap_or("gpu+optimized".to_string())); // Combine routes and layers let mut app = Router::new() .merge(swagger_ui) .merge(base_routes) .merge(aws_sagemaker_route); #[cfg(feature = "google")] { tracing::info!("Built with `google` feature"); tracing::info!( "Environment variables `AIP_PREDICT_ROUTE` and `AIP_HEALTH_ROUTE` will be respected." ); if let Ok(env_predict_route) = std::env::var("AIP_PREDICT_ROUTE") { app = app.route(&env_predict_route, post(vertex_compatibility)); } if let Ok(env_health_route) = std::env::var("AIP_HEALTH_ROUTE") { app = app.route(&env_health_route, get(health)); } } // add layers after routes app = app .layer(Extension(info)) .layer(Extension(health_ext.clone())) .layer(Extension(compat_return_full_text)) .layer(Extension(infer)) .layer(Extension(compute_type)) .layer(Extension(prom_handle.clone())) .layer(OtelAxumLayer::default()) .layer(cors_layer); if ngrok { #[cfg(feature = "ngrok")] { use ngrok::config::TunnelBuilder; let _ = addr; let authtoken = ngrok_authtoken.expect("`ngrok-authtoken` must be set when using ngrok tunneling"); let edge = ngrok_edge.expect("`ngrok-edge` must be set when using ngrok tunneling"); let tunnel = ngrok::Session::builder() .authtoken(authtoken) .connect() .await .unwrap() .labeled_tunnel() .label("edge", edge); let listener = tunnel.listen().await.unwrap(); // Run prom metrics and health locally too tokio::spawn( axum::Server::bind(&addr) .serve( Router::new() .route("/health", get(health)) .route("/metrics", get(metrics)) .layer(Extension(health_ext)) .layer(Extension(prom_handle)) .into_make_service(), ) //Wait until all requests are finished to shut down .with_graceful_shutdown(shutdown_signal()), ); // Run server axum::Server::builder(listener) .serve(app.into_make_service()) //Wait until all requests are finished to shut down .with_graceful_shutdown(shutdown_signal()) .await?; } #[cfg(not(feature = "ngrok"))] { let _ngrok_authtoken = ngrok_authtoken; let _ngrok_domain = ngrok_domain; let _ngrok_username = ngrok_username; let _ngrok_password = ngrok_password; panic!("`text-generation-router` was compiled without the `ngrok` feature"); } } else { // Run server axum::Server::bind(&addr) .serve(app.into_make_service()) // Wait until all requests are finished to shut down .with_graceful_shutdown(shutdown_signal()) .await?; } Ok(()) } /// Shutdown signal handler async fn shutdown_signal() { let ctrl_c = async { signal::ctrl_c() .await .expect("failed to install Ctrl+C handler"); }; #[cfg(unix)] let terminate = async { signal::unix::signal(signal::unix::SignalKind::terminate()) .expect("failed to install signal handler") .recv() .await; }; #[cfg(not(unix))] let terminate = std::future::pending::<()>(); tokio::select! 
{ _ = ctrl_c => {}, _ = terminate => {}, } tracing::info!("signal received, starting graceful shutdown"); opentelemetry::global::shutdown_tracer_provider(); } impl From<i32> for FinishReason { fn from(finish_reason: i32) -> Self { let finish_reason = text_generation_client::FinishReason::try_from(finish_reason).unwrap(); match finish_reason { text_generation_client::FinishReason::Length => FinishReason::Length, text_generation_client::FinishReason::EosToken => FinishReason::EndOfSequenceToken, text_generation_client::FinishReason::StopSequence => FinishReason::StopSequence, } } } /// Convert to Axum supported formats impl From<InferError> for (StatusCode, Json<ErrorResponse>) { fn from(err: InferError) -> Self { let status_code = match err { InferError::GenerationError(_) => StatusCode::FAILED_DEPENDENCY, InferError::Overloaded(_) => StatusCode::TOO_MANY_REQUESTS, InferError::ValidationError(_) => StatusCode::UNPROCESSABLE_ENTITY, InferError::IncompleteGeneration => StatusCode::INTERNAL_SERVER_ERROR, InferError::TemplateError(_) => StatusCode::UNPROCESSABLE_ENTITY, }; ( status_code, Json(ErrorResponse { error: err.to_string(), error_type: err.error_type().to_string(), }), ) } } impl From<InferError> for Event { fn from(err: InferError) -> Self { Event::default() .json_data(ErrorResponse { error: err.to_string(), error_type: err.error_type().to_string(), }) .unwrap() } }
text-generation-inference/router/src/server.rs/0
{ "file_path": "text-generation-inference/router/src/server.rs", "repo_id": "text-generation-inference", "token_count": 27571 }
220
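A short aside on the router code above: `shutdown_signal` races Ctrl+C against SIGTERM and only then lets axum drain in-flight requests. The following is a minimal, hypothetical Python asyncio sketch of the same graceful-shutdown pattern (Unix-only signal handling; none of these names come from the repository):

import asyncio
import signal


async def serve_forever() -> None:
    # Stand-in for the real request loop (axum's `serve` in the Rust router).
    while True:
        await asyncio.sleep(3600)


async def main() -> None:
    loop = asyncio.get_running_loop()
    stop = asyncio.Event()

    # Rough analogue of `tokio::select!` over ctrl_c and terminate.
    for sig in (signal.SIGINT, signal.SIGTERM):
        loop.add_signal_handler(sig, stop.set)

    server = asyncio.create_task(serve_forever())
    await stop.wait()
    print("signal received, starting graceful shutdown")

    # A real server would stop accepting connections and drain in-flight work here.
    server.cancel()
    try:
        await server
    except asyncio.CancelledError:
        pass


if __name__ == "__main__":
    asyncio.run(main())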
// Adapted from turboderp exllama: https://github.com/turboderp/exllama

#ifndef _cuda_compat_cuh
#define _cuda_compat_cuh

// atomicAdd for half types, to support CC < 7.x

__device__ __forceinline__ void atomicAdd_half(half* address, half val)
{
    unsigned int * address_as_ui = (unsigned int *) ((char *)address - ((size_t)address & 2));
    unsigned int old = *address_as_ui;
    unsigned int assumed;

    do
    {
        assumed = old;
        __half_raw hsum;
        hsum.x = (size_t)address & 2 ? (old >> 16) : (old & 0xffff);
        half tmpres = __hadd(hsum, val);
        hsum = __half_raw(tmpres);
        old = (size_t)address & 2 ? (old & 0xffff) | (hsum.x << 16) : (old & 0xffff0000) | hsum.x;
        old = atomicCAS(address_as_ui, assumed, old);
    }
    while (assumed != old);
}

// atomicAdd for half2 types

__device__ __forceinline__ void atomicAdd_half2(half2* address, half2 val)
{
    unsigned int* address_as_ui = (unsigned int*)address;
    unsigned int old = *address_as_ui;
    unsigned int assumed;
    do
    {
        assumed = old;
        half2 old_val = *((half2*)&old);
        half2 new_val = __hadd2(old_val, val);
        old = atomicCAS(address_as_ui, assumed, *((unsigned int*)&new_val));
    }
    while (assumed != old);
}

//

#if defined(__CUDA_ARCH__) || defined(USE_ROCM)
#if __CUDA_ARCH__ < 700 || defined(USE_ROCM)

__device__ __forceinline__ void atomicAdd(half* address, half val) { atomicAdd_half(address, val); }

#if __CUDA_ARCH__ < 600 || defined(USE_ROCM)
__device__ __forceinline__ void atomicAdd(half2* address, half2 val) { atomicAdd_half2(address, val); }
#endif

#endif
#endif

#endif
text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh/0
{ "file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cu_compat.cuh", "repo_id": "text-generation-inference", "token_count": 692 }
221
#ifndef _util_h
#define _util_h

#define DBGS(__x) printf("%s\n", __x)
#define DBGI(__x) printf("%s: %i\n", #__x, __x)
#define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y)
#define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGF(__x) printf("%s: %f\n", #__x, __x)
#define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y)
#define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z)

#endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cpp/util.h", "repo_id": "text-generation-inference", "token_count": 296 }
222
#ifndef _util_cuh
#define _util_cuh

#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include <ATen/cuda/CUDAContext.h>

#define DIVIDE(x, size) (((x) + (size) - 1) / (size))

#define DBGS(__x) printf("%s\n", __x)
#define DBGI(__x) printf("%s: %i\n", #__x, __x)
#define DBGI2(__x, __y) printf("%s, %s: %i, %i\n", #__x, #__y, __x, __y)
#define DBGI3(__x, __y, __z) printf("%s, %s, %s: %i, %i, %i\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGX(__x) printf("%s: %x\n", #__x, __x)
#define DBGX2(__x, __y) printf("%s, %s: %x, %x\n", #__x, #__y, __x, __y)
#define DBGX3(__x, __y, __z) printf("%s, %s, %s: %x, %x, %x\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGF(__x) printf("%s: %f\n", #__x, __x)
#define DBGF2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __x, __y)
#define DBGF3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __x, __y, __z)
#define DBGH(__x) printf("%s: %f\n", #__x, __half2float(__x))
#define DBGH2(__x, __y) printf("%s, %s: %f, %f\n", #__x, #__y, __half2float(__x), __half2float(__y))
#define DBGH3(__x, __y, __z) printf("%s, %s, %s: %f, %f, %f\n", #__x, #__y, #__z, __half2float(__x), __half2float(__y), __half2float(__z))
#define DBGIH(__x, __y) printf("%s, %s: %i, %f\n", #__x, #__y, __x, __half2float(__y))
#define DBGIH2(__x, __y, __z) printf("%s, %s, %s: %i, %f, %f\n", #__x, #__y, #__z, __x, __half2float(__y), __half2float(__z))

__forceinline__ __device__ half dq_scale_(const int qs, const half max_scale)
{
    half qs_h = __hmul(__int2half_rn(qs + 1), __float2half_rn(1.0f / 16.0f));
    qs_h = __hmul(qs_h, qs_h);
    qs_h = __hmul(qs_h, max_scale);
    return qs_h;
}

__forceinline__ __device__ float clamp(float x, float a, float b)
{
    return fmaxf(a, fminf(b, x));
}

#define cuda_check(ans) { gpu_assert((ans), __FILE__, __LINE__); }
inline void gpu_assert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"CUDA error: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}

void print_global_mem(const half* ptr, int rows, int columns, int stride);

#endif
text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh/0
{ "file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/util.cuh", "repo_id": "text-generation-inference", "token_count": 1115 }
223
import torch
from text_generation_server.utils.layers import (
    TensorParallelEmbedding,
)


class ProcessGroup:
    def __init__(self, rank: int, world_size: int):
        self._rank = rank
        self.world_size = world_size

    def size(self) -> int:
        return self.world_size

    def rank(self) -> int:
        return self._rank


class Weights:
    def __init__(self, rank: int, world_size: int, vocab_size: int, hidden_dim: int):
        self.weight = (
            torch.arange(vocab_size * hidden_dim).float().view(vocab_size, hidden_dim)
        )
        self.process_group = ProcessGroup(rank, world_size)

    def get_partial_sharded(self, name: str, dim: int):
        assert dim == 0

        rank = self.process_group.rank()
        world_size = self.process_group.size()
        size = self.weight.shape[dim]

        block_size = (size + world_size - 1) // world_size
        start = rank * block_size
        stop = (rank + 1) * block_size
        return self.weight[start:stop]

    def get_shape(self, name: str):
        return self.weight.shape


def test_weight_hub_files_offline_error():
    vocab_size = 17
    weights = Weights(rank=0, world_size=1, vocab_size=vocab_size, hidden_dim=256)
    embeddings = TensorParallelEmbedding("", weights)

    input_ids = torch.arange(vocab_size)
    output = embeddings.forward(input_ids)
    assert embeddings.min_id == 0
    assert embeddings.max_id == 17
    torch.testing.assert_close(output, torch.arange(256 * 17).float().view(17, 256))

    weights_0_2 = Weights(rank=0, world_size=2, vocab_size=vocab_size, hidden_dim=256)
    weights_1_2 = Weights(rank=1, world_size=2, vocab_size=vocab_size, hidden_dim=256)
    embeddings_0_2 = TensorParallelEmbedding("", weights_0_2, reduce=False)
    assert embeddings_0_2.min_id == 0
    assert embeddings_0_2.max_id == 9
    torch.testing.assert_close(
        embeddings_0_2.weight,
        torch.cat([torch.arange(9 * 256), torch.zeros(256)], dim=0)
        .view(10, 256)
        .float(),
    )
    embeddings_1_2 = TensorParallelEmbedding("", weights_1_2, reduce=False)
    assert embeddings_1_2.min_id == 9
    assert embeddings_1_2.max_id == 17
    torch.testing.assert_close(
        embeddings_1_2.weight,
        torch.cat([torch.arange(8 * 256) + 9 * 256, torch.zeros(256)], dim=0)
        .view(9, 256)
        .float(),
    )
    output_tp_0 = embeddings_0_2.forward(input_ids)
    output_tp_1 = embeddings_1_2.forward(input_ids)

    torch.testing.assert_close(output, output_tp_0 + output_tp_1)
text-generation-inference/server/tests/utils/test_layers.py/0
{ "file_path": "text-generation-inference/server/tests/utils/test_layers.py", "repo_id": "text-generation-inference", "token_count": 1111 }
224
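The embedding test above depends on each tensor-parallel rank owning a contiguous `[min_id, max_id)` slice of the vocabulary, sized by ceil division. A standalone sketch of that partitioning arithmetic (hypothetical helper name, not the library's API):

def shard_bounds(vocab_size: int, rank: int, world_size: int):
    # Ceil-divide so every rank gets the same block size; the last block may be short.
    block_size = (vocab_size + world_size - 1) // world_size
    start = rank * block_size
    stop = min((rank + 1) * block_size, vocab_size)
    return start, stop


if __name__ == "__main__":
    # Matches the expectations in the test: vocab 17 over 2 shards -> [0, 9) and [9, 17).
    assert shard_bounds(17, 0, 2) == (0, 9)
    assert shard_bounds(17, 1, 2) == (9, 17)
    # A single shard owns the whole vocabulary.
    assert shard_bounds(17, 0, 1) == (0, 17)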
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.distributed import numpy as np from torch import nn from transformers.activations import ACT2FN from transformers.configuration_utils import PretrainedConfig from typing import Optional, List, Tuple from loguru import logger from text_generation_server.utils import paged_attention, flash_attn from text_generation_server.utils.layers import ( FastLinear, FastRMSNorm, TensorParallelRowLinear, TensorParallelColumnLinear, TensorParallelEmbedding, PositionRotaryEmbedding, SpeculativeHead, get_linear, ) HAS_MEGABLOCKS = True try: import stk import megablocks.ops as ops except ImportError: logger.warning("Mixtral: megablocks is not installed") HAS_MEGABLOCKS = False class MixtralConfig(PretrainedConfig): model_type = "mixtral" def __init__( self, vocab_size=32000, hidden_size=4096, intermediate_size=14336, num_hidden_layers=32, num_attention_heads=32, num_key_value_heads=8, hidden_act="silu", max_position_embeddings=4096 * 32, initializer_range=0.02, rms_norm_eps=1e-05, use_cache=True, pad_token_id=None, bos_token_id=1, eos_token_id=2, pretraining_tp=1, tie_word_embeddings=False, rope_theta=10000.0, sliding_window=None, num_experts_per_tok=2, num_local_experts=8, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.hidden_size = hidden_size self.intermediate_size = intermediate_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.sliding_window = sliding_window # for backward compatibility if num_key_value_heads is None: num_key_value_heads = num_attention_heads self.num_key_value_heads = num_key_value_heads self.hidden_act = hidden_act self.initializer_range = initializer_range self.rms_norm_eps = rms_norm_eps self.pretraining_tp = pretraining_tp self.use_cache = use_cache self.rope_theta = rope_theta self.num_experts_per_tok = num_experts_per_tok self.num_local_experts = num_local_experts super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs, ) def promote_scalar(x: torch.Tensor) -> torch.Tensor: return x.view(1) if len(x.size()) == 0 else x def load_attention(config, prefix, weights): if config.num_attention_heads != config.num_key_value_heads: return _load_gqa(config, prefix, weights) else: return TensorParallelColumnLinear.load_multi( config, prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], dim=0, weights=weights, bias=False, ) def _load_gqa(config, prefix: str, weights): assert config.hidden_size % config.num_attention_heads == 0 assert 
config.num_attention_heads % weights.process_group.size() == 0 weight = weights.get_multi_weights_col( prefixes=[f"{prefix}.q_proj", f"{prefix}.k_proj", f"{prefix}.v_proj"], quantize=config.quantize, dim=0, ) if config.quantize not in ["gptq", "awq"]: weight = weight.to(dtype=weights.dtype).to(device=weights.device) head_size = config.hidden_size // config.num_attention_heads num_heads = config.num_attention_heads // weights.process_group.size() num_key_value_heads = config.num_key_value_heads // weights.process_group.size() assert list(weight.shape) == [ (num_heads + 2 * num_key_value_heads) * head_size, config.hidden_size, ], f"{list(weight.shape)} != {[(num_heads + 2 * config.num_key_value_heads) * head_size, config.hidden_size]}" return TensorParallelColumnLinear( get_linear(weight, bias=None, quantize=config.quantize) ) def _load_experts(config, prefix, mat, weights): if config.quantize is not None: raise NotImplementedError("Mixtral does not support weight quantization yet.") assert mat in ["w1", "w2", "w3"] world_size = weights.process_group.size() rank = weights.process_group.rank() assert ( config.intermediate_size % world_size == 0 ), f"The chosen size {config.intermediate_size} is not compatible with sharding on {world_size} shards" block_size = config.intermediate_size // world_size start = rank * block_size stop = (rank + 1) * block_size tensor = torch.empty( (config.num_local_experts * block_size, config.hidden_size), dtype=weights.dtype, device=weights.device, ) for i in range(config.num_local_experts): slice_ = weights._get_slice(f"{prefix}.{i}.{mat}.weight") if mat == "w2": expert_slice = slice_[:, start:stop].t().contiguous() else: expert_slice = slice_[start:stop] tensor[i * block_size : (i + 1) * block_size] = expert_slice.to( dtype=weights.dtype ).to(device=weights.device) return tensor class MixtralAttention(torch.nn.Module): def __init__( self, prefix: str, config, weights, ): super().__init__() self.max_past = ( config.sliding_window if config.sliding_window is not None else -1 ) self.num_heads = config.num_attention_heads self.hidden_size = config.hidden_size self.head_size = self.hidden_size // self.num_heads self.rotary_emb = PositionRotaryEmbedding.static( config=config, dim=self.head_size, base=config.rope_theta, device=weights.device, ) self.softmax_scale = self.head_size**-0.5 if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // weights.process_group.size() self.num_key_value_heads = ( config.num_key_value_heads // weights.process_group.size() ) self.query_key_value = load_attention(config, prefix, weights) self.o_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.o_proj", weights=weights, bias=False, ) self.num_groups = self.num_heads // self.num_key_value_heads self.kv_head_mapping = torch.arange( 0, self.num_key_value_heads, dtype=torch.int32, device=weights.device ).repeat_interleave(self.num_groups) def forward( self, hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ): qkv = self.query_key_value(hidden_states) query, kv = qkv.split( [ self.head_size * self.num_heads, 2 * self.head_size * self.num_key_value_heads, ], dim=1, ) query = query.view(-1, self.num_heads, self.head_size) kv = kv.view(-1, 2, self.num_key_value_heads, self.head_size) self.rotary_emb(query, torch.select(kv, 
dim=1, index=0), cos, sin) if prefill_cache_indices is not None: kv_to_cache = kv[prefill_cache_indices] else: kv_to_cache = kv paged_attention.reshape_and_cache( kv_to_cache[:, 0], kv_to_cache[:, 1], kv_cache[0], kv_cache[1], slots ) # output tensor attn_output = torch.empty_like(query) # Prefill if cu_seqlen_prefill is not None: # flash attention flash_attn.attention( query, torch.select(kv, dim=1, index=0), torch.select(kv, dim=1, index=1), attn_output, cu_seqlen_prefill, max_s, self.softmax_scale, window_size_left=self.max_past, ) # Decode else: paged_attention.attention( attn_output, query, kv_cache[0], kv_cache[1], self.kv_head_mapping, self.softmax_scale, block_tables, input_lengths, max_s, ) return self.o_proj(attn_output.view(-1, self.num_heads * self.head_size)) @torch.jit.script def select_experts(gate_logits: torch.Tensor, top_k: int): # all_probs: (sequence_length, n_experts) and upcast for softmax all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) # weights, selected_experts: (sequence_length, top-k) weights, selected_experts = torch.topk(all_probs, top_k, dim=-1) weights /= weights.sum(dim=-1, keepdim=True) weights = weights.view(-1) selected_experts = selected_experts.view(-1) return selected_experts, weights @torch.jit.script def round_up(x: torch.Tensor, value: int): return torch.div(x + (value - 1), value, rounding_mode="trunc") * value class BlockSparseMoE(nn.Module): """ Built on the paper and library Megablocks as described in https://arxiv.org/abs/2211.15841. This implementation is strictly equivalent to standard MoE with full capacity (no dropped tokens). It's faster since it formulates MoE operations in terms of block-sparse operations to accomodate imbalanced assignments of tokens to experts, whereas standard MoE either (1) drop tokens at the cost of reduced performance or (2) set capacity factor to number of experts and thus waste computation and memory on padding. """ def __init__(self, prefix, config: MixtralConfig, weights): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size // weights.process_group.size() self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok act = config.hidden_act if "gelu" in act: self.act = lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) elif "silu" in act: self.act = torch.nn.functional.silu else: self.act = ACT2FN[act] # gating self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False) # merged expert weights, all of size (n_experts * ffn_dim, hidden_dim) self.w1 = _load_experts(config, f"{prefix}.experts", "w1", weights) self.w2 = _load_experts(config, f"{prefix}.experts", "w2", weights) self.w3 = _load_experts(config, f"{prefix}.experts", "w3", weights) self.offsets = None self.offsets_block_rows = 0 self.process_group = weights.process_group # Calculate the number of bits needed to represent the expert indices # so that we can pass it to radix sort. self.sort_end_bit = max(int(np.ceil(np.log2(self.num_experts))), 1) self.blocking = 128 self.quantize_scatter_num_bits = -1 def topology(self, x: torch.Tensor, padded_bins: torch.Tensor): padded_tokens, _ = x.size() assert padded_tokens % self.blocking == 0 assert self.ffn_dim % self.blocking == 0 # Offsets for the sparse matrix. All rows have the # same number of nonzero blocks dictated by the # dimensionality of a single expert. 
block_rows = padded_tokens // self.blocking blocks_per_row = self.ffn_dim // self.blocking if self.offsets is None or block_rows > self.offsets_block_rows: self.offsets = torch.arange( 0, block_rows * blocks_per_row + 1, blocks_per_row, dtype=torch.int32, device=x.device, ) self.offsets_block_rows = block_rows offsets = self.offsets else: offsets = self.offsets[: block_rows + 1] # Indices for the sparse matrix. The indices for # the intermediate matrix are dynamic depending # on the mapping of tokens to experts. column_indices = ops.topology( padded_bins, self.blocking, block_rows, blocks_per_row ) # For now, use meta init to save the device memory. data = torch.empty( column_indices.numel(), self.blocking, self.blocking, dtype=x.dtype, device="meta", ) shape = (padded_tokens, self.ffn_dim * self.num_experts) row_indices = stk.ops.row_indices(shape, data, offsets, column_indices) return stk.Matrix( shape, data, row_indices, column_indices, offsets, False, False, False, ) def indices_and_padded_bins(self, selected_experts: torch.Tensor): # Sort the expert ids to produce the scatter/gather # indices for the permutation. # selected_experts = selected_experts.int() # returns bin_ids == num of experts for this sequence ? == unique selected experts? # and indices == how to sort tokens? bin_ids, indices = ops.sort(selected_experts, self.sort_end_bit) # bin_ids => [0, 0, 0, 2, 2, ...] => [num_tokens * top_k] # indices => [14, 32, 33, ...] => [num_tokens * top_k] # Histogram the expert ids to identify the number of # tokens routed to each expert. tokens_per_expert = ops.histogram(selected_experts, self.num_experts) # tokens_per_expert => [3, 0, 2, ...] => [num_experts] # Round the token counts up to the block size used in # the matrix muliplications. Caculate the starting # position of each bin. # List of size num_experts padded_tokens_per_expert = round_up(tokens_per_expert, self.blocking) # padded_tokens_per_expert => [128, O, 128, ...] # Cumulative selected experts per token padded_bins = ops.inclusive_cumsum(padded_tokens_per_expert, 0) padded_bins = promote_scalar(padded_bins) # padded_bins => [128, 128, 256, ...] # Calculate the bin bounds for the sorted tokens. bins = ops.inclusive_cumsum(tokens_per_expert, 0) bins = promote_scalar(bins) # bins => [3, 3, 5, ...] 
return indices, bin_ids, bins, padded_bins, tokens_per_expert def sparse_forward(self, x: torch.Tensor) -> torch.Tensor: """ x: (sequence_length, model_dim) gate_logits: (sequence_length, n_experts) """ # optional reshape input_shape = x.shape x = x.view(-1, input_shape[-1]) # gate_logits: (sequence_length, n_experts) gate_logits = self.gate(x) selected_experts, weights = select_experts(gate_logits, self.top_k) ( indices, bin_ids, bins, padded_bins, _, ) = self.indices_and_padded_bins(selected_experts) # Permute tokens and pad to prepare expert computation # (top_k * sequence_length + padding, model_dim) x = ops.padded_gather(x, indices, bin_ids, bins, padded_bins, self.top_k) # Create the sparse matrix topology with torch.no_grad(): topo = self.topology(x, padded_bins) # Perform the expert computation # First Dense x Dense -> Sparse for w1 and w3, # (top_k * sequence_length + padding, ffn_dim * n_experts) x = stk.Matrix( topo.size(), self.act(stk.ops.sdd(x, self.w1.t(), topo).data) * stk.ops.sdd(x, self.w3.t(), topo).data, topo.row_indices, topo.column_indices, topo.offsets, topo.column_indices_t, topo.offsets_t, topo.block_offsets_t, ) # Then Sparse x Dense -> Dense for w2 # (top_k * sequence_length + padding, model_dim) x = stk.ops.dsd(x, self.w2) # Permute back and remove padding # (sequence_length, model_dim) x = ops.padded_scatter( x, indices, bin_ids, weights, bins, padded_bins, self.top_k, self.quantize_scatter_num_bits, ).view(*input_shape) if self.process_group.size() > 1: torch.distributed.all_reduce(x, group=self.process_group) return x.view(*input_shape) def dense_forward(self, x: torch.Tensor) -> torch.Tensor: """ x: (sequence_length, model_dim) gate_logits: (sequence_length, n_experts) """ # optional reshape input_shape = x.shape x = x.view(-1, input_shape[-1]) # gate_logits: (sequence_length, n_experts) gate_logits = self.gate(x) # all_probs: (sequence_length, n_experts) and upcast for softmax all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) if self.top_k < self.num_experts: _, not_selected_experts = torch.topk( all_probs, self.num_experts - self.top_k, largest=False, sorted=False, dim=1, ) # Mask not selected experts all_probs.scatter_(1, not_selected_experts, 0) # Re-normalize weights = all_probs / all_probs.sum(dim=1, keepdim=True) # Expand to [num_experts, sequence_length, model_dim] x = x.view(1, -1, input_shape[-1]).expand(self.num_experts, -1, input_shape[-1]) # Permute to [num_experts, model_dim, ffn_dim] w1 = self.w1.view(self.num_experts, self.ffn_dim, self.hidden_dim).permute( 0, 2, 1 ) w3 = self.w3.view(self.num_experts, self.ffn_dim, self.hidden_dim).permute( 0, 2, 1 ) inter = self.act(torch.bmm(x, w1)) * torch.bmm(x, w3) out = torch.bmm( inter, self.w2.view(self.num_experts, self.ffn_dim, self.hidden_dim) ) # Mask not selected experts out *= weights.t().view(self.num_experts, -1, 1) # Sum experts out = out.sum(0) # Reduce sum if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out def forward(self, x: torch.Tensor) -> torch.Tensor: if len(x) > 256 and HAS_MEGABLOCKS: return self.sparse_forward(x) # This is faster when there is not a lot of tokens return self.dense_forward(x) class DenseMoE(nn.Module): def __init__(self, prefix, config: MixtralConfig, weights): super().__init__() self.hidden_dim = config.hidden_size self.ffn_dim = config.intermediate_size // weights.process_group.size() self.num_experts = config.num_local_experts self.top_k = config.num_experts_per_tok act = 
config.hidden_act if "gelu" in act: self.act = lambda x: torch.nn.functional.gelu( x, approximate=( "tanh" if act in ["gelu_fast", "gelu_pytorch_tanh"] else "none" ), ) elif "silu" in act: self.act = torch.nn.functional.silu else: self.act = ACT2FN[act] # gating self.gate = FastLinear.load(config, f"{prefix}.gate", weights, bias=False) self.w1 = [ TensorParallelColumnLinear.load( config, prefix=f"{prefix}.experts.{i}.w1", weights=weights, bias=False ) for i in range(self.num_experts) ] self.w3 = [ TensorParallelColumnLinear.load( config, prefix=f"{prefix}.experts.{i}.w3", weights=weights, bias=False ) for i in range(self.num_experts) ] self.w2 = [ TensorParallelRowLinear.load( config, prefix=f"{prefix}.experts.{i}.w2", weights=weights, bias=False ) for i in range(self.num_experts) ] self.process_group = weights.process_group def forward(self, x: torch.Tensor) -> torch.Tensor: """ x: (sequence_length, model_dim) gate_logits: (sequence_length, n_experts) """ # optional reshape input_shape = x.shape x = x.view(-1, input_shape[-1]) # gate_logits: (sequence_length, n_experts) gate_logits = self.gate(x) # all_probs: (sequence_length, n_experts) and upcast for softmax all_probs = torch.nn.functional.softmax(gate_logits, dim=1, dtype=torch.float) if self.top_k < self.num_experts: _, not_selected_experts = torch.topk( all_probs, self.num_experts - self.top_k, largest=False, sorted=False, dim=1, ) # Mask not selected experts all_probs.scatter_(1, not_selected_experts, 0) # Re-normalize weights = all_probs / all_probs.sum(dim=1, keepdim=True) # Final output tensor out = x.new_zeros(x.shape[0], self.hidden_dim) for i in range(self.num_experts): h = self.act(self.w1[i](x)) * self.w3[i](x) h = self.w2[i](h, reduce=False) # Add expert output to out with masking out += h * weights[:, i].view(-1, 1) # Reduce sum if self.process_group.size() > 1: torch.distributed.all_reduce(out, group=self.process_group) return out class MixtralLayer(nn.Module): def __init__(self, layer_id, config, weights): super().__init__() prefix = f"model.layers.{layer_id}" self.self_attn = MixtralAttention( prefix=f"{prefix}.self_attn", config=config, weights=weights ) moe_cls = BlockSparseMoE if config.quantize is None else DenseMoE self.moe = moe_cls(f"{prefix}.block_sparse_moe", config, weights) self.input_layernorm = FastRMSNorm.load( prefix=f"{prefix}.input_layernorm", weights=weights, eps=config.rms_norm_eps ) self.post_attention_layernorm = FastRMSNorm.load( prefix=f"{prefix}.post_attention_layernorm", weights=weights, eps=config.rms_norm_eps, ) def forward( self, hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ): normed_hidden_states, res = self.input_layernorm(hidden_states, residual) # Self Attention attn_output = self.self_attn( normed_hidden_states, cos, sin, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, prefill_cache_indices, ) # faster post attention rms norm normed_attn_res_output, attn_res = self.post_attention_layernorm( attn_output, res ) moe_output = self.moe(normed_attn_res_output) return moe_output, attn_res class MixtralModel(torch.nn.Module): def __init__(self, config, weights): super().__init__() self.embed_tokens = TensorParallelEmbedding( prefix="model.embed_tokens", weights=weights ) self.layers = nn.ModuleList( [ MixtralLayer( layer_id, config, weights, ) for layer_id in range(config.num_hidden_layers) ] ) self.norm = FastRMSNorm.load( prefix="model.norm", weights=weights, eps=config.rms_norm_eps ) 
self.head_size = self.layers[0].self_attn.head_size self.num_heads = self.layers[0].self_attn.num_heads self.num_key_value_heads = self.layers[0].self_attn.num_key_value_heads def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, true_max_s: int, prefill_cache_indices: Optional[torch.Tensor], ) -> torch.Tensor: hidden_states = self.embed_tokens(input_ids) # Get rotary cos and sin for this forward # Avoid to index in each layer cos, sin = self.layers[0].self_attn.rotary_emb.get_cos_sin( position_ids, true_max_s, hidden_states.dtype ) residual = None for i, layer in enumerate(self.layers): hidden_states, residual = layer( hidden_states, residual, cos, sin, cu_seqlen_prefill, kv_cache[i], block_tables, slots, input_lengths, max_s, prefill_cache_indices, ) hidden_states, _ = self.norm(hidden_states, residual) return hidden_states class FlashMixtralForCausalLM(torch.nn.Module): def __init__(self, config, weights): super().__init__() self.model = MixtralModel(config, weights) self.lm_head = SpeculativeHead.load( config, prefix="lm_head", weights=weights, ) self.max_past = config.sliding_window self.max_past_tensor = ( torch.tensor(config.sliding_window, device=weights.device) if self.max_past is not None else None ) def forward( self, input_ids: torch.Tensor, position_ids: torch.Tensor, cu_seqlen_prefill: Optional[torch.Tensor], kv_cache: List[Tuple[torch.Tensor, torch.Tensor]], block_tables: torch.Tensor, slots: torch.Tensor, input_lengths: torch.Tensor, max_s: int, prefill_cache_indices: Optional[torch.Tensor], lm_head_indices: Optional[torch.Tensor] = None, ) -> torch.Tensor: true_max_s = max_s if prefill_cache_indices is not None: # Slots also need to be sliced as it has the same size as the whole kv tensor slots = slots[prefill_cache_indices] elif self.max_past is not None: # Clamp in decode mode as paged attention requires clamped values whereas the flash attention # kernel requires the true values input_lengths = torch.clamp(input_lengths, max=self.max_past_tensor) hidden_states = self.model( input_ids, position_ids, cu_seqlen_prefill, kv_cache, block_tables, slots, input_lengths, max_s, true_max_s, prefill_cache_indices, ) if lm_head_indices is not None: hidden_states = hidden_states[lm_head_indices] logits = self.lm_head(hidden_states) return logits
text-generation-inference/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/flash_mixtral_modeling.py", "repo_id": "text-generation-inference", "token_count": 14049 }
225
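One piece of the Mixtral file above that is easy to check in isolation is the router: softmax over the gate logits, keep the top-k experts per token, and renormalize the kept weights. A minimal PyTorch sketch of that routing step, independent of the megablocks path and intended only as an illustration:

import torch


def route_tokens(gate_logits: torch.Tensor, top_k: int):
    """gate_logits: (num_tokens, num_experts) -> per-token expert ids and weights."""
    # Upcast to float32 for a numerically stable softmax, as the model code does.
    probs = torch.softmax(gate_logits, dim=-1, dtype=torch.float)
    # Keep only the k most probable experts per token.
    weights, experts = torch.topk(probs, top_k, dim=-1)
    # Renormalize so the kept weights sum to 1 for every token.
    weights = weights / weights.sum(dim=-1, keepdim=True)
    return experts, weights


if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(4, 8)           # 4 tokens, 8 experts
    experts, weights = route_tokens(logits, top_k=2)
    print(experts.shape, weights.shape)  # torch.Size([4, 2]) twice
    assert torch.allclose(weights.sum(dim=-1), torch.ones(4))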
# coding=utf-8 # Copyright 2022 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch OPT model.""" import random from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from transformers.activations import ACT2FN from transformers.modeling_outputs import ( BaseModelOutputWithPast, CausalLMOutputWithPast, ) from transformers.modeling_utils import PreTrainedModel from transformers import OPTConfig from text_generation_server.utils.layers import ( FastLinear, TensorParallelColumnLinear, TensorParallelEmbedding, TensorParallelRowLinear, SpeculativeHead, ) EPS = 1e-5 # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0, ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full( (tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min, device=device), device=device, ) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat( [ torch.zeros( tgt_len, past_key_values_length, dtype=dtype, device=device ), mask, ], dim=-1, ) return mask[None, None, :, :].expand( bsz, 1, tgt_len, tgt_len + past_key_values_length ) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill( inverted_mask.to(torch.bool), torch.finfo(dtype).min ) class OPTLearnedPositionalEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. 
""" def __init__(self, weights): super().__init__() self.offset = 2 self.weight = nn.Parameter( weights.get_tensor("model.decoder.embed_positions.weight") ) def forward( self, attention_mask: torch.LongTensor, past_key_values_length: int = 0 ): """`input_ids_shape` is expected to be [bsz x seqlen].""" attention_mask = attention_mask.long() # create positions depending on attention_mask positions = ( torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask ).long() - 1 # cut positions if `past_key_values_length` is > 0 positions = positions[:, past_key_values_length:] return torch.nn.functional.embedding(positions + self.offset, self.weight) class OPTAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, config, prefix, weights, is_decoder: bool = False, bias: bool = True, process_group=None, ): super().__init__() hidden_size = config.hidden_size num_heads = config.num_attention_heads self.hidden_size = hidden_size self.num_heads = num_heads self.dropout = config.dropout self.head_dim = hidden_size // num_heads if (self.head_dim * num_heads) != self.hidden_size: raise ValueError( f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder process_group = weights.process_group if self.num_heads % weights.process_group.size() != 0: raise ValueError( f"`num_heads` must be divisible by `num_shards` (got `num_heads`: {self.num_heads} " f"and `num_shards`: {weights.process_group.size()}" ) self.num_heads = self.num_heads // process_group.size() self.hidden_size = self.hidden_size // process_group.size() self.q_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.q_proj", weights=weights, bias=bias ) self.k_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.k_proj", weights=weights, bias=bias ) self.v_proj = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.v_proj", weights=weights, bias=bias ) self.out_proj = TensorParallelRowLinear.load( config, prefix=f"{prefix}.out_proj", weights=weights, bias=bias ) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return ( tensor.view(bsz, seq_len, self.num_heads, self.head_dim) .transpose(1, 2) .contiguous() ) def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = 
torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = ( attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask ) attn_weights = torch.max( attn_weights, torch.tensor(torch.finfo(attn_weights.dtype).min) ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) # upcast to fp32 if the weights are in fp16. Please see https://github.com/huggingface/transformers/pull/17437 if attn_weights.dtype == torch.float16: attn_weights = nn.functional.softmax( attn_weights, dim=-1, dtype=torch.float32 ).to(torch.float16) else: attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view( bsz, self.num_heads, tgt_len, src_len ) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view( bsz, self.num_heads, tgt_len, src_len ) attn_weights = attn_weights_reshaped.view( bsz * self.num_heads, tgt_len, src_len ) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout( attn_weights, p=self.dropout, training=self.training ) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `hidden_size` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned aross GPUs when using tensor-parallelism. attn_output = attn_output.reshape(bsz, tgt_len, self.hidden_size) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class OPTDecoderLayer(nn.Module): def __init__(self, layer_id: int, config: OPTConfig, weights): super().__init__() self.process_group = weights.process_group self.hidden_size = config.hidden_size prefix = f"model.decoder.layers.{layer_id}" self.self_attn = OPTAttention( config, prefix=f"{prefix}.self_attn", weights=weights, is_decoder=True, bias=config.enable_bias, ) self.do_layer_norm_before = config.do_layer_norm_before self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.self_attn_layer_norm = nn.LayerNorm.load( prefix=f"{prefix}.self_attn_layer_norm", weights=weights, eps=EPS ) self.fc1 = TensorParallelColumnLinear.load( config, prefix=f"{prefix}.fc1", weights=weights, bias=config.enable_bias ) self.fc2 = TensorParallelRowLinear.load( config, prefix=f"{prefix}.fc2", weights=weights, bias=config.enable_bias ) self.final_layer_norm = nn.LayerNorm.load( prefix=f"{prefix}.final_layer_norm", weights=weights, eps=EPS ) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, past_key_value: Optional[Tuple[torch.Tensor]] = None, ) -> Tuple[ torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]] ]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, hidden_size)` attention_mask (`torch.FloatTensor`, *optional*): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`, *optional*): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
past_key_value (`Tuple(torch.FloatTensor)`, *optional*): cached past key and value projection states """ residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout( hidden_states, p=self.dropout, training=self.training ) hidden_states = residual + hidden_states # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.self_attn_layer_norm(hidden_states) # Fully Connected hidden_states_shape = hidden_states.shape hidden_states = hidden_states.reshape(-1, hidden_states.size(-1)) residual = hidden_states # 125m, 1.7B, ..., 175B applies layer norm BEFORE attention if self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout( hidden_states, p=self.dropout, training=self.training ) hidden_states = (residual + hidden_states).view(hidden_states_shape) # 350m applies layer norm AFTER attention if not self.do_layer_norm_before: hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights,) if use_cache: outputs += (present_key_value,) return outputs class OPTPreTrainedModel(PreTrainedModel): config_class = OPTConfig class OPTDecoder(OPTPreTrainedModel): def __init__(self, config: OPTConfig, weights): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.vocab_size = config.vocab_size self.embed_tokens = TensorParallelEmbedding( prefix="model.decoder.embed_tokens", weights=weights ) self.embed_positions = OPTLearnedPositionalEmbedding(weights) if config.word_embed_proj_dim != config.hidden_size: self.project_out = FastLinear.load( config, prefix="model.decoder.project_out", weights=weights, bias=False ) else: self.project_out = None if config.word_embed_proj_dim != config.hidden_size: self.project_in = FastLinear.load( config, prefix="model.decoder.project_in", weights=weights, bias=False ) else: self.project_in = None # Note that the only purpose of `config._remove_final_layer_norm` is to keep backward compatibility # with checkpoints that have been fine-tuned before transformers v4.20.1 # see https://github.com/facebookresearch/metaseq/pull/164 if config.do_layer_norm_before and not config._remove_final_layer_norm: self.final_layer_norm = nn.LayerNorm.load( prefix="model.decoder.final_layer_norm", weights=weights, eps=EPS ) else: self.final_layer_norm = None self.layers = nn.ModuleList( [ OPTDecoderLayer(layer_id, config, weights) for layer_id in range(config.num_hidden_layers) ] ) # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask( self, attention_mask, input_shape, inputs_embeds, past_key_values_length ): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, 
inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask( attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1] ).to(inputs_embeds.device) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(num_hidden_layers, num_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError( "You have to specify either decoder_input_ids or decoder_inputs_embeds" ) if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) batch_size, seq_length = input_shape past_key_values_length = ( past_key_values[0][0].shape[2] if past_key_values is not None else 0 ) # required mask seq length can be calculated via length of past mask_seq_length = past_key_values_length + seq_length # embed positions if attention_mask is None: attention_mask = torch.ones( batch_size, mask_seq_length, device=inputs_embeds.device ) causal_attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) pos_embeds = self.embed_positions(attention_mask, past_key_values_length) if self.project_in is not None: inputs_embeds = self.project_in(inputs_embeds) hidden_states = inputs_embeds + pos_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask], ["head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != (len(self.layers)): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." 
) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = ( past_key_values[idx] if past_key_values is not None else None ) layer_outputs = decoder_layer( hidden_states, attention_mask=causal_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[2 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if self.final_layer_norm is not None: hidden_states = self.final_layer_norm(hidden_states) if self.project_out is not None: hidden_states = self.project_out(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, ) class OPTModel(OPTPreTrainedModel): def __init__(self, config: OPTConfig, weights): super().__init__(config) self.decoder = OPTDecoder(config, weights) # Initialize weights and apply final processing def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPast]: output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs return BaseModelOutputWithPast( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, hidden_states=decoder_outputs.hidden_states, attentions=decoder_outputs.attentions, ) class OPTForCausalLM(OPTPreTrainedModel): def __init__(self, config, weights): super().__init__(config) self.model = OPTModel(config, weights) self.lm_head = SpeculativeHead.load( config, prefix="model.decoder.embed_tokens", weights=weights ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = 
None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits, speculative_logits = self.lm_head(outputs) loss = None return ( CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ), speculative_logits, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs, ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, } ) return model_inputs @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple( past_state.index_select(0, beam_idx) for past_state in layer_past ), ) return reordered_past
text-generation-inference/server/text_generation_server/models/custom_modeling/opt_modeling.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/opt_modeling.py", "repo_id": "text-generation-inference", "token_count": 15538 }
226
import torch import torch.distributed from typing import Optional from transformers import ( AutoTokenizer, AutoConfig, ) from text_generation_server.models import CausalLM from text_generation_server.models.custom_modeling.neox_modeling import ( GPTNeoxForCausalLM, ) from text_generation_server.utils import ( initialize_torch_distributed, weight_files, Weights, ) class GPTNeoxSharded(CausalLM): def __init__( self, model_id: str, revision: Optional[str] = None, quantize: Optional[str] = None, use_medusa: Optional[str] = None, dtype: Optional[torch.dtype] = None, trust_remote_code: bool = False, ): self.process_group, rank, world_size = initialize_torch_distributed() if torch.cuda.is_available(): device = torch.device(f"cuda:{rank}") dtype = torch.float16 if dtype is None else dtype else: device = torch.device("cpu") dtype = torch.float32 if dtype is None else dtype tokenizer = AutoTokenizer.from_pretrained( model_id, revision=revision, padding_side="left", truncation_side="left", trust_remote_code=trust_remote_code, ) tokenizer.pad_token = tokenizer.eos_token config = AutoConfig.from_pretrained( model_id, revision=revision, trust_remote_code=trust_remote_code, ) config.quantize = quantize config.use_medusa = use_medusa torch.distributed.barrier(group=self.process_group) filenames = weight_files(model_id, revision=revision, extension=".safetensors") weights = Weights( filenames, device=device, dtype=dtype, process_group=self.process_group ) if config.quantize == "gptq": weights._set_gptq_params(model_id, revision) model = GPTNeoxForCausalLM(config, weights) torch.distributed.barrier(group=self.process_group) super(CausalLM, self).__init__( model=model, tokenizer=tokenizer, requires_padding=True, dtype=dtype, device=device, rank=rank, world_size=world_size, ) def forward( self, input_ids, attention_mask, position_ids, past_key_values: Optional = None ): outputs, speculative_logits = self.model.forward( input_ids=input_ids, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, use_cache=True, ) logits = outputs.logits return logits, speculative_logits, outputs.past_key_values
text-generation-inference/server/text_generation_server/models/gpt_neox.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/models/gpt_neox.py", "repo_id": "text-generation-inference", "token_count": 1269 }
227
from text_generation_server.utils.convert import convert_file, convert_files
from text_generation_server.utils.dist import initialize_torch_distributed
from text_generation_server.utils.weights import Weights
from text_generation_server.utils.peft import download_and_unload_peft
from text_generation_server.utils.hub import (
    weight_files,
    weight_hub_files,
    download_weights,
    EntryNotFoundError,
    LocalEntryNotFoundError,
    RevisionNotFoundError,
)
from text_generation_server.utils.tokens import (
    NextTokenChooser,
    HeterogeneousNextTokenChooser,
    StoppingCriteria,
    StopSequenceCriteria,
    FinishReason,
    Sampling,
    Greedy,
)

__all__ = [
    "convert_file",
    "convert_files",
    "initialize_torch_distributed",
    "weight_files",
    "weight_hub_files",
    "download_weights",
    "download_and_unload_peft",
    "EntryNotFoundError",
    "HeterogeneousNextTokenChooser",
    "LocalEntryNotFoundError",
    "RevisionNotFoundError",
    "Greedy",
    "NextTokenChooser",
    "Sampling",
    "StoppingCriteria",
    "StopSequenceCriteria",
    "FinishReason",
    "Weights",
]
text-generation-inference/server/text_generation_server/utils/__init__.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/__init__.py", "repo_id": "text-generation-inference", "token_count": 417 }
228
import torch # vllm imports from vllm import cache_ops from vllm import attention_ops _PARTITION_SIZE = 512 def reshape_and_cache( key: torch.Tensor, value: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, slots: torch.Tensor, ): cache_ops.reshape_and_cache(key, value, key_cache, value_cache, slots) def attention( out: torch.Tensor, query: torch.Tensor, key_cache: torch.Tensor, value_cache: torch.Tensor, kv_head_mapping: torch.Tensor, softmax_scale: float, block_tables: torch.Tensor, input_lengths: torch.Tensor, max_s: int, ): # Adapted from: https://github.com/vllm-project/vllm/blob/f8a1e39fae05ca610be8d5a78be9d40f5274e5fc/vllm/model_executor/layers/attention.py # Copyright 2023 The vLLM team. All rights # reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # value_cache => [num_blocks, num_heads, head_size, block_size] block_size = value_cache.shape[3] num_seqs, num_heads, head_size = query.shape max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE # NOTE(woosuk): We use a simple heuristic to decide whether to use # PagedAttention V1 or V2. If the number of partitions is 1, we use # V1 to avoid the overhead of reduction. Also, if the number of # sequences or heads is large, we use V1 since there is enough work # to parallelize. use_v1 = max_num_partitions == 1 or num_seqs * num_heads > 512 if use_v1: attention_ops.paged_attention_v1( out, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, input_lengths, block_size, max_s, None, ) else: # Run PagedAttention V2. assert _PARTITION_SIZE % block_size == 0 tmp_output = torch.empty( size=(num_seqs, num_heads, max_num_partitions, head_size), dtype=out.dtype, device=out.device, ) exp_sums = torch.empty( size=(num_seqs, num_heads, max_num_partitions), dtype=torch.float32, device=out.device, ) max_logits = torch.empty_like(exp_sums) attention_ops.paged_attention_v2( out, exp_sums, max_logits, tmp_output, query, key_cache, value_cache, kv_head_mapping, softmax_scale, block_tables, input_lengths, block_size, max_s, None, )
text-generation-inference/server/text_generation_server/utils/paged_attention.py/0
{ "file_path": "text-generation-inference/server/text_generation_server/utils/paged_attention.py", "repo_id": "text-generation-inference", "token_count": 1485 }
229
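A tiny illustration (not part of the original file) of the V1/V2 dispatch heuristic in the paged-attention entry above, using the same `_PARTITION_SIZE` of 512:

```python
_PARTITION_SIZE = 512


def use_paged_attention_v1(max_s: int, num_seqs: int, num_heads: int) -> bool:
    # Number of partitions each sequence would be split into for V2.
    max_num_partitions = (max_s + _PARTITION_SIZE - 1) // _PARTITION_SIZE
    # V1 when a single partition suffices, or when there is already enough
    # parallel work (sequences * heads) to keep the GPU busy.
    return max_num_partitions == 1 or num_seqs * num_heads > 512


assert use_paged_attention_v1(max_s=512, num_seqs=1, num_heads=16)      # one partition
assert not use_paged_attention_v1(max_s=4096, num_seqs=4, num_heads=8)  # 8 partitions, little work
```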
nodeLinker: node-modules
npmAuditRegistry: 'https://registry.npmjs.org'
yarnPath: .yarn/releases/yarn-3.5.1.cjs
tokenizers/bindings/node/.yarnrc.yml/0
{ "file_path": "tokenizers/bindings/node/.yarnrc.yml", "repo_id": "tokenizers", "token_count": 53 }
230
/* eslint-disable @typescript-eslint/no-empty-function */ /* eslint-disable @typescript-eslint/no-explicit-any */ import { BPE, Unigram, WordPiece } from '../../' const MOCKS_DIR = __dirname + '/__mocks__' describe('WordPiece', () => { describe('fromFile', () => { it('throws if called with only one argument', () => { expect(() => (WordPiece as any).fromFile()).toThrow( 'Failed to convert JavaScript value `Undefined` into rust type `String`', ) }) it('throws if called with 2 arguments without a callback as third argument', () => { expect(() => (WordPiece as any).fromFile({})).toThrow( 'Failed to convert JavaScript value `Object {}` into rust type `String`', ) }) it('has its callback called with the loaded model', async () => { const model = await WordPiece.fromFile(`${MOCKS_DIR}/vocab.txt`) expect(model).toBeDefined() }) }) }) describe('BPE', () => { describe('fromFile', () => { it('has its callback called with the loaded model', async () => { const model = await BPE.fromFile(`${MOCKS_DIR}/vocab.json`, `${MOCKS_DIR}/merges.txt`) expect(model).toBeDefined() }) it('has its callback called with the loaded model', async () => { const model = await BPE.fromFile(`${MOCKS_DIR}/vocab.json`, `${MOCKS_DIR}/merges.txt`, {}) expect(model).toBeDefined() }) }) describe('When initialized from memory', () => { it('returns the loaded Model', () => { const bpe = BPE.init({ a: 0, b: 1, ab: 2 }, [['a', 'b']]) // expect(bpe.constructor.name).toEqual("Model"); expect(bpe.constructor.name).toEqual('BPE') }) }) }) describe('Unigram', () => { it('can be initialized from memory', () => { const unigram = Unigram.init( [ ['<unk>', 0], ['Hello', -1], ['there', -2], ], { unkId: 0, }, ) expect(unigram.constructor.name).toEqual('Unigram') }) })
tokenizers/bindings/node/lib/bindings/models.test.ts/0
{ "file_path": "tokenizers/bindings/node/lib/bindings/models.test.ts", "repo_id": "tokenizers", "token_count": 818 }
231
# `tokenizers-linux-arm-gnueabihf`

This is the **armv7-unknown-linux-gnueabihf** binary for `tokenizers`
tokenizers/bindings/node/npm/linux-arm-gnueabihf/README.md/0
{ "file_path": "tokenizers/bindings/node/npm/linux-arm-gnueabihf/README.md", "repo_id": "tokenizers", "token_count": 42 }
232
{ "name": "tokenizers", "version": "0.14.0-dev0", "repository": { "type": "git", "url": "git+https://github.com/huggingface/tokenizers.git" }, "bugs": { "url": "https://github.com/huggingface/tokenizers/issues" }, "homepage": "https://github.com/huggingface/tokenizers/tree/master/bindings/node", "author": "Anthony MOI <[email protected]>", "license": "Apache-2.0", "description": "Provides an implementation of today's most used tokenizers, with a focus on performances and versatility.", "files": [ "index.d.ts", "index.js" ], "napi": { "name": "tokenizers", "triples": { "defaults": true, "additional": [ "x86_64-unknown-linux-musl", "aarch64-unknown-linux-gnu", "i686-pc-windows-msvc", "armv7-unknown-linux-gnueabihf", "aarch64-apple-darwin", "aarch64-linux-android", "x86_64-unknown-freebsd", "aarch64-unknown-linux-musl", "aarch64-pc-windows-msvc", "armv7-linux-androideabi" ] } }, "engines": { "node": ">= 10" }, "publishConfig": { "registry": "https://registry.npmjs.org/", "access": "public" }, "scripts": { "artifacts": "napi artifacts", "bench": "node -r @swc-node/register benchmark/bench.ts", "build": "napi build --platform --release --pipe \"prettier -w\"", "build:debug": "napi build --platform --pipe \"prettier -w\"", "format": "run-p format:prettier format:rs format:toml", "format:prettier": "prettier . -w", "format:toml": "taplo format", "format:rs": "cargo fmt", "lint": "eslint . -c ./.eslintrc.yml", "prepublishOnly": "napi prepublish -t npm", "test": "jest", "version": "napi version" }, "devDependencies": { "@napi-rs/cli": "^2.14.6", "@swc-node/register": "^1.5.5", "@swc/core": "^1.3.32", "@taplo/cli": "^0.5.2", "@types/jest": "^29.5.1", "@typescript-eslint/eslint-plugin": "^5.50.0", "@typescript-eslint/parser": "^5.50.0", "ava": "^5.1.1", "benny": "^3.7.1", "chalk": "^5.2.0", "eslint": "^8.33.0", "eslint-config-prettier": "^8.6.0", "eslint-plugin-import": "^2.27.5", "eslint-plugin-prettier": "^4.2.1", "husky": "^8.0.3", "jest": "^29.5.0", "lint-staged": "^13.1.0", "npm-run-all": "^4.1.5", "prettier": "^2.8.3", "ts-jest": "^29.1.0", "typescript": "^5.0.0" }, "lint-staged": { "*.@(js|ts|tsx)": [ "eslint -c .eslintrc.yml --fix" ], "*.@(js|ts|tsx|yml|yaml|md|json)": [ "prettier --write" ], "*.toml": [ "taplo format" ] }, "ava": { "require": [ "@swc-node/register" ], "extensions": [ "ts" ], "timeout": "2m", "workerThreads": false, "environmentVariables": { "TS_NODE_PROJECT": "./tsconfig.json" } }, "prettier": { "printWidth": 120, "semi": false, "trailingComma": "all", "singleQuote": true, "arrowParens": "always" }, "packageManager": "[email protected]" }
tokenizers/bindings/node/package.json/0
{ "file_path": "tokenizers/bindings/node/package.json", "repo_id": "tokenizers", "token_count": 1532 }
233
{
  "compilerOptions": {
    "target": "ES2018",
    "strict": true,
    "moduleResolution": "node",
    "module": "CommonJS",
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "esModuleInterop": true,
    "allowSyntheticDefaultImports": true
  },
  "include": ["."],
  "exclude": ["node_modules"]
}
tokenizers/bindings/node/tsconfig.json/0
{ "file_path": "tokenizers/bindings/node/tsconfig.json", "repo_id": "tokenizers", "token_count": 129 }
234
<jupyter_start><jupyter_code>!wget https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt -O /tmp/bert-base-uncased-vocab.txt from tokenizers import BertWordPieceTokenizer from tokenizers.tools import EncodingVisualizer EncodingVisualizer.unk_token_regex.search("aaa[udsnk]aaa") text = """Mathias Bynens 'Z͑ͫ̓ͪ̂ͫ̽͏̴̙̤̞͉͚̯̞̠͍A̴̵̜̰͔ͫ͗͢L̠ͨͧͩ͘G̴̻͈͍͔̹̑͗̎̅͛́Ǫ̵̹̻̝̳͂̌̌͘!͖̬̰̙̗̿̋ͥͥ̂ͣ̐́́͜͞': Whenever you’re working on a piece of JavaScript code that deals with strings or regular expressions in some way, just add a unit test that contains a pile of poo (💩) in a string, 💩💩💩💩💩💩💩💩💩💩💩💩 and see if anything breaks. It’s a quick, fun, and easy way to see if your code supports astral symbols. Once you’ve found a Unicode-related bug in your code, all you need to do is apply the techniques discussed in this post to fix it.""" tokenizer = BertWordPieceTokenizer("/tmp/bert-base-uncased-vocab.txt", lowercase=True) visualizer = EncodingVisualizer(tokenizer=tokenizer)<jupyter_output><empty_output><jupyter_text>Visualizing Tokens With No Annotations<jupyter_code>visualizer(text)<jupyter_output><empty_output><jupyter_text>Visualizing Tokens With Aligned AnnotationsFirst we make some annotations with the Annotation class<jupyter_code>from tokenizers.tools import Annotation anno1 = Annotation(start=0, end=2, label="foo") anno2 = Annotation(start=2, end=4, label="bar") anno3 = Annotation(start=6, end=8, label="poo") anno4 = Annotation(start=9, end=12, label="shoe") annotations=[ anno1, anno2, anno3, anno4, Annotation(start=23, end=30, label="random tandem bandem sandem landem fandom"), Annotation(start=63, end=70, label="foo"), Annotation(start=80, end=95, label="bar"), Annotation(start=120, end=128, label="bar"), Annotation(start=152, end=155, label="poo"), ] visualizer(text,annotations=annotations)<jupyter_output><empty_output><jupyter_text>Using A Custom Annotation FormatEvery system has its own representation of annotations. That's why we can instantiate the EncodingVisualizer with a convertion function.<jupyter_code>funnyAnnotations = [dict(startPlace=i,endPlace=i+3,theTag=str(i)) for i in range(0,20,4)] funnyAnnotations converter = lambda funny: Annotation(start=funny['startPlace'], end=funny['endPlace'], label=funny['theTag']) visualizer = EncodingVisualizer(tokenizer=tokenizer, default_to_notebook=True, annotation_converter=converter) visualizer(text, annotations=funnyAnnotations)<jupyter_output><empty_output><jupyter_text>Trying with Roberta<jupyter_code>!wget "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-vocab.json" -O /tmp/roberta-base-vocab.json !wget "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-merges.txt" -O /tmp/roberta-base-merges.txt from tokenizers import ByteLevelBPETokenizer roberta_tokenizer = ByteLevelBPETokenizer.from_file('/tmp/roberta-base-vocab.json', '/tmp/roberta-base-merges.txt') roberta_visualizer = EncodingVisualizer(tokenizer=roberta_tokenizer, default_to_notebook=True) roberta_visualizer(text, annotations=annotations)<jupyter_output><empty_output>
tokenizers/bindings/python/examples/using_the_visualizer.ipynb/0
{ "file_path": "tokenizers/bindings/python/examples/using_the_visualizer.ipynb", "repo_id": "tokenizers", "token_count": 1221 }
235
# Generated content DO NOT EDIT
from .. import pre_tokenizers

PreTokenizer = pre_tokenizers.PreTokenizer
BertPreTokenizer = pre_tokenizers.BertPreTokenizer
ByteLevel = pre_tokenizers.ByteLevel
CharDelimiterSplit = pre_tokenizers.CharDelimiterSplit
Digits = pre_tokenizers.Digits
Metaspace = pre_tokenizers.Metaspace
Punctuation = pre_tokenizers.Punctuation
Sequence = pre_tokenizers.Sequence
Split = pre_tokenizers.Split
UnicodeScripts = pre_tokenizers.UnicodeScripts
Whitespace = pre_tokenizers.Whitespace
WhitespaceSplit = pre_tokenizers.WhitespaceSplit
tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py/0
{ "file_path": "tokenizers/bindings/python/py_src/tokenizers/pre_tokenizers/__init__.py", "repo_id": "tokenizers", "token_count": 177 }
236
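A short, hedged usage sketch for the re-exported classes above (the sketch and its printed output are illustrative, not part of the generated file):

```python
# Minimal sketch: combine two of the re-exported pre-tokenizers.
from tokenizers.pre_tokenizers import Digits, Sequence, Whitespace

pre_tokenizer = Sequence([Whitespace(), Digits(individual_digits=True)])
print(pre_tokenizer.pre_tokenize_str("Call 911 now"))
# e.g. [('Call', (0, 4)), ('9', (5, 6)), ('1', (6, 7)), ('1', (7, 8)), ('now', (9, 12))]
```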
use pyo3::exceptions; use pyo3::prelude::*; use pyo3::types::*; use tk::tokenizer::{Offsets, PaddingDirection}; use tk::utils::truncation::TruncationDirection; use tokenizers as tk; use crate::error::{deprecation_warning, PyError}; /// The :class:`~tokenizers.Encoding` represents the output of a :class:`~tokenizers.Tokenizer`. #[pyclass(dict, module = "tokenizers", name = "Encoding")] #[repr(transparent)] pub struct PyEncoding { pub encoding: tk::tokenizer::Encoding, } impl From<tk::tokenizer::Encoding> for PyEncoding { fn from(v: tk::tokenizer::Encoding) -> Self { Self { encoding: v } } } #[pymethods] impl PyEncoding { #[new] #[pyo3(text_signature = None)] fn new() -> Self { Self { encoding: tk::tokenizer::Encoding::default(), } } fn __getstate__(&self, py: Python) -> PyResult<PyObject> { let data = serde_json::to_string(&self.encoding).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to pickle Encoding: {}", e )) })?; Ok(PyBytes::new(py, data.as_bytes()).to_object(py)) } fn __setstate__(&mut self, py: Python, state: PyObject) -> PyResult<()> { match state.extract::<&PyBytes>(py) { Ok(s) => { self.encoding = serde_json::from_slice(s.as_bytes()).map_err(|e| { exceptions::PyException::new_err(format!( "Error while attempting to unpickle Encoding: {}", e )) })?; Ok(()) } Err(e) => Err(e), } } fn __repr__(&self) -> PyResult<String> { Ok(format!( "Encoding(num_tokens={}, attributes=[ids, type_ids, tokens, offsets, \ attention_mask, special_tokens_mask, overflowing])", self.encoding.get_ids().len() )) } fn __len__(&self) -> PyResult<usize> { Ok(self.encoding.len()) } /// Merge the list of encodings into one final :class:`~tokenizers.Encoding` /// /// Args: /// encodings (A :obj:`List` of :class:`~tokenizers.Encoding`): /// The list of encodings that should be merged in one /// /// growing_offsets (:obj:`bool`, defaults to :obj:`True`): /// Whether the offsets should accumulate while merging /// /// Returns: /// :class:`~tokenizers.Encoding`: The resulting Encoding #[staticmethod] #[pyo3(signature = (encodings, growing_offsets = true))] #[pyo3(text_signature = "(encodings, growing_offsets=True)")] fn merge(encodings: Vec<PyRef<PyEncoding>>, growing_offsets: bool) -> PyEncoding { tk::tokenizer::Encoding::merge( encodings.into_iter().map(|e| e.encoding.clone()), growing_offsets, ) .into() } /// The number of sequences represented /// /// Returns: /// :obj:`int`: The number of sequences in this :class:`~tokenizers.Encoding` #[getter] fn get_n_sequences(&self) -> usize { self.encoding.n_sequences() } /// Set the given sequence index /// /// Set the given sequence index for the whole range of tokens contained in this /// :class:`~tokenizers.Encoding`. #[pyo3(text_signature = "(self, sequence_id)")] fn set_sequence_id(&mut self, sequence_id: usize) { self.encoding.set_sequence_id(sequence_id); } /// The generated IDs /// /// The IDs are the main input to a Language Model. They are the token indices, /// the numerical representations that a LM understands. /// /// Returns: /// :obj:`List[int]`: The list of IDs #[getter] fn get_ids(&self) -> Vec<u32> { self.encoding.get_ids().to_vec() } /// The generated tokens /// /// They are the string representation of the IDs. /// /// Returns: /// :obj:`List[str]`: The list of tokens #[getter] fn get_tokens(&self) -> Vec<String> { self.encoding.get_tokens().to_vec() } /// The generated word indices. /// /// .. warning:: /// This is deprecated and will be removed in a future version. 
/// Please use :obj:`~tokenizers.Encoding.word_ids` instead. /// /// They represent the index of the word associated to each token. /// When the input is pre-tokenized, they correspond to the ID of the given input label, /// otherwise they correspond to the words indices as defined by the /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. /// /// For special tokens and such (any token that was generated from something that was /// not part of the input), the output is :obj:`None` /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. #[getter] fn get_words(&self, py: Python<'_>) -> PyResult<Vec<Option<u32>>> { deprecation_warning( py, "0.9.4", "Encoding.words is deprecated, please use Encoding.word_ids instead.", )?; Ok(self.get_word_ids()) } /// The generated word indices. /// /// They represent the index of the word associated to each token. /// When the input is pre-tokenized, they correspond to the ID of the given input label, /// otherwise they correspond to the words indices as defined by the /// :class:`~tokenizers.pre_tokenizers.PreTokenizer` that was used. /// /// For special tokens and such (any token that was generated from something that was /// not part of the input), the output is :obj:`None` /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional word index. #[getter] fn get_word_ids(&self) -> Vec<Option<u32>> { self.encoding.get_word_ids().to_vec() } /// The generated sequence indices. /// /// They represent the index of the input sequence associated to each token. /// The sequence id can be None if the token is not related to any input sequence, /// like for example with special tokens. /// /// Returns: /// A :obj:`List` of :obj:`Optional[int]`: A list of optional sequence index. #[getter] fn get_sequence_ids(&self) -> Vec<Option<usize>> { self.encoding.get_sequence_ids() } /// The generated type IDs /// /// Generally used for tasks like sequence classification or question answering, /// these tokens let the LM know which input sequence corresponds to each tokens. /// /// Returns: /// :obj:`List[int]`: The list of type ids #[getter] fn get_type_ids(&self) -> Vec<u32> { self.encoding.get_type_ids().to_vec() } /// The offsets associated to each token /// /// These offsets let's you slice the input string, and thus retrieve the original /// part that led to producing the corresponding token. /// /// Returns: /// A :obj:`List` of :obj:`Tuple[int, int]`: The list of offsets #[getter] fn get_offsets(&self) -> Vec<(usize, usize)> { self.encoding.get_offsets().to_vec() } /// The special token mask /// /// This indicates which tokens are special tokens, and which are not. /// /// Returns: /// :obj:`List[int]`: The special tokens mask #[getter] fn get_special_tokens_mask(&self) -> Vec<u32> { self.encoding.get_special_tokens_mask().to_vec() } /// The attention mask /// /// This indicates to the LM which tokens should be attended to, and which should not. /// This is especially important when batching sequences, where we need to applying /// padding. /// /// Returns: /// :obj:`List[int]`: The attention mask #[getter] fn get_attention_mask(&self) -> Vec<u32> { self.encoding.get_attention_mask().to_vec() } /// A :obj:`List` of overflowing :class:`~tokenizers.Encoding` /// /// When using truncation, the :class:`~tokenizers.Tokenizer` takes care of splitting /// the output into as many pieces as required to match the specified maximum length. /// This field lets you retrieve all the subsequent pieces. 
/// /// When you use pairs of sequences, the overflowing pieces will contain enough /// variations to cover all the possible combinations, while respecting the provided /// maximum length. #[getter] fn get_overflowing(&self) -> Vec<PyEncoding> { self.encoding .get_overflowing() .clone() .into_iter() .map(|e| e.into()) .collect() } /// Get the encoded tokens corresponding to the word at the given index /// in one of the input sequences. /// /// Args: /// word_index (:obj:`int`): /// The index of a word in one of the input sequences. /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target word /// /// Returns: /// :obj:`Tuple[int, int]`: The range of tokens: :obj:`(first, last + 1)` #[pyo3(signature = (word_index, sequence_index = 0))] #[pyo3(text_signature = "(self, word_index, sequence_index=0)")] fn word_to_tokens(&self, word_index: u32, sequence_index: usize) -> Option<(usize, usize)> { self.encoding.word_to_tokens(word_index, sequence_index) } /// Get the offsets of the word at the given index in one of the input sequences. /// /// Args: /// word_index (:obj:`int`): /// The index of a word in one of the input sequences. /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target word /// /// Returns: /// :obj:`Tuple[int, int]`: The range of characters (span) :obj:`(first, last + 1)` #[pyo3(signature = (word_index, sequence_index = 0))] #[pyo3(text_signature = "(self, word_index, sequence_index=0)")] fn word_to_chars(&self, word_index: u32, sequence_index: usize) -> Option<Offsets> { self.encoding.word_to_chars(word_index, sequence_index) } /// Get the index of the sequence represented by the given token. /// /// In the general use case, this method returns :obj:`0` for a single sequence or /// the first sequence of a pair, and :obj:`1` for the second sequence of a pair /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`int`: The sequence id of the given token #[pyo3(text_signature = "(self, token_index)")] fn token_to_sequence(&self, token_index: usize) -> Option<usize> { self.encoding.token_to_sequence(token_index) } /// Get the offsets of the token at the given index. /// /// The returned offsets are related to the input sequence that contains the /// token. In order to determine in which input sequence it belongs, you /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`. /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`Tuple[int, int]`: The token offsets :obj:`(first, last + 1)` #[pyo3(text_signature = "(self, token_index)")] fn token_to_chars(&self, token_index: usize) -> Option<Offsets> { let (_, offsets) = self.encoding.token_to_chars(token_index)?; Some(offsets) } /// Get the index of the word that contains the token in one of the input sequences. /// /// The returned word index is related to the input sequence that contains /// the token. In order to determine in which input sequence it belongs, you /// must call :meth:`~tokenizers.Encoding.token_to_sequence()`. /// /// Args: /// token_index (:obj:`int`): /// The index of a token in the encoded sequence. /// /// Returns: /// :obj:`int`: The index of the word in the relevant input sequence. 
#[pyo3(text_signature = "(self, token_index)")] fn token_to_word(&self, token_index: usize) -> Option<u32> { let (_, word_idx) = self.encoding.token_to_word(token_index)?; Some(word_idx) } /// Get the token that contains the char at the given position in the input sequence. /// /// Args: /// char_pos (:obj:`int`): /// The position of a char in the input string /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target char /// /// Returns: /// :obj:`int`: The index of the token that contains this char in the encoded sequence #[pyo3(signature = (char_pos, sequence_index = 0))] #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")] fn char_to_token(&self, char_pos: usize, sequence_index: usize) -> Option<usize> { self.encoding.char_to_token(char_pos, sequence_index) } /// Get the word that contains the char at the given position in the input sequence. /// /// Args: /// char_pos (:obj:`int`): /// The position of a char in the input string /// sequence_index (:obj:`int`, defaults to :obj:`0`): /// The index of the sequence that contains the target char /// /// Returns: /// :obj:`int`: The index of the word that contains this char in the input sequence #[pyo3(signature = (char_pos, sequence_index = 0))] #[pyo3(text_signature = "(self, char_pos, sequence_index=0)")] fn char_to_word(&self, char_pos: usize, sequence_index: usize) -> Option<u32> { self.encoding.char_to_word(char_pos, sequence_index) } /// Pad the :class:`~tokenizers.Encoding` at the given length /// /// Args: /// length (:obj:`int`): /// The desired length /// /// direction: (:obj:`str`, defaults to :obj:`right`): /// The expected padding direction. Can be either :obj:`right` or :obj:`left` /// /// pad_id (:obj:`int`, defaults to :obj:`0`): /// The ID corresponding to the padding token /// /// pad_type_id (:obj:`int`, defaults to :obj:`0`): /// The type ID corresponding to the padding token /// /// pad_token (:obj:`str`, defaults to `[PAD]`): /// The pad token to use #[pyo3(signature = (length, **kwargs))] #[pyo3( text_signature = "(self, length, direction='right', pad_id=0, pad_type_id=0, pad_token='[PAD]')" )] fn pad(&mut self, length: usize, kwargs: Option<&PyDict>) -> PyResult<()> { let mut pad_id = 0; let mut pad_type_id = 0; let mut pad_token = "[PAD]"; let mut direction = PaddingDirection::Right; if let Some(kwargs) = kwargs { for (key, value) in kwargs { let key: &str = key.extract()?; match key { "direction" => { let value: &str = value.extract()?; direction = match value { "left" => Ok(PaddingDirection::Left), "right" => Ok(PaddingDirection::Right), other => Err(PyError(format!( "Unknown `direction`: `{}`. Use \ one of `left` or `right`", other )) .into_pyerr::<exceptions::PyValueError>()), }?; } "pad_id" => pad_id = value.extract()?, "pad_type_id" => pad_type_id = value.extract()?, "pad_token" => pad_token = value.extract()?, _ => println!("Ignored unknown kwarg option {}", key), } } } self.encoding .pad(length, pad_id, pad_type_id, pad_token, direction); Ok(()) } /// Truncate the :class:`~tokenizers.Encoding` at the given length /// /// If this :class:`~tokenizers.Encoding` represents multiple sequences, when truncating /// this information is lost. It will be considered as representing a single sequence. 
/// /// Args: /// max_length (:obj:`int`): /// The desired length /// /// stride (:obj:`int`, defaults to :obj:`0`): /// The length of previous content to be included in each overflowing piece /// /// direction (:obj:`str`, defaults to :obj:`right`): /// Truncate direction #[pyo3(signature = (max_length, stride = 0, direction = "right"))] #[pyo3(text_signature = "(self, max_length, stride=0, direction='right')")] fn truncate(&mut self, max_length: usize, stride: usize, direction: &str) -> PyResult<()> { let tdir = match direction { "left" => Ok(TruncationDirection::Left), "right" => Ok(TruncationDirection::Right), _ => Err(PyError(format!( "Invalid truncation direction value : {}", direction )) .into_pyerr::<exceptions::PyValueError>()), }?; self.encoding.truncate(max_length, stride, tdir); Ok(()) } }
tokenizers/bindings/python/src/encoding.rs/0
{ "file_path": "tokenizers/bindings/python/src/encoding.rs", "repo_id": "tokenizers", "token_count": 7397 }
237
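The Rust bindings above back the Python-side `Encoding` object; here is a minimal sketch of reading those fields from Python (it assumes the `bert-base-uncased` tokenizer can be fetched from the Hub):

```python
from tokenizers import Tokenizer

tokenizer = Tokenizer.from_pretrained("bert-base-uncased")
encoding = tokenizer.encode("Hello, y'all!", "How are you?")

print(encoding.tokens)             # string form of the ids
print(encoding.ids)                # model input ids
print(encoding.type_ids)           # 0 for the first sequence, 1 for the second
print(encoding.offsets)            # (start, end) character spans in the input
print(encoding.word_ids)           # word index per token, None for special tokens
print(encoding.token_to_chars(1))  # offsets of a single token
```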
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, processors
from tokenizers.implementations import BaseTokenizer


class TestBaseTokenizer:
    def test_get_set_components(self):
        toki = Tokenizer(models.BPE())
        toki.normalizer = normalizers.NFC()
        toki.pre_tokenizer = pre_tokenizers.ByteLevel()
        toki.post_processor = processors.BertProcessing(("A", 0), ("B", 1))
        toki.decoder = decoders.ByteLevel()

        tokenizer = BaseTokenizer(toki)

        assert isinstance(tokenizer.model, models.BPE)
        assert isinstance(tokenizer.normalizer, normalizers.NFC)
        assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.ByteLevel)
        assert isinstance(tokenizer.post_processor, processors.BertProcessing)
        assert isinstance(tokenizer.decoder, decoders.ByteLevel)

        tokenizer.model = models.Unigram()
        assert isinstance(tokenizer.model, models.Unigram)
        tokenizer.normalizer = normalizers.NFD()
        assert isinstance(tokenizer.normalizer, normalizers.NFD)
        tokenizer.pre_tokenizer = pre_tokenizers.Whitespace()
        assert isinstance(tokenizer.pre_tokenizer, pre_tokenizers.Whitespace)
        tokenizer.post_processor = processors.ByteLevel()
        assert isinstance(tokenizer.post_processor, processors.ByteLevel)
        tokenizer.decoder = decoders.WordPiece()
        assert isinstance(tokenizer.decoder, decoders.WordPiece)
tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py/0
{ "file_path": "tokenizers/bindings/python/tests/implementations/test_base_tokenizer.py", "repo_id": "tokenizers", "token_count": 550 }
238
# Normalizers

<tokenizerslangcontent>
<python>
## BertNormalizer

[[autodoc]] tokenizers.normalizers.BertNormalizer

## Lowercase

[[autodoc]] tokenizers.normalizers.Lowercase

## NFC

[[autodoc]] tokenizers.normalizers.NFC

## NFD

[[autodoc]] tokenizers.normalizers.NFD

## NFKC

[[autodoc]] tokenizers.normalizers.NFKC

## NFKD

[[autodoc]] tokenizers.normalizers.NFKD

## Nmt

[[autodoc]] tokenizers.normalizers.Nmt

## Normalizer

[[autodoc]] tokenizers.normalizers.Normalizer

## Precompiled

[[autodoc]] tokenizers.normalizers.Precompiled

## Replace

[[autodoc]] tokenizers.normalizers.Replace

## Sequence

[[autodoc]] tokenizers.normalizers.Sequence

## Strip

[[autodoc]] tokenizers.normalizers.Strip

## StripAccents

[[autodoc]] tokenizers.normalizers.StripAccents
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent>
tokenizers/docs/source-doc-builder/api/normalizers.mdx/0
{ "file_path": "tokenizers/docs/source-doc-builder/api/normalizers.mdx", "repo_id": "tokenizers", "token_count": 350 }
239
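A hedged composition example for the normalizers listed above (the comment shows the usual documented behaviour, included here for illustration):

```python
from tokenizers import normalizers
from tokenizers.normalizers import NFD, Lowercase, StripAccents

normalizer = normalizers.Sequence([NFD(), StripAccents(), Lowercase()])
print(normalizer.normalize_str("Héllò hôw are ü?"))  # "hello how are u?"
```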
🤗 Tokenizers is tested on Python 3.5+.

You should install 🤗 Tokenizers in a `virtual environment <https://docs.python.org/3/library/venv.html>`_. If you're unfamiliar with Python virtual environments, check out the `user guide <https://packaging.python.org/guides/installing-using-pip-and-virtual-environments/>`__. Create a virtual environment with the version of Python you're going to use and activate it.

Installation with pip
----------------------------------------------------------------------------------------------------

🤗 Tokenizers can be installed using pip as follows::

    pip install tokenizers

Installation from sources
----------------------------------------------------------------------------------------------------

To use this method, you need to have the Rust language installed. You can follow `the official guide <https://www.rust-lang.org/learn/get-started>`__ for more information.

If you are using a unix-based OS, the installation should be as simple as running::

    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh

Or you can easily update it with the following command::

    rustup update

Once Rust is installed, we can start retrieving the sources for 🤗 Tokenizers::

    git clone https://github.com/huggingface/tokenizers

Then we go into the python bindings folder::

    cd tokenizers/bindings/python

At this point you should have your `virtual environment`_ already activated. In order to compile 🤗 Tokenizers, you need to::

    pip install -e .
tokenizers/docs/source/installation/python.inc/0
{ "file_path": "tokenizers/docs/source/installation/python.inc", "repo_id": "tokenizers", "token_count": 384 }
240
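A quick sanity check that either installation path worked; this is a sketch only, and the printed version depends on what you installed:

```python
import tokenizers
from tokenizers import Tokenizer
from tokenizers.models import BPE

print(tokenizers.__version__)
tokenizer = Tokenizer(BPE())  # constructing an (untrained) tokenizer proves the native extension loads
print(type(tokenizer))
```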
use tokenizers::models::wordpiece::WordPiece;
use tokenizers::{AddedToken, Tokenizer};

fn main() {
    let start = std::time::Instant::now();
    let mut tokenizer = Tokenizer::new(WordPiece::default());

    // Mix special and not special
    // You can make sure ids are in order, and special status is correct.
    let tokens: Vec<_> = (0..120_000)
        .map(|i| AddedToken::from(format!("[SPECIAL_{}]", i), i % 2 == 0))
        .collect();
    tokenizer.add_tokens(&tokens);
    tokenizer.save("_tok.json", true).unwrap();
    println!("Save took {:?}", start.elapsed());

    let start = std::time::Instant::now();
    let _tok = Tokenizer::from_file("_tok.json").unwrap();
    println!("Took {:?}", start.elapsed());

    std::fs::remove_file("_tok.json").unwrap();
}
tokenizers/tokenizers/examples/serialization.rs/0
{ "file_path": "tokenizers/tokenizers/examples/serialization.rs", "repo_id": "tokenizers", "token_count": 300 }
241
#![allow(clippy::map_entry)] use super::{Pair, WithFirstLastIterator, Word, BPE}; use crate::parallelism::*; use crate::tokenizer::{AddedToken, Result, Trainer}; use crate::utils::progress::{ProgressBar, ProgressStyle}; use serde::{Deserialize, Serialize}; use std::cmp::Ordering; use std::collections::{BinaryHeap, HashMap, HashSet}; #[derive(Debug, Eq)] struct Merge { pair: Pair, count: u64, pos: HashSet<usize>, } impl PartialEq for Merge { fn eq(&self, other: &Self) -> bool { self.count == other.count && self.pair == other.pair } } impl PartialOrd for Merge { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for Merge { fn cmp(&self, other: &Self) -> Ordering { if self.count != other.count { self.count.cmp(&other.count) } else { // Here we want ascending order other.pair.cmp(&self.pair) } } } struct Config { min_frequency: u64, vocab_size: usize, show_progress: bool, special_tokens: Vec<AddedToken>, limit_alphabet: Option<usize>, initial_alphabet: HashSet<char>, continuing_subword_prefix: Option<String>, end_of_word_suffix: Option<String>, max_token_length: Option<usize>, } /// A `BpeTrainerBuilder` can be used to create a `BpeTrainer` with a custom /// configuration. pub struct BpeTrainerBuilder { config: Config, } impl Default for BpeTrainerBuilder { fn default() -> Self { Self { config: Config { min_frequency: 0, vocab_size: 30000, show_progress: true, special_tokens: vec![], limit_alphabet: None, initial_alphabet: HashSet::new(), continuing_subword_prefix: None, end_of_word_suffix: None, max_token_length: None, }, } } } impl BpeTrainerBuilder { /// Constructs a new `BpeTrainerBuilder` pub fn new() -> Self { Self::default() } /// Set the expected minimum frequency #[must_use] pub fn min_frequency(mut self, frequency: u64) -> Self { self.config.min_frequency = frequency; self } /// Set the vocabulary size #[must_use] pub fn vocab_size(mut self, size: usize) -> Self { self.config.vocab_size = size; self } /// Set whether to show progress #[must_use] pub fn show_progress(mut self, show: bool) -> Self { self.config.show_progress = show; self } /// Set the special tokens #[must_use] pub fn special_tokens(mut self, tokens: Vec<AddedToken>) -> Self { self.config.special_tokens = tokens; self } /// Set whether to limit the alphabet #[must_use] pub fn limit_alphabet(mut self, limit: usize) -> Self { self.config.limit_alphabet = Some(limit); self } /// Set the initial alphabet #[must_use] pub fn initial_alphabet(mut self, alphabet: HashSet<char>) -> Self { self.config.initial_alphabet = alphabet; self } /// Set the continuing_subword_prefix #[must_use] pub fn continuing_subword_prefix(mut self, prefix: String) -> Self { self.config.continuing_subword_prefix = Some(prefix); self } /// Set the end_of_word_suffix #[must_use] pub fn end_of_word_suffix(mut self, suffix: String) -> Self { self.config.end_of_word_suffix = Some(suffix); self } /// Set max_token_length #[must_use] pub fn max_token_length(mut self, max_token_length: Option<usize>) -> Self { self.config.max_token_length = max_token_length; self } /// Constructs the final BpeTrainer pub fn build(self) -> BpeTrainer { BpeTrainer { min_frequency: self.config.min_frequency, vocab_size: self.config.vocab_size, show_progress: self.config.show_progress, special_tokens: self.config.special_tokens, limit_alphabet: self.config.limit_alphabet, initial_alphabet: self.config.initial_alphabet, continuing_subword_prefix: self.config.continuing_subword_prefix, end_of_word_suffix: self.config.end_of_word_suffix, 
max_token_length: self.config.max_token_length, words: HashMap::new(), } } } /// In charge of training a `BPE` model /// /// # Examples /// /// ``` /// use tokenizers::tokenizer::Trainer; /// use tokenizers::models::bpe::{BPE, BpeTrainer}; /// /// let sequences = vec![ "Hello", "World" ]; /// /// let mut trainer = BpeTrainer::default(); /// trainer.feed(sequences.iter(), |s| Ok(vec![s.to_owned()])); /// /// let mut model = BPE::default(); /// let special_tokens = trainer.train(&mut model).unwrap(); /// ``` #[non_exhaustive] #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq)] pub struct BpeTrainer { /// The minimum frequency a pair must have to produce a merge operation pub min_frequency: u64, /// The target vocabulary size pub vocab_size: usize, /// Whether to show progress while training pub show_progress: bool, /// A list of special tokens that the model should know of pub special_tokens: Vec<AddedToken>, /// Whether to limit the number of initial tokens that can be kept before computing merges pub limit_alphabet: Option<usize>, /// The initial alphabet we want absolutely to include. This allows to cover /// some characters that are not necessarily in the training set pub initial_alphabet: HashSet<char>, /// An optional prefix to use on any subword that exist only behind another one pub continuing_subword_prefix: Option<String>, /// An optional suffix to caracterize and end-of-word subword pub end_of_word_suffix: Option<String>, /// An optional parameter to limit the max length of any single token pub max_token_length: Option<usize>, words: HashMap<String, u64>, } impl Default for BpeTrainer { fn default() -> Self { Self::builder().build() } } impl BpeTrainer { pub fn new(min_frequency: u64, vocab_size: usize) -> Self { Self { min_frequency, vocab_size, ..Default::default() } } pub fn builder() -> BpeTrainerBuilder { BpeTrainerBuilder::new() } /// Setup a progress bar if asked to show progress fn setup_progress(&self) -> Option<ProgressBar> { if self.show_progress { let p = ProgressBar::new(0); p.set_style( ProgressStyle::default_bar() .template("[{elapsed_precise}] {msg:<30!} {wide_bar} {pos:<9!}/{len:>9!}") .expect("Invalid progress template"), ); Some(p) } else { None } } /// Set the progress bar in the finish state fn finalize_progress(&self, p: &Option<ProgressBar>, final_len: usize) { if let Some(p) = p { p.set_length(final_len as u64); p.finish(); println!(); } } /// Update the progress bar with the new provided length and message fn update_progress(&self, p: &Option<ProgressBar>, len: usize, message: &'static str) { if let Some(p) = p { p.set_message(message); p.set_length(len as u64); p.reset(); } } /// Add the provided special tokens to the initial vocabulary fn add_special_tokens(&self, w2id: &mut HashMap<String, u32>, id2w: &mut Vec<String>) { for token in &self.special_tokens { if !w2id.contains_key(&token.content) { id2w.push(token.content.to_owned()); w2id.insert(token.content.to_owned(), (id2w.len() - 1) as u32); } } } /// Compute the initial alphabet and limit it if relevant fn compute_alphabet( &self, wc: &HashMap<String, u64>, w2id: &mut HashMap<String, u32>, id2w: &mut Vec<String>, ) { // Compute the alphabet from seen words let mut alphabet: HashMap<char, usize> = HashMap::new(); for (word, count) in wc { for c in word.chars() { alphabet .entry(c) .and_modify(|cnt| *cnt += *count as usize) .or_insert(*count as usize); } } // Also include anything from the provided initial alphabet for c in &self.initial_alphabet { alphabet .entry(*c) .and_modify(|cnt| *cnt 
= std::usize::MAX) .or_insert(std::usize::MAX); } let mut kept = alphabet.iter().collect::<Vec<_>>(); // Compute the number of chars to remove from the alphabet // If `limit_alphabet < initial_alphabet.len()`, some of these initial characters // will be removed let to_remove = self .limit_alphabet .map(|limit| { if alphabet.len() > limit { alphabet.len() - limit } else { 0 } }) .unwrap_or(0); // Remove the unwanted chars if to_remove > 0 { kept.sort_unstable_by_key(|k| *k.1); kept.drain(..to_remove); } // Keep the initial alphabet (sorted for determinism) kept.sort_unstable_by_key(|k| (*k.0) as u32); kept.into_iter().for_each(|(c, _)| { let s = c.to_string(); if !w2id.contains_key(&s) { id2w.push(s.clone()); w2id.insert(s, (id2w.len() - 1) as u32); } }); } /// Tokenize words and add subwords to the vocabulary when relevant fn tokenize_words( &self, wc: &HashMap<String, u64>, w2id: &mut HashMap<String, u32>, id2w: &mut Vec<String>, p: &Option<ProgressBar>, ) -> (Vec<Word>, Vec<u64>) { let mut words: Vec<Word> = Vec::with_capacity(wc.len()); let mut counts: Vec<u64> = Vec::with_capacity(wc.len()); for (word, count) in wc { let mut current_word = Word::new(); counts.push(*count); for (is_first, is_last, c) in word.chars().with_first_and_last() { let mut s = c.to_string(); if w2id.contains_key(&s) { // Found the initial char in the authorized alphabet // Add the `continuing_subword_prefix` if relevant if !is_first { if let Some(prefix) = &self.continuing_subword_prefix { s = format!("{}{}", prefix, s); } } // Add the `end_of_word_suffix` if relevant if is_last { if let Some(suffix) = &self.end_of_word_suffix { s = format!("{}{}", s, suffix); } } // Insert the new formed string if necessary if !w2id.contains_key(&s) { id2w.push(s.clone()); w2id.insert(s.clone(), (id2w.len() - 1) as u32); } current_word.add(w2id[&s], 1); // We do not care about the len here } } words.push(current_word); if let Some(p) = p { p.inc(1); } } (words, counts) } fn count_pairs( &self, words: &[Word], counts: &[u64], p: &Option<ProgressBar>, ) -> (HashMap<Pair, i32>, HashMap<Pair, HashSet<usize>>) { words .maybe_par_iter() .enumerate() .map(|(i, word)| { let mut pair_counts = HashMap::new(); let mut where_to_update: HashMap<Pair, HashSet<usize>> = HashMap::new(); for window in word.get_chars().windows(2) { let cur_pair: Pair = (window[0], window[1]); // Initialize pair_counts and where_to_update for this pair if we just saw it if !pair_counts.contains_key(&cur_pair) { pair_counts.insert(cur_pair, 0); } // Then update counts let count = counts[i]; where_to_update .entry(cur_pair) .and_modify(|h| { h.insert(i); }) .or_insert_with(|| { let mut h = HashSet::new(); h.insert(i); h }); *pair_counts.get_mut(&cur_pair).unwrap() += count as i32; } if let Some(p) = &p { p.inc(1); } (pair_counts, where_to_update) }) .reduce( || (HashMap::new(), HashMap::new()), |(mut pair_counts, mut where_to_update), (pc, wtu)| { for (k, v) in pc { pair_counts.entry(k).and_modify(|c| *c += v).or_insert(v); } for (k, v) in wtu { where_to_update .entry(k) .and_modify(|set| *set = set.union(&v).copied().collect()) .or_insert(v); } (pair_counts, where_to_update) }, ) } pub fn do_train( &self, word_counts: &HashMap<String, u64>, model: &mut BPE, ) -> Result<Vec<AddedToken>> { let mut word_to_id: HashMap<String, u32> = HashMap::with_capacity(self.vocab_size); let mut id_to_word: Vec<String> = Vec::with_capacity(self.vocab_size); let max_token_length: usize = self.max_token_length.unwrap_or(usize::MAX); let progress = self.setup_progress(); // // 1. 
Add all special tokens to the vocabulary // self.add_special_tokens(&mut word_to_id, &mut id_to_word); // // 2. Compute the initial alphabet // self.compute_alphabet(word_counts, &mut word_to_id, &mut id_to_word); // // 3. Tokenize words // self.update_progress(&progress, word_counts.len(), "Tokenize words"); let (words, counts) = self.tokenize_words(word_counts, &mut word_to_id, &mut id_to_word, &progress); self.finalize_progress(&progress, words.len()); // // 4. Count pairs in words // self.update_progress(&progress, words.len(), "Count pairs"); let (mut pair_counts, mut where_to_update) = self.count_pairs(&words, &counts, &progress); // Insert them in the queue let mut queue = BinaryHeap::with_capacity(pair_counts.len()); where_to_update.drain().for_each(|(pair, pos)| { let count = pair_counts[&pair]; if count > 0 { queue.push(Merge { pair, count: count as u64, pos, }); } }); self.finalize_progress(&progress, words.len()); // // 5. Do merges // self.update_progress(&progress, self.vocab_size, "Compute merges"); let mut merges: Vec<(Pair, u32)> = vec![]; loop { // Stop as soon as we have a big enough vocabulary if word_to_id.len() >= self.vocab_size { break; } if queue.is_empty() { break; } let mut top = queue.pop().unwrap(); if top.count != pair_counts[&top.pair] as u64 { top.count = pair_counts[&top.pair] as u64; queue.push(top); continue; } if top.count < 1 || self.min_frequency > top.count { break; } let part_a = &id_to_word[top.pair.0 as usize]; let mut part_b = id_to_word[top.pair.1 as usize].to_owned(); // Build new token if let Some(prefix) = &self.continuing_subword_prefix { if part_b.starts_with(prefix) { let prefix_byte_len = prefix.chars().map(|c| c.len_utf8()).sum(); part_b = part_b[prefix_byte_len..].to_string(); } } let new_token = format!("{}{}", part_a, part_b); // implement sentencepiece-like merge. // if this code were to be merged, integrate a way in the python bindings to communicate this variable // default should be 0/None to maintain previous behavior. 16 is the spm default. // Insert new token if it does not already exist let new_token_id = word_to_id .get(&new_token) .copied() .unwrap_or(id_to_word.len() as u32); if word_to_id.get(&new_token).is_none() { id_to_word.push(new_token.clone()); word_to_id.insert(new_token.clone(), new_token_id); } merges.push((top.pair, new_token_id)); // Merge the new pair in every words let changes = top .pos .maybe_par_iter() .flat_map(|&i| { let word = &words[i] as *const _ as *mut Word; // We can merge each of these words in parallel here because each position // can be there only once (HashSet). So this is safe. 
unsafe { // let word: &mut Word = &mut (*word); (*word) .merge(top.pair.0, top.pair.1, new_token_id, max_token_length) .into_iter() .map(|c| (c, i)) .collect::<Vec<_>>() } }) .collect::<Vec<_>>(); // Introduce new formed pairs for ((pair, change), iw) in changes { let count = change * counts[iw] as i32; pair_counts .entry(pair) .and_modify(|c| *c += count) .or_insert(count); if change > 0 { where_to_update .entry(pair) .and_modify(|h| { h.insert(iw); }) .or_insert_with(|| { let mut h = HashSet::new(); h.insert(iw); h }); } } where_to_update.drain().for_each(|(pair, pos)| { let count = pair_counts[&pair]; if count > 0 { queue.push(Merge { pair, count: count as u64, pos, }); } }); if let Some(p) = &progress { p.inc(1); } } self.finalize_progress(&progress, merges.len()); // Transfer new vocab & options to model model.vocab = word_to_id; model.vocab_r = model .vocab .iter() .map(|(key, val)| (*val, key.to_owned())) .collect(); model.merges = merges .into_iter() .enumerate() .map(|(i, (pair, new_token_id))| (pair, (i as u32, new_token_id))) .collect(); if let Some(prefix) = &self.continuing_subword_prefix { model.continuing_subword_prefix = Some(prefix.to_owned()); } else { model.continuing_subword_prefix = None; } if let Some(suffix) = &self.end_of_word_suffix { model.end_of_word_suffix = Some(suffix.to_owned()); } else { model.end_of_word_suffix = None; } Ok(self.special_tokens.clone()) } } impl Trainer for BpeTrainer { type Model = BPE; /// Train a BPE model fn train(&self, model: &mut BPE) -> Result<Vec<AddedToken>> { self.do_train(&self.words, model) } /// Whether we should show progress fn should_show_progress(&self) -> bool { self.show_progress } fn feed<I, S, F>(&mut self, iterator: I, process: F) -> Result<()> where I: Iterator<Item = S> + Send, S: AsRef<str> + Send, F: Fn(&str) -> Result<Vec<String>> + Sync, { let words: Result<HashMap<String, u64>> = iterator .maybe_par_bridge() .map(|sequence| { let words = process(sequence.as_ref())?; let mut map = HashMap::new(); for word in words { map.entry(word).and_modify(|c| *c += 1).or_insert(1); } Ok(map) }) .reduce( || Ok(HashMap::new()), |acc, ws| { let mut acc = acc?; for (k, v) in ws? { acc.entry(k).and_modify(|c| *c += v).or_insert(v); } Ok(acc) }, ); self.words = words?; Ok(()) } } #[cfg(test)] mod tests { use super::{BpeTrainer, Pair, BPE}; use std::collections::HashMap; #[test] fn test_train() { let word_counts: HashMap<String, u64> = [ ("roses".into(), 1), ("are".into(), 2), ("red".into(), 1), ("voilets".into(), 1), ("blue".into(), 1), ("BERT".into(), 1), ("is".into(), 2), ("big".into(), 1), ("and".into(), 1), ("so".into(), 1), ("GPT-2".into(), 1), ] .iter() .cloned() .collect(); let trainer = BpeTrainer::builder() .show_progress(false) .min_frequency(2) .build(); let mut model = BPE::default(); trainer.do_train(&word_counts, &mut model).unwrap(); // Vocab should contain all of the characters from the `word_counts` mapping // as well as three merges: 're', 'are', and 'is'. 
let expected_vocab: HashMap<String, u32> = [ ("-".into(), 0), ("2".into(), 1), ("B".into(), 2), ("E".into(), 3), ("G".into(), 4), ("P".into(), 5), ("R".into(), 6), ("T".into(), 7), ("a".into(), 8), ("b".into(), 9), ("d".into(), 10), ("e".into(), 11), ("g".into(), 12), ("i".into(), 13), ("l".into(), 14), ("n".into(), 15), ("o".into(), 16), ("r".into(), 17), ("s".into(), 18), ("t".into(), 19), ("u".into(), 20), ("v".into(), 21), ("re".into(), 22), ("are".into(), 23), ("is".into(), 24), ] .iter() .cloned() .collect(); assert_eq!(model.vocab, expected_vocab); // The keys in `merges` are pairs of symbols, the values are tuples of (rank, id), // where 'rank' determines the order in which this merge will be applied during // tokenization, and 'id' is the vocab id of the symbol resulting from merging // the pair of symbols in the corresponding key. let expected_merges: HashMap<Pair, (u32, u32)> = [ ((17, 11), (0, 22)), // 'r' + 'e' -> 're' ((8, 22), (1, 23)), // 'a' + 're' -> 'are' ((13, 18), (2, 24)), // 'i' + 's' -> 'is' ] .iter() .cloned() .collect(); assert_eq!(model.merges, expected_merges); } #[test] fn bpe_test_max_token_length_16() { /* bpe_test_max_token_length series of tests test the max_token_length flag of bpetrainer // this is the more robust version that only tests max length of learned tokens // (pre) tokenizer settings or vocab can be easily modified when necessary */ let max_token_length = 16; let long_word_counts: HashMap<String, u64> = [ ("singlelongtokenwithoutcasechange", 2), ("singleLongTokenWithCamelCaseChange", 2), ("Longsingletokenwithpunctu@t!onwithin", 2), ("Anotherlongsingletokenwithnumberw1th1n", 2), ("짧은한글문자열짧은한", 2), // korean 10 char ("긴한글문자열긴한글문자열긴한글문", 2), // korean 16 char ("短字符串短字符串短字", 2), //simplified chinese 10 char ("长字符串长字符串长字符串长字符串", 2), // simp. chinese 16 char ("短い文字列短い文字列", 2), // japanese 10 char ("長い文字列長い文字列長い文字列長", 2), // japanese 16 char ("so", 2), ("GPT-2", 2), ] .iter() .map(|(key, value)| (key.to_string(), *value)) .collect(); let trainer = BpeTrainer::builder() .max_token_length(Some(max_token_length)) .show_progress(false) .min_frequency(0) .build(); let mut model = BPE::default(); trainer.do_train(&long_word_counts, &mut model).unwrap(); let vocab = model.get_vocab(); for token in vocab.keys() { assert!( token.chars().count() <= max_token_length, "token too long : {} , chars().count() = {}", token, token.chars().count() ) } } #[test] fn bpe_test_max_token_length_direct_assert() { /* more direct version of bpe_test_max_token_length test // directly compares tokens with known expected values. // maybe unstable depending on specific settings or changes. 
*/ let long_word_counts: HashMap<String, u64> = [ ("sin", 2), ("Sin", 2), ("Lon", 2), ("Ano", 2), ("짧은한", 2), ("긴한글", 2), ("短字符", 2), ("长字符", 2), ("短い文", 2), ("長い文", 2), ("so", 2), ("GP", 2), ] .iter() .map(|(key, value)| (key.to_string(), *value)) .collect(); let trainer = BpeTrainer::builder() .max_token_length(Some(2)) .show_progress(false) .min_frequency(0) .build(); let mut model = BPE::default(); trainer.do_train(&long_word_counts, &mut model).unwrap(); let trained_vocab: HashMap<String, u32> = model.get_vocab(); let expected_vocab: HashMap<String, u32> = [ ("短", 12), ("n", 6), ("i", 5), ("s", 8), ("字符", 23), ("長", 14), ("긴", 17), ("い文", 22), ("L", 2), ("in", 21), ("o", 7), ("은한", 29), ("S", 4), ("P", 3), ("so", 27), ("符", 13), ("文", 11), ("字", 10), ("짧", 19), ("GP", 25), ("글", 16), ("G", 1), ("An", 24), ("长", 15), ("A", 0), ("Lo", 26), ("긴한", 28), ("い", 9), ("한", 20), ("은", 18), ] .iter() .cloned() .map(|(k, v)| (k.to_string(), v)) .collect(); assert_eq!(trained_vocab, expected_vocab) } }
tokenizers/tokenizers/src/models/bpe/trainer.rs/0
{ "file_path": "tokenizers/tokenizers/src/models/bpe/trainer.rs", "repo_id": "tokenizers", "token_count": 15117 }
242
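The Rust trainer above is what the Python `BpeTrainer` drives; a hedged end-to-end sketch follows (the two training sentences are placeholders for a real corpus):

```python
from tokenizers import Tokenizer
from tokenizers.models import BPE
from tokenizers.pre_tokenizers import Whitespace
from tokenizers.trainers import BpeTrainer

tokenizer = Tokenizer(BPE(unk_token="[UNK]"))
tokenizer.pre_tokenizer = Whitespace()

trainer = BpeTrainer(
    vocab_size=30_000,
    min_frequency=2,
    special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"],
)
tokenizer.train_from_iterator(["roses are red", "violets are blue"], trainer=trainer)
print(tokenizer.encode("roses are blue").tokens)
```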
pub mod bert; pub mod precompiled; pub mod prepend; pub mod replace; pub mod strip; pub mod unicode; pub mod utils; pub use crate::normalizers::bert::BertNormalizer; pub use crate::normalizers::precompiled::Precompiled; pub use crate::normalizers::prepend::Prepend; pub use crate::normalizers::replace::Replace; pub use crate::normalizers::strip::{Strip, StripAccents}; pub use crate::normalizers::unicode::{Nmt, NFC, NFD, NFKC, NFKD}; pub use crate::normalizers::utils::{Lowercase, Sequence}; use serde::{Deserialize, Serialize}; use crate::{NormalizedString, Normalizer}; /// Wrapper for known Normalizers. #[derive(Clone, Debug, Deserialize, Serialize)] #[serde(untagged)] pub enum NormalizerWrapper { BertNormalizer(BertNormalizer), StripNormalizer(Strip), StripAccents(StripAccents), NFC(NFC), NFD(NFD), NFKC(NFKC), NFKD(NFKD), Sequence(Sequence), Lowercase(Lowercase), Nmt(Nmt), Precompiled(Precompiled), Replace(Replace), Prepend(Prepend), } impl Normalizer for NormalizerWrapper { fn normalize(&self, normalized: &mut NormalizedString) -> crate::Result<()> { match self { Self::BertNormalizer(bn) => bn.normalize(normalized), Self::StripNormalizer(sn) => sn.normalize(normalized), Self::StripAccents(sn) => sn.normalize(normalized), Self::NFC(nfc) => nfc.normalize(normalized), Self::NFD(nfd) => nfd.normalize(normalized), Self::NFKC(nfkc) => nfkc.normalize(normalized), Self::NFKD(nfkd) => nfkd.normalize(normalized), Self::Sequence(sequence) => sequence.normalize(normalized), Self::Lowercase(lc) => lc.normalize(normalized), Self::Nmt(lc) => lc.normalize(normalized), Self::Precompiled(lc) => lc.normalize(normalized), Self::Replace(lc) => lc.normalize(normalized), Self::Prepend(lc) => lc.normalize(normalized), } } } impl_enum_from!(BertNormalizer, NormalizerWrapper, BertNormalizer); impl_enum_from!(NFKD, NormalizerWrapper, NFKD); impl_enum_from!(NFKC, NormalizerWrapper, NFKC); impl_enum_from!(NFC, NormalizerWrapper, NFC); impl_enum_from!(NFD, NormalizerWrapper, NFD); impl_enum_from!(Strip, NormalizerWrapper, StripNormalizer); impl_enum_from!(StripAccents, NormalizerWrapper, StripAccents); impl_enum_from!(Sequence, NormalizerWrapper, Sequence); impl_enum_from!(Lowercase, NormalizerWrapper, Lowercase); impl_enum_from!(Nmt, NormalizerWrapper, Nmt); impl_enum_from!(Precompiled, NormalizerWrapper, Precompiled); impl_enum_from!(Replace, NormalizerWrapper, Replace); impl_enum_from!(Prepend, NormalizerWrapper, Prepend);
tokenizers/tokenizers/src/normalizers/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/normalizers/mod.rs", "repo_id": "tokenizers", "token_count": 1090 }
243
mod pre_tokenizer;
mod scripts;

// Re-export the PreTokenizer
pub use pre_tokenizer::UnicodeScripts;
tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs/0
{ "file_path": "tokenizers/tokenizers/src/pre_tokenizers/unicode_scripts/mod.rs", "repo_id": "tokenizers", "token_count": 35 }
244
use std::borrow::Borrow; use std::collections::HashMap; use std::hash::Hash; use std::sync::RwLock; /// The default capacity for a `BPE`'s internal cache. pub static DEFAULT_CACHE_CAPACITY: usize = 10_000; /// Provides a simple multithread cache to speed up BPE tokenization that will try to read values /// concurrently but won't block if another thread is writing. /// The goal is clearly not the accuracy of the content, both get and set /// are not guaranteed to actually get or set. #[derive(Debug)] pub(crate) struct Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { map: RwLock<HashMap<K, V>>, pub capacity: usize, } // We dont really care about Cache comparison, so let's make them always equal impl<K, V> PartialEq for Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { fn eq(&self, _other: &Cache<K, V>) -> bool { true } } impl<K, V> Default for Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { fn default() -> Self { Self::new(DEFAULT_CACHE_CAPACITY) } } impl<K, V> Cache<K, V> where K: Eq + Hash + Clone, V: Clone, { /// Create new `Cache` with the given capacity. pub(crate) fn new(capacity: usize) -> Self { let map = RwLock::new(HashMap::with_capacity(capacity)); Cache { map, capacity } } /// Create a fresh `Cache` with the same configuration. pub(crate) fn fresh(&self) -> Self { Self::new(self.capacity) } /// Clear the cache. pub(crate) fn clear(&self) { self.map.write().unwrap().clear(); } #[allow(dead_code)] pub(crate) fn get_values<'a, I, Q>(&self, keys_iter: I) -> Option<Vec<Option<V>>> where I: Iterator<Item = &'a Q>, K: Borrow<Q>, Q: Hash + Eq + ?Sized + 'a, { if let Ok(ref mut cache) = self.map.try_read() { Some(keys_iter.map(|k| cache.get(k).cloned()).collect()) } else { None } } pub(crate) fn get<Q>(&self, key: &Q) -> Option<V> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { if let Ok(ref mut cache) = self.map.try_read() { cache.get(key).cloned() } else { None } } pub(crate) fn set_values<I>(&self, entries: I) where I: IntoIterator<Item = (K, V)>, { // Before trying to acquire a write lock, we check if we are already at // capacity with a read handler. if let Ok(cache) = self.map.try_read() { if cache.len() >= self.capacity { // At capacity, so do nothing. return; } } else { // If we couldn't acquire a read handle then we probably won't be able to acquire // a write handle one quadrillionth of a second later. return; } // Not at capacity, so try acquiring a write handle. if let Ok(mut cache) = self.map.try_write() { let free = self.capacity - cache.len(); cache.extend(entries.into_iter().take(free)); } } pub(crate) fn set(&self, key: K, value: V) { self.set_values(std::iter::once((key, value))) } }
tokenizers/tokenizers/src/utils/cache.rs/0
{ "file_path": "tokenizers/tokenizers/src/utils/cache.rs", "repo_id": "tokenizers", "token_count": 1436 }
245
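The cache above deliberately trades accuracy for speed: reads and writes give up instead of blocking, and writes stop once capacity is reached. A Python analogue of that design choice (illustration only; it uses a single lock rather than a reader-writer lock, and the class name is made up):

```python
import threading


class BestEffortCache:
    """Capacity-bounded cache that never blocks: contended reads/writes are simply skipped."""

    def __init__(self, capacity: int = 10_000):
        self.capacity = capacity
        self._map = {}
        self._lock = threading.Lock()

    def get(self, key):
        if not self._lock.acquire(blocking=False):
            return None  # another thread holds the lock: treat it as a miss
        try:
            return self._map.get(key)
        finally:
            self._lock.release()

    def set(self, key, value):
        if not self._lock.acquire(blocking=False):
            return  # best effort: silently drop the write
        try:
            if len(self._map) < self.capacity:
                self._map[key] = value
        finally:
            self._lock.release()


cache = BestEffortCache(capacity=2)
cache.set("he llo", ["he", "llo"])
print(cache.get("he llo"))  # ['he', 'llo'] unless a concurrent writer held the lock
```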